patch
stringlengths 17
31.2k
| y
int64 1
1
| oldf
stringlengths 0
2.21M
| idx
int64 1
1
| id
int64 4.29k
68.4k
| msg
stringlengths 8
843
| proj
stringclasses 212
values | lang
stringclasses 9
values |
---|---|---|---|---|---|---|---|
@@ -243,12 +243,12 @@ public class SmartStoreLoadTest extends InstrumentationTestCase {
// Without indexing for new index specs
alterSoup("Adding one index / no re-indexing", false, new IndexSpec[]{new IndexSpec("k_0", indexType), new IndexSpec("k_1", indexType)});
alterSoup("Adding one index / dropping one index / no re-indexing", false, new IndexSpec[] {new IndexSpec("k_0", indexType), new IndexSpec("k_2", indexType)});
- alterSoup("Dropping two indexes / no re-indexing", false, new IndexSpec[] {new IndexSpec("k_3", indexType)});
+ alterSoup("Dropping one index / no re-indexing", false, new IndexSpec[] {new IndexSpec("k_0", indexType)});
// With indexing for new index specs
alterSoup("Adding one index / with re-indexing", true, new IndexSpec[] {new IndexSpec("k_0", indexType), new IndexSpec("k_1", indexType)});
alterSoup("Adding one index / dropping one index / with re-indexing", true, new IndexSpec[] {new IndexSpec("k_0", indexType), new IndexSpec("k_2", indexType)});
- alterSoup("Dropping two indexes / with re-indexing", true, new IndexSpec[] {new IndexSpec("k_3", indexType)});
+ alterSoup("Dropping one index / with re-indexing", true, new IndexSpec[] {new IndexSpec("k_0", indexType)});
}
private void alterSoup(String msg, boolean reIndexData, IndexSpec[] indexSpecs) throws JSONException { | 1 | /*
* Copyright (c) 2011-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.store;
import android.content.Context;
import android.test.InstrumentationTestCase;
import android.util.Log;
import com.salesforce.androidsdk.smartstore.store.DBHelper;
import com.salesforce.androidsdk.smartstore.store.DBOpenHelper;
import com.salesforce.androidsdk.smartstore.store.IndexSpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.Type;
import net.sqlcipher.database.SQLiteDatabase;
import net.sqlcipher.database.SQLiteOpenHelper;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
import java.util.List;
/**
* Set of tests for the smart store loading numerous and/or large entries and querying them back
*/
public class SmartStoreLoadTest extends InstrumentationTestCase {
private static final String TEST_SOUP = "test_soup";
private static final int NUMBER_ENTRIES = 10000;
private static final int NUMBER_ENTRIES_PER_BATCH = 100;
private static final int NS_IN_MS = 1000000;
protected Context targetContext;
private SmartStore store;
//
// Setup and tear down
//
@Override
public void setUp() throws Exception {
super.setUp();
targetContext = getInstrumentation().getTargetContext();
// Open (or create) the encrypted database, wipe any cached state, and start
// each test from a store with no soups.
final SQLiteOpenHelper dbOpenHelper = DBOpenHelper.getOpenHelper(targetContext, null);
DBHelper.getInstance(dbOpenHelper.getWritableDatabase(getPasscode())).reset(targetContext, null);
store = new SmartStore(dbOpenHelper, getPasscode());
store.dropAllSoups();
}
@Override
protected void tearDown() throws Exception {
// Close the database handle opened in setUp().
final SQLiteDatabase db = DBOpenHelper.getOpenHelper(targetContext, null).getWritableDatabase(getPasscode());
db.close();
super.tearDown();
}
//
// Tests
//
// Each load test below upserts NUMBER_ENTRIES entries and benchmarks queries,
// varying the index type (string vs json1), the number of fields per entry,
// the field length in characters, and the number of indexes.
public void testUpsertQuery1StringIndex1field20characters() throws JSONException {
tryUpsertQuery(Type.string, NUMBER_ENTRIES, 1, 20, 1);
}
public void testUpsertQuery1StringIndex1field1000characters() throws JSONException {
tryUpsertQuery(Type.string, NUMBER_ENTRIES, 1, 1000, 1);
}
public void testUpsertQuery1StringIndex10fields20characters() throws JSONException {
tryUpsertQuery(Type.string, NUMBER_ENTRIES, 10, 20, 1);
}
public void testUpsertQuery10StringIndexes10fields20characters() throws JSONException {
tryUpsertQuery(Type.string, NUMBER_ENTRIES, 10, 20, 10);
}
public void testUpsertQuery1JSON1Index1field20characters() throws JSONException {
tryUpsertQuery(Type.json1, NUMBER_ENTRIES, 1, 20, 1);
}
public void testUpsertQuery1JSON1Index1field1000characters() throws JSONException {
tryUpsertQuery(Type.json1, NUMBER_ENTRIES, 1, 1000, 1);
}
public void testUpsertQuery1JSON1Index10fields20characters() throws JSONException {
tryUpsertQuery(Type.json1, NUMBER_ENTRIES, 10, 20, 1);
}
public void testUpsertQuery10JSON1Indexes10fields20characters() throws JSONException {
tryUpsertQuery(Type.json1, NUMBER_ENTRIES, 10, 20, 10);
}
// The two alter-soup benchmarks differ only in the index type used.
public void testAlterSoupClassicIndexing() throws JSONException {
tryAlterSoup(Type.string);
}
public void testAlterSoupJSON1Indexing() throws JSONException {
tryAlterSoup(Type.json1);
}
//
// Helper methods
//
// Passcode used to open the encrypted database; empty here, may be
// overridden by subclasses to exercise a passcode-protected store.
protected String getPasscode() {
return "";
}
// Log tag: the concrete test class name.
protected String getTag() {
return getClass().getSimpleName();
}
// Registers a soup with the requested number/type of indexes, bulk-upserts
// the requested number of entries in batches of NUMBER_ENTRIES_PER_BATCH,
// then runs the standard query benchmarks.
private void tryUpsertQuery(Type indexType, int numberEntries, int numberFieldsPerEntry, int numberCharactersPerField, int numberIndexes) throws JSONException {
setupSoup(TEST_SOUP, numberIndexes, indexType);
upsertEntries(numberEntries / NUMBER_ENTRIES_PER_BATCH, NUMBER_ENTRIES_PER_BATCH, numberFieldsPerEntry, numberCharactersPerField);
queryEntries();
}
/**
 * Registers the named soup with numberIndexes indexes of the given type,
 * named k_0 .. k_(numberIndexes-1), and logs what was created.
 */
private void setupSoup(String soupName, int numberIndexes, Type indexType) {
    IndexSpec[] indexSpecs = new IndexSpec[numberIndexes];
    for (int i = 0; i < indexSpecs.length; i++) {
        indexSpecs[i] = new IndexSpec("k_" + i, indexType);
    }
    store.registerSoup(soupName, indexSpecs);
    Log.i(getTag(), String.format("Creating table with %d %s indexes", numberIndexes, indexType));
}
/**
 * Upserts numberBatches * numberEntriesPerBatch entries, one transaction per
 * batch, and logs the average elapsed time per batch.
 * Each entry carries fields k_0 .. k_(numberFieldsPerEntry-1), padded to
 * numberCharactersPerField characters.
 *
 * @throws JSONException if building an entry fails
 */
private void upsertEntries(int numberBatches, int numberEntriesPerBatch, int numberFieldsPerEntry, int numberCharactersPerField) throws JSONException {
    List<Long> times = new ArrayList<Long>();
    for (int batchNumber = 0; batchNumber < numberBatches; batchNumber++) {
        long start = System.nanoTime();
        store.beginTransaction();
        try {
            for (int entryNumber = 0; entryNumber < numberEntriesPerBatch; entryNumber++) {
                JSONObject entry = new JSONObject();
                for (int fieldNumber = 0; fieldNumber < numberFieldsPerEntry; fieldNumber++) {
                    String value = pad("v_" + batchNumber + "_" + entryNumber + "_" + fieldNumber + "_", numberCharactersPerField);
                    entry.put("k_" + fieldNumber, value);
                }
                store.upsert(TEST_SOUP, entry, SmartStore.SOUP_ENTRY_ID, false);
            }
            store.setTransactionSuccessful();
        } finally {
            // Always close the transaction, even when an upsert throws, so the
            // database is never left with an open transaction.
            store.endTransaction();
        }
        long end = System.nanoTime();
        times.add(end - start);
    }
    double avgMilliseconds = average(times) / NS_IN_MS;
    Log.i(getTag(), String.format("Upserting %d entries with %d per batch with %d fields with %d characters: average time per batch --> %.3f ms",
            numberBatches * numberEntriesPerBatch, numberEntriesPerBatch, numberFieldsPerEntry, numberCharactersPerField, avgMilliseconds));
}
// Runs the standard query benchmarks against TEST_SOUP: all-query, like-query
// and exact-query, each at several page sizes.
private void queryEntries() throws JSONException {
// Should find all
queryEntries(QuerySpec.buildAllQuerySpec(TEST_SOUP, null, null, 1));
queryEntries(QuerySpec.buildAllQuerySpec(TEST_SOUP, null, null, 10));
queryEntries(QuerySpec.buildAllQuerySpec(TEST_SOUP, null, null, 100));
// Should find 100 (all entries of batch 0)
queryEntries(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "k_0", "v_0_%", null, null, 1));
queryEntries(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "k_0", "v_0_%", null, null, 10));
queryEntries(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "k_0", "v_0_%", null, null, 100));
// Should find 10 (entry 0 of batch 0, all fields)
queryEntries(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "k_0", "v_0_0_%", null, null, 1));
queryEntries(QuerySpec.buildLikeQuerySpec(TEST_SOUP, "k_0", "v_0_0_%", null, null, 10));
// Should find none
queryEntries(QuerySpec.buildExactQuerySpec(TEST_SOUP, "k_0", "missing", null, null, 1));
}
// Pages through every result of the given query, timing each page, and logs
// the average time per page along with the total match count.
private void queryEntries(QuerySpec querySpec) throws JSONException {
List<Long> times = new ArrayList<Long>();
int countMatches = 0;
boolean hasMore = true;
for (int pageIndex = 0; hasMore; pageIndex++) {
long start = System.nanoTime();
JSONArray results = store.query(querySpec, pageIndex);
long end = System.nanoTime();
times.add(end - start);
// A page shorter than pageSize means this was the last page.
hasMore = (results.length() == querySpec.pageSize);
countMatches += results.length();
}
double avgMilliseconds = average(times) / NS_IN_MS;
Log.i(getTag(), String.format("Querying with %s query matching %d entries and %d page size: average time per page --> %.3f ms",
querySpec.queryType, countMatches, querySpec.pageSize, avgMilliseconds));
}
/**
 * Pads s to numberCharacters characters by appending 'x' characters.
 * If s is already numberCharacters or longer it is returned unchanged
 * (the result may then exceed the requested length).
 *
 * Uses StringBuilder instead of the legacy synchronized StringBuffer, and
 * presizes the buffer to avoid reallocation.
 */
private String pad(String s, int numberCharacters) {
    StringBuilder sb = new StringBuilder(Math.max(s.length(), numberCharacters));
    sb.append(s);
    for (int i = s.length(); i < numberCharacters; i++) {
        sb.append('x');
    }
    return sb.toString();
}
/**
 * Returns the arithmetic mean of the given durations (nanoseconds), or 0 for
 * an empty list (the previous implementation divided 0 by 0, yielding NaN).
 */
private double average(List<Long> times) {
    if (times.isEmpty()) {
        return 0;
    }
    double total = 0;
    for (long time : times) {
        total += time;
    }
    return total / times.size();
}
/**
 * Benchmarks alterSoup: registers a soup with a single k_0 index, loads
 * NUMBER_ENTRIES entries, then alters the soup several times, first without
 * and then with re-indexing, logging the elapsed time of each alteration.
 *
 * After each "no re-indexing" sequence we drop back to a single index on k_0
 * so the execution times with and without re-indexing are directly
 * comparable.
 */
private void tryAlterSoup(Type indexType) throws JSONException {
    Log.i(getTag(), "In testAlterSoup");
    Log.i(getTag(), String.format("Initial database size: %d bytes", store.getDatabaseSize()));
    setupSoup(TEST_SOUP, 1, indexType);
    upsertEntries(NUMBER_ENTRIES / NUMBER_ENTRIES_PER_BATCH, NUMBER_ENTRIES_PER_BATCH, 10, 20);
    Log.i(getTag(), String.format("Database size after: %d bytes", store.getDatabaseSize()));
    // Without indexing for new index specs
    alterSoup("Adding one index / no re-indexing", false, new IndexSpec[]{new IndexSpec("k_0", indexType), new IndexSpec("k_1", indexType)});
    alterSoup("Adding one index / dropping one index / no re-indexing", false, new IndexSpec[] {new IndexSpec("k_0", indexType), new IndexSpec("k_2", indexType)});
    // Back to just one index on k_0 (the previous label/specs claimed
    // "dropping two indexes" on k_3, which left a different index set).
    alterSoup("Dropping one index / no re-indexing", false, new IndexSpec[] {new IndexSpec("k_0", indexType)});
    // With indexing for new index specs
    alterSoup("Adding one index / with re-indexing", true, new IndexSpec[] {new IndexSpec("k_0", indexType), new IndexSpec("k_1", indexType)});
    alterSoup("Adding one index / dropping one index / with re-indexing", true, new IndexSpec[] {new IndexSpec("k_0", indexType), new IndexSpec("k_2", indexType)});
    alterSoup("Dropping one index / with re-indexing", true, new IndexSpec[] {new IndexSpec("k_0", indexType)});
}
/**
 * Alters TEST_SOUP to the given index specs (optionally re-indexing the
 * existing data) and logs how long the alteration took.
 */
private void alterSoup(String msg, boolean reIndexData, IndexSpec[] indexSpecs) throws JSONException {
    long startNs = System.nanoTime();
    store.alterSoup(TEST_SOUP, indexSpecs, reIndexData);
    double elapsedNs = System.nanoTime() - startNs;
    Log.i(getTag(), String.format("%s completed in: %.3f ms", msg, elapsedNs / NS_IN_MS));
}
}
| 1 | 15,246 | That way we are back to having just one index on k_0 So we can really compare the execution times with and without re-indexing | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -52,6 +52,13 @@ public class AttachmentView extends FrameLayout {
public String contentType;
public long size;
public ImageView iconView;
+
+ /**
+ * Regular expression that represents characters that aren't allowed
+ * to be used in file names saved using K-9
+ */
+ private static final String specialCharacters = new String("[^\\d\\s\\w!" +
+ "#\\$%&'\\(\\)\\-@\\^_`\\{\\}~.,]");
private AttachmentFileDownloadCallback callback;
| 1 | package com.fsck.k9.view;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.io.IOUtils;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.Environment;
import android.util.AttributeSet;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.FrameLayout;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import com.fsck.k9.Account;
import com.fsck.k9.K9;
import com.fsck.k9.R;
import com.fsck.k9.controller.MessagingController;
import com.fsck.k9.controller.MessagingListener;
import com.fsck.k9.helper.MediaScannerNotifier;
import com.fsck.k9.helper.SizeFormatter;
import com.fsck.k9.helper.Utility;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.Part;
import com.fsck.k9.mail.internet.MimeUtility;
import com.fsck.k9.mail.store.LocalStore.LocalAttachmentBodyPart;
import com.fsck.k9.provider.AttachmentProvider;
public class AttachmentView extends FrameLayout {
private Context mContext;
public Button viewButton;
public Button downloadButton;
public LocalAttachmentBodyPart part;
private Message mMessage;
private Account mAccount;
private MessagingController mController;
private MessagingListener mListener;
public String name;
public String contentType;
public long size;
public ImageView iconView;
private AttachmentFileDownloadCallback callback;
/** Constructor used when inflating from XML with a style attribute. */
public AttachmentView(Context context, AttributeSet attrs, int defStyle) {
super(context, attrs, defStyle);
mContext = context;
}
/** Constructor used when inflating from XML. */
public AttachmentView(Context context, AttributeSet attrs) {
super(context, attrs);
mContext = context;
}
/** Constructor used when creating the view programmatically. */
public AttachmentView(Context context) {
super(context);
mContext = context;
}
public interface AttachmentFileDownloadCallback {
/**
 * Called by the AttachmentView when it wants to show a file browser.
 * The implementer should show the file browser activity and keep a
 * reference to the calling attachment view; in its onActivityResult it
 * can retrieve that reference and call the view's writeFile method.
 * @param caller the attachment view requesting the file browser
 */
public void showFileBrowser(AttachmentView caller);
}
/**
 * Populates this view from the given attachment part.
 *
 * Determines the attachment name from the Content-Type "name" or
 * Content-Disposition "filename" parameter, hides the view/download buttons
 * for MIME types K-9 cannot or will not handle (and for oversized
 * attachments), and wires up the button listeners.
 *
 * @return false only when no attachment name could be determined; true
 *         otherwise — including when an exception occurred, which is merely
 *         logged below.
 */
public boolean populateFromPart(Part inputPart, Message message, Account account, MessagingController controller, MessagingListener listener) {
try {
part = (LocalAttachmentBodyPart) inputPart;
contentType = MimeUtility.unfoldAndDecode(part.getContentType());
String contentDisposition = MimeUtility.unfoldAndDecode(part.getDisposition());
name = MimeUtility.getHeaderParameter(contentType, "name");
if (name == null) {
name = MimeUtility.getHeaderParameter(contentDisposition, "filename");
}
if (name == null) {
// No usable name at all: caller should not show this attachment.
return false;
}
mAccount = account;
mMessage = message;
mController = controller;
mListener = listener;
// NOTE(review): parseInt throws (NPE/NumberFormatException) when the
// "size" parameter is absent or malformed; the broad catch below then
// logs and still returns true — confirm this is intended.
size = Integer.parseInt(MimeUtility.getHeaderParameter(contentDisposition, "size"));
// Re-resolve the content type to the one used for viewing.
contentType = MimeUtility.getMimeTypeForViewing(part.getMimeType(), name);
TextView attachmentName = (TextView) findViewById(R.id.attachment_name);
TextView attachmentInfo = (TextView) findViewById(R.id.attachment_info);
ImageView attachmentIcon = (ImageView) findViewById(R.id.attachment_icon);
viewButton = (Button) findViewById(R.id.view);
downloadButton = (Button) findViewById(R.id.download);
// Hide the buttons for types outside the acceptable lists or inside the
// unacceptable lists.
if ((!MimeUtility.mimeTypeMatches(contentType, K9.ACCEPTABLE_ATTACHMENT_VIEW_TYPES))
|| (MimeUtility.mimeTypeMatches(contentType, K9.UNACCEPTABLE_ATTACHMENT_VIEW_TYPES))) {
viewButton.setVisibility(View.GONE);
}
if ((!MimeUtility.mimeTypeMatches(contentType, K9.ACCEPTABLE_ATTACHMENT_DOWNLOAD_TYPES))
|| (MimeUtility.mimeTypeMatches(contentType, K9.UNACCEPTABLE_ATTACHMENT_DOWNLOAD_TYPES))) {
downloadButton.setVisibility(View.GONE);
}
// Oversized attachments can be neither viewed nor downloaded.
if (size > K9.MAX_ATTACHMENT_DOWNLOAD_SIZE) {
viewButton.setVisibility(View.GONE);
downloadButton.setVisibility(View.GONE);
}
viewButton.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
onViewButtonClicked();
return;
}
});
downloadButton.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
onSaveButtonClicked();
return;
}
});
// Long-press on download lets the user pick a target directory via the
// AttachmentFileDownloadCallback.
downloadButton.setOnLongClickListener(new OnLongClickListener() {
@Override
public boolean onLongClick(View v) {
callback.showFileBrowser(AttachmentView.this);
return true;
}
});
attachmentName.setText(name);
attachmentInfo.setText(SizeFormatter.formatSize(mContext, size));
Bitmap previewIcon = getPreviewIcon();
if (previewIcon != null) {
attachmentIcon.setImageBitmap(previewIcon);
} else {
attachmentIcon.setImageResource(R.drawable.attached_image_placeholder);
}
}
catch (Exception e) {
Log.e(K9.LOG_TAG, "error ", e);
}
return true;
}
/**
 * Returns a 62x62 thumbnail for this attachment obtained through
 * AttachmentProvider, or null if it cannot be opened or decoded.
 */
private Bitmap getPreviewIcon() {
try {
return BitmapFactory.decodeStream(
mContext.getContentResolver().openInputStream(
AttachmentProvider.getAttachmentThumbnailUri(mAccount,
part.getAttachmentId(),
62,
62)));
} catch (Exception e) {
/*
* We don't care what happened, we just return null for the preview icon.
*/
return null;
}
}
// Triggers download of the attachment for viewing; the Object[] tag's first
// element is false here vs. true in saveFile() — presumably "view" vs.
// "save"; confirm against the MessagingListener implementation.
private void onViewButtonClicked() {
if (mMessage != null) {
mController.loadAttachment(mAccount, mMessage, part, new Object[] { false, this }, mListener);
}
}
/** Download button handler: starts the save flow. */
private void onSaveButtonClicked() {
    saveFile();
}

/**
 * Writes the attachment into the given directory under a unique file name,
 * then notifies the user and the media scanner.
 *
 * Streams are now closed in a finally block so they are not leaked when the
 * copy fails partway (the original left both streams open on exception).
 *
 * @param directory the base dir where the file should be saved.
 */
public void writeFile(File directory) {
    InputStream in = null;
    OutputStream out = null;
    try {
        File file = Utility.createUniqueFile(directory, name);
        Uri uri = AttachmentProvider.getAttachmentUri(mAccount, part.getAttachmentId());
        in = mContext.getContentResolver().openInputStream(uri);
        out = new FileOutputStream(file);
        IOUtils.copy(in, out);
        out.flush();
        attachmentSaved(file.toString());
        new MediaScannerNotifier(mContext, file);
    } catch (IOException ioe) {
        attachmentNotSaved();
    } finally {
        // Close quietly so a failure to close does not mask the copy result.
        IOUtils.closeQuietly(in);
        IOUtils.closeQuietly(out);
    }
}
/**
 * Saves the attachment to the default attachment path configured in K-9
 * (K9.getAttachmentDefaultPath()).
 */
public void writeFile() {
writeFile(new File(K9.getAttachmentDefaultPath()));
}
/**
 * Starts downloading the attachment so it can be saved. The download is
 * tagged with Object[] {true, this}; presumably the listener then calls
 * writeFile() on this view once the attachment is available — confirm in
 * the MessagingListener implementation.
 */
public void saveFile() {
//TODO: Can the user save attachments on the internal filesystem or sd card only?
if (!Environment.getExternalStorageState().equals(Environment.MEDIA_MOUNTED)) {
/*
* Abort early if there's no place to save the attachment. We don't want to spend
* the time downloading it and then abort.
*/
Toast.makeText(mContext,
mContext.getString(R.string.message_view_status_attachment_not_saved),
Toast.LENGTH_SHORT).show();
return;
}
if (mMessage != null) {
mController.loadAttachment(mAccount, mMessage, part, new Object[] {true, this}, mListener);
}
}
/**
 * Opens the attachment in an external viewer via ACTION_VIEW, showing a
 * toast when no application can handle the attachment's content type.
 */
public void showFile() {
Uri uri = AttachmentProvider.getAttachmentUriForViewing(mAccount, part.getAttachmentId());
Intent intent = new Intent(Intent.ACTION_VIEW);
// We explicitly set the ContentType in addition to the URI because some attachment viewers (such as Polaris office 3.0.x) choke on documents without a mime type
intent.setDataAndType(uri, contentType);
intent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION | Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET);
try {
mContext.startActivity(intent);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Could not display attachment of type " + contentType, e);
Toast toast = Toast.makeText(mContext, mContext.getString(R.string.message_view_no_viewer, contentType), Toast.LENGTH_LONG);
toast.show();
}
}
/**
 * Check the {@link PackageManager} if the phone has an application
 * installed to view this type of attachment.
 * If not, {@link #viewButton} is disabled.
 * This should be done in any place where
 * attachment.viewButton.setEnabled(enabled); is called.
 * This method is safe to be called from the UI-thread.
 */
public void checkViewable() {
if (viewButton.getVisibility() == View.GONE) {
// nothing to do
return;
}
if (!viewButton.isEnabled()) {
// nothing to do
return;
}
try {
Uri uri = AttachmentProvider.getAttachmentUriForViewing(mAccount, part.getAttachmentId());
Intent intent = new Intent(Intent.ACTION_VIEW);
intent.setData(uri);
intent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION | Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET);
if (intent.resolveActivity(mContext.getPackageManager()) == null) {
viewButton.setEnabled(false);
}
// Currently we do not cache the result of this resolution.
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Cannot resolve activity to determine if we shall show the 'view'-button for an attachment", e);
}
}
/** Shows a toast with the path the attachment was saved to. */
public void attachmentSaved(final String filename) {
Toast.makeText(mContext, String.format(
mContext.getString(R.string.message_view_status_attachment_saved), filename),
Toast.LENGTH_LONG).show();
}
/** Shows a toast telling the user the attachment could not be saved. */
public void attachmentNotSaved() {
Toast.makeText(mContext,
mContext.getString(R.string.message_view_status_attachment_not_saved),
Toast.LENGTH_LONG).show();
}
/** Returns the callback used to open a file browser on long-press. */
public AttachmentFileDownloadCallback getCallback() {
return callback;
}
/** Sets the callback used to open a file browser on long-press. */
public void setCallback(AttachmentFileDownloadCallback callback) {
this.callback = callback;
}
}
| 1 | 11,659 | Expression that uses a negation pattern to exclude all characters that aren't in the expression. | k9mail-k-9 | java |
@@ -82,6 +82,10 @@ const (
// feature is supported on the server. If any non-empty value is set,
// this indicates true.
BothResponseErrorHeader = "Rpc-Both-Response-Error"
+
+ // Echo ServiceHeader in Request header which can be used by clients/HC to
+ // validate request went to the correct service
+ RespondServiceHeader = "Rpc-Respond-Service-Header"
)
// Valid values for the Rpc-Status header. | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import "time"
const transportName = "http"
var defaultConnTimeout = 500 * time.Millisecond
// HTTP headers used in requests and responses to send YARPC metadata.
const (
// CallerHeader is the name of the service sending the request. This
// corresponds to the Request.Caller attribute.
CallerHeader = "Rpc-Caller"
// EncodingHeader is the name of the encoding used for the request body.
// This corresponds to the Request.Encoding attribute.
EncodingHeader = "Rpc-Encoding"
// TTLMSHeader is the amount of time (in milliseconds) within which the
// request is expected to finish.
TTLMSHeader = "Context-TTL-MS"
// ProcedureHeader is the name of the procedure being called. This
// corresponds to the Request.Procedure attribute.
ProcedureHeader = "Rpc-Procedure"
// ServiceHeader is the name of the service to which the request is being
// sent. This corresponds to the Request.Service attribute.
ServiceHeader = "Rpc-Service"
// ShardKeyHeader is the shard key used by the destined service to shard
// the request. This corresponds to the Request.ShardKey attribute.
ShardKeyHeader = "Rpc-Shard-Key"
// RoutingKeyHeader is the traffic group responsible for handling the
// request. This corresponds to the Request.RoutingKey attribute.
RoutingKeyHeader = "Rpc-Routing-Key"
// RoutingDelegateHeader is a service that can proxy the destined service.
// This corresponds to the Request.RoutingDelegate attribute.
RoutingDelegateHeader = "Rpc-Routing-Delegate"
// ApplicationStatusHeader indicates whether the response body contains an
// application error.
ApplicationStatusHeader = "Rpc-Status"
// ErrorCodeHeader contains the string representation of the error code.
ErrorCodeHeader = "Rpc-Error-Code"
// ErrorNameHeader contains the name of a user-defined error.
ErrorNameHeader = "Rpc-Error-Name"
// ErrorMessageHeader contains the message of an error, if the
// BothResponseError feature is enabled.
ErrorMessageHeader = "Rpc-Error-Message"
// AcceptsBothResponseErrorHeader says that the BothResponseError
// feature is supported on the client. If the value is "true",
// this indicates true.
AcceptsBothResponseErrorHeader = "Rpc-Accepts-Both-Response-Error"
// BothResponseErrorHeader says that the BothResponseError
// feature is supported on the server. If any non-empty value is set,
// this indicates true.
BothResponseErrorHeader = "Rpc-Both-Response-Error"
)
// Valid values for the Rpc-Status header.
const (
// ApplicationSuccessStatus indicates the request was successful.
ApplicationSuccessStatus = "success"
// ApplicationErrorStatus indicates an error occurred; the response body
// contains an application header.
ApplicationErrorStatus = "error"
// AcceptTrue is the true value used for accept headers.
AcceptTrue = "true"
)
// ApplicationHeaderPrefix is the prefix added to application header keys to
// send them in requests or responses.
const ApplicationHeaderPrefix = "Rpc-Header-"
@@ -135,7 +135,7 @@ func addCustomCommands(rootCmd *cobra.Command) error {
}
descSuffix := " (shell " + service + " container command)"
- if serviceDirOnHost[0:1] == "." {
+ if commandSet == targetGlobalCommandPath {
descSuffix = " (global shell " + service + " container command)"
}
commandToAdd := &cobra.Command{ | 1 | package cmd
import (
"bufio"
"fmt"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/exec"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/globalconfig"
"github.com/drud/ddev/pkg/util"
"github.com/gobuffalo/packr/v2"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
)
// addCustomCommands looks for custom command scripts in
// ~/.ddev/commands/<servicename> etc. and
// .ddev/commands/<servicename> and .ddev/commands/host
// and if it finds them adds them to Cobra's commands.
func addCustomCommands(rootCmd *cobra.Command) error {
	app, err := ddevapp.GetActiveApp("")
	if err != nil {
		// No active project: nothing to add, and not an error for the caller.
		return nil
	}

	// Global commands live in ~/.ddev/commands; make sure the dir exists.
	sourceGlobalCommandPath := filepath.Join(globalconfig.GetGlobalDdevDir(), "commands")
	err = os.MkdirAll(sourceGlobalCommandPath, 0755)
	if err != nil {
		return nil
	}

	projectCommandPath := app.GetConfigPath("commands")

	// Make sure our target global command directory is empty, then copy the
	// global commands into the project's .ddev/.global_commands so they can
	// be mounted into containers alongside the project commands.
	targetGlobalCommandPath := app.GetConfigPath(".global_commands")
	_ = os.RemoveAll(targetGlobalCommandPath)
	err = fileutil.CopyDir(sourceGlobalCommandPath, targetGlobalCommandPath)
	if err != nil {
		return err
	}

	if !fileutil.FileExists(projectCommandPath) || !fileutil.IsDirectory(projectCommandPath) {
		return nil
	}

	// Tracks command names already added; project commands win over global
	// ones because projectCommandPath is processed first.
	commandsAdded := map[string]int{}
	for _, commandSet := range []string{projectCommandPath, targetGlobalCommandPath} {
		commandDirs, err := fileutil.ListFilesInDirFullPath(commandSet)
		if err != nil {
			return err
		}
		for _, serviceDirOnHost := range commandDirs {
			service := filepath.Base(serviceDirOnHost)

			// If the item isn't actually a directory, just skip it.
			if !fileutil.IsDirectory(serviceDirOnHost) {
				continue
			}
			commandFiles, err := fileutil.ListFilesInDir(serviceDirOnHost)
			if err != nil {
				return err
			}
			if runtime.GOOS == "windows" {
				windowsBashPath := util.FindWindowsBashPath()
				if windowsBashPath == "" {
					fmt.Println("Unable to find bash.exe in PATH, not loading custom commands")
					return nil
				}
			}

			for _, commandName := range commandFiles {
				// Use path.Join() for the inContainerFullPath because it's about the
				// path in the container, not on the host; a Windows path is not
				// useful here.
				inContainerFullPath := path.Join("/mnt/ddev_config", filepath.Base(commandSet), service, commandName)
				onHostFullPath := filepath.Join(commandSet, service, commandName)

				if strings.HasSuffix(commandName, ".example") || strings.HasPrefix(commandName, "README") || strings.HasPrefix(commandName, ".") || fileutil.IsDirectory(onHostFullPath) {
					continue
				}

				// If command has already been added, we won't work with it again.
				if _, ok := commandsAdded[commandName]; ok {
					util.Warning("not adding command %s (%s) because it was already added to project %s", commandName, onHostFullPath, app.Name)
					continue
				}

				// Any command we find will want to be executable on Linux
				_ = os.Chmod(onHostFullPath, 0755)
				if hasCR, _ := fileutil.FgrepStringInFile(onHostFullPath, "\r\n"); hasCR {
					util.Warning("command '%s' contains CRLF, please convert to Linux-style linefeeds with dos2unix or another tool, skipping %s", commandName, onHostFullPath)
					continue
				}

				directives := findDirectivesInScriptCommand(onHostFullPath)
				var description, usage, example, projectTypes, osTypes, hostBinaryExists string

				description = commandName
				if val, ok := directives["Description"]; ok {
					description = val
				}
				if val, ok := directives["Usage"]; ok {
					usage = val
				}
				if val, ok := directives["Example"]; ok {
					example = val
				}
				if val, ok := directives["ProjectTypes"]; ok {
					projectTypes = val
				}

				// If ProjectTypes is specified and we aren't of that type, skip
				if projectTypes != "" && !strings.Contains(projectTypes, app.Type) {
					continue
				}

				if val, ok := directives["OSTypes"]; ok {
					osTypes = val
				}

				// If OSTypes is specified and this isn't a specified OS, skip
				if osTypes != "" && !strings.Contains(osTypes, runtime.GOOS) {
					continue
				}

				if val, ok := directives["HostBinaryExists"]; ok {
					hostBinaryExists = val
				}

				// If HostBinaryExists is specified and it doesn't exist here, skip
				if hostBinaryExists != "" && !fileutil.FileExists(hostBinaryExists) {
					continue
				}

				descSuffix := " (shell " + service + " container command)"
				// Fix: detect global commands by which command set is being
				// processed. The previous check (serviceDirOnHost[0:1] == ".")
				// never matched, because serviceDirOnHost is an absolute host
				// path and never starts with ".".
				if commandSet == targetGlobalCommandPath {
					descSuffix = " (global shell " + service + " container command)"
				}

				commandToAdd := &cobra.Command{
					Use:     usage,
					Short:   description + descSuffix,
					Example: example,
					FParseErrWhitelist: cobra.FParseErrWhitelist{
						UnknownFlags: true,
					},
				}

				if service == "host" {
					commandToAdd.Run = makeHostCmd(app, onHostFullPath, commandName)
				} else {
					commandToAdd.Run = makeContainerCmd(app, inContainerFullPath, commandName, service)
				}
				rootCmd.AddCommand(commandToAdd)
				commandsAdded[commandName] = 1
			}
		}
	}

	return nil
}
// makeHostCmd creates a cobra Run function which executes the script at
// fullPath on the host, starting the project first if it isn't running.
// Arguments after the command name on the ddev command line are passed
// through to the script.
func makeHostCmd(app *ddevapp.DdevApp, fullPath, name string) func(*cobra.Command, []string) {
	var windowsBashPath = ""
	if runtime.GOOS == "windows" {
		windowsBashPath = util.FindWindowsBashPath()
	}

	return func(cmd *cobra.Command, cobraArgs []string) {
		if app.SiteStatus() != ddevapp.SiteRunning {
			err := app.Start()
			if err != nil {
				util.Failed("Failed to start project for custom command: %v", err)
			}
		}

		osArgs := []string{}
		if len(os.Args) > 2 {
			osArgs = os.Args[2:]
		}
		var err error
		// Load environment variables that may be useful for script.
		// (The original called app.DockerEnv() twice; once is enough.)
		app.DockerEnv()
		if runtime.GOOS == "windows" {
			// Sadly, not sure how to have a bash interpreter without this.
			args := []string{fullPath}
			args = append(args, osArgs...)
			err = exec.RunInteractiveCommand(windowsBashPath, args)
		} else {
			err = exec.RunInteractiveCommand(fullPath, osArgs)
		}
		if err != nil {
			util.Failed("Failed to run %s %v; error=%v", name, strings.Join(osArgs, " "), err)
		}
	}
}
// makeContainerCmd creates the command which will app.Exec to a container command.
// fullPath is the in-container path of the script; service is the container to
// exec into (a leading "." marks a global command and is stripped).
func makeContainerCmd(app *ddevapp.DdevApp, fullPath, name string, service string) func(*cobra.Command, []string) {
	svc := service
	if svc[0:1] == "." {
		svc = svc[1:]
	}

	return func(cmd *cobra.Command, args []string) {
		// Make sure the project is running before trying to exec into it.
		if app.SiteStatus() != ddevapp.SiteRunning {
			if err := app.Start(); err != nil {
				util.Failed("Failed to start project for custom command: %v", err)
			}
		}
		app.DockerEnv()

		// Forward any extra CLI args to the in-container script.
		osArgs := []string{}
		if len(os.Args) > 2 {
			osArgs = os.Args[2:]
		}
		_, _, err := app.Exec(&ddevapp.ExecOpts{
			Cmd:       fullPath + " " + strings.Join(osArgs, " "),
			Service:   svc,
			Dir:       app.GetWorkingDir(svc, ""),
			Tty:       isatty.IsTerminal(os.Stdin.Fd()),
			NoCapture: true,
		})
		if err != nil {
			util.Failed("Failed to run %s %v: %v", name, strings.Join(osArgs, " "), err)
		}
	}
}
// findDirectivesInScriptCommand() Returns a map of directives and their contents
// found in the named script. A directive is a line of the form
// "## Key: value"; the value is returned with surrounding spaces trimmed.
// Returns nil if the file could not be scanned.
func findDirectivesInScriptCommand(script string) map[string]string {
	f, err := os.Open(script)
	if err != nil {
		util.Failed("Failed to open %s: %v", script, err)
	}
	// nolint errcheck
	defer f.Close()

	directives := map[string]string{}
	// bufio.Scanner splits on newlines by default.
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		text := scanner.Text()
		if !strings.HasPrefix(text, "## ") || !strings.Contains(text, ":") {
			continue
		}
		trimmed := strings.Replace(text, "## ", "", 1)
		pieces := strings.SplitN(trimmed, ":", 2)
		directives[pieces[0]] = strings.Trim(pieces[1], " ")
	}
	if err := scanner.Err(); err != nil {
		return nil
	}
	return directives
}
// populateExamplesCommandsHomeadditions grabs packr2 assets
// When the items in the assets directory are changed, the packr2 command
// must be run again in this directory (cmd/ddev/cmd) to update the saved
// embedded files.
// "make packr2" can be used to update the packr2 cache.
func populateExamplesCommandsHomeadditions() error {
app, err := ddevapp.GetActiveApp("")
if err != nil {
return nil
}
box := packr.New("customcommands", "./dotddev_assets")
list := box.List()
for _, file := range list {
localPath := app.GetConfigPath(file)
sigFound, err := fileutil.FgrepStringInFile(localPath, ddevapp.DdevFileSignature)
if sigFound || err != nil {
content, err := box.Find(file)
if err != nil {
return err
}
err = os.MkdirAll(filepath.Dir(localPath), 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(localPath, content, 0755)
if err != nil {
return err
}
}
}
// This brings in both the commands and the homeadditions files
box = packr.New("global_dotddev", "./global_dotddev_assets")
list = box.List()
globalDdevDir := globalconfig.GetGlobalDdevDir()
for _, file := range list {
localPath := filepath.Join(globalDdevDir, file)
sigFound, err := fileutil.FgrepStringInFile(localPath, ddevapp.DdevFileSignature)
if sigFound || err != nil {
content, err := box.Find(file)
if err != nil {
return err
}
err = os.MkdirAll(filepath.Dir(localPath), 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(localPath, content, 0755)
if err != nil {
return err
}
}
}
return nil
}
| 1 | 14,672 | The serviceDirOnHost[0:1] was completely wrong. It wasn't just Windows, glad you got this fixed! | drud-ddev | php |
@@ -38,7 +38,7 @@ import (
controllerconfig "antrea.io/antrea/pkg/config/controller"
)
-const waitEgressRealizedTimeout = 3 * time.Second
+const waitEgressRealizedTimeout = 15 * time.Second
func TestEgress(t *testing.T) {
skipIfProviderIs(t, "kind", "pkt_mark field is not properly supported for OVS userspace (netdev) datapath.") | 1 | // Copyright 2021 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"fmt"
"net"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
utilnet "k8s.io/utils/net"
"antrea.io/antrea/pkg/agent/config"
"antrea.io/antrea/pkg/apis/crd/v1alpha2"
agentconfig "antrea.io/antrea/pkg/config/agent"
controllerconfig "antrea.io/antrea/pkg/config/controller"
)
// waitEgressRealizedTimeout is how long tests wait for an Egress to be
// realized. Egress IP failover triggered by Node failure detection can take
// more than 10 seconds, so this must comfortably exceed that.
const waitEgressRealizedTimeout = 15 * time.Second
// TestEgress is the entry point of the Egress e2e suite: it enables the
// Egress feature gate on both antrea-controller and antrea-agent, then runs
// each sub-test as a t.Run subtest against the shared TestData.
func TestEgress(t *testing.T) {
	// Egress relies on pkt_mark, which the OVS userspace (netdev) datapath
	// used by kind does not properly support.
	skipIfProviderIs(t, "kind", "pkt_mark field is not properly supported for OVS userspace (netdev) datapath.")
	skipIfHasWindowsNodes(t)
	skipIfNumNodesLessThan(t, 2)
	skipIfAntreaIPAMTest(t)
	data, err := setupTest(t)
	if err != nil {
		t.Fatalf("Error when setting up test: %v", err)
	}
	defer teardownTest(t, data)
	// Egress works for encap mode only.
	skipIfEncapModeIsNot(t, data, config.TrafficEncapModeEncap)
	// Enable the Egress feature gate in both components and restart them.
	cc := func(config *controllerconfig.ControllerConfig) {
		config.FeatureGates["Egress"] = true
	}
	ac := func(config *agentconfig.AgentConfig) {
		config.FeatureGates["Egress"] = true
	}
	if err := data.mutateAntreaConfigMap(cc, ac, true, true); err != nil {
		t.Fatalf("Failed to enable Egress feature: %v", err)
	}
	t.Run("testEgressClientIP", func(t *testing.T) { testEgressClientIP(t, data) })
	t.Run("testEgressCRUD", func(t *testing.T) { testEgressCRUD(t, data) })
	t.Run("testEgressUpdateEgressIP", func(t *testing.T) { testEgressUpdateEgressIP(t, data) })
	t.Run("testEgressUpdateNodeSelector", func(t *testing.T) { testEgressUpdateNodeSelector(t, data) })
	t.Run("testEgressNodeFailure", func(t *testing.T) { testEgressNodeFailure(t, data) })
	t.Run("testCreateExternalIPPool", func(t *testing.T) { testCreateExternalIPPool(t, data) })
}
// testCreateExternalIPPool verifies that an ExternalIPPool whose nodeSelector
// uses an invalid label value ("pro-") is rejected by API validation.
func testCreateExternalIPPool(t *testing.T, data *TestData) {
	pool := v1alpha2.ExternalIPPool{
		ObjectMeta: metav1.ObjectMeta{Name: "fakeExternalIPPool"},
		Spec: v1alpha2.ExternalIPPoolSpec{
			NodeSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"env": "pro-"},
			},
		},
	}
	_, err := data.crdClient.CrdV1alpha2().ExternalIPPools().Create(context.TODO(), &pool, metav1.CreateOptions{})
	assert.Error(t, err, "Should fail to create ExternalIPPool")
}
// testEgressClientIP verifies SNAT behavior end to end: it runs a fake
// "external" HTTP server in a netns attached to the Egress Node, then checks
// which client IP the server observes for Pods on the Egress Node (localPod)
// and on another Node (remotePod) as an Egress is created, re-scoped,
// re-addressed, and deleted.
func testEgressClientIP(t *testing.T, data *TestData) {
	tests := []struct {
		name       string
		localIP0   string
		localIP1   string
		serverIP   string
		fakeServer string
		ipMaskLen  int
	}{
		{
			name:       "ipv4-cluster",
			localIP0:   "1.1.1.10",
			localIP1:   "1.1.1.11",
			serverIP:   "1.1.1.20",
			fakeServer: "eth-ipv4",
			ipMaskLen:  24,
		},
		{
			name:       "ipv6-cluster",
			localIP0:   "2021::aaa1",
			localIP1:   "2021::aaa2",
			serverIP:   "2021::aaa3",
			fakeServer: "eth-ipv6",
			ipMaskLen:  124,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			egressNode := controlPlaneNodeName()
			var egressNodeIP string
			if utilnet.IsIPv6String(tt.localIP0) {
				skipIfNotIPv6Cluster(t)
				egressNodeIP = controlPlaneNodeIPv6()
			} else {
				skipIfNotIPv4Cluster(t)
				egressNodeIP = controlPlaneNodeIPv4()
			}
			// Create a http server in another netns to fake an external server connected to the egress Node.
			// The veth pair gives the netns a default route back through the Node
			// (localIP0/localIP1 on the Node side, serverIP inside the netns).
			cmd := fmt.Sprintf(`ip netns add %[1]s && \
ip link add dev %[1]s-a type veth peer name %[1]s-b && \
ip link set dev %[1]s-a netns %[1]s && \
ip addr add %[3]s/%[5]d dev %[1]s-b && \
ip addr add %[4]s/%[5]d dev %[1]s-b && \
ip link set dev %[1]s-b up && \
ip netns exec %[1]s ip addr add %[2]s/%[5]d dev %[1]s-a && \
ip netns exec %[1]s ip link set dev %[1]s-a up && \
ip netns exec %[1]s ip route replace default via %[3]s && \
ip netns exec %[1]s /agnhost netexec
`, tt.fakeServer, tt.serverIP, tt.localIP0, tt.localIP1, tt.ipMaskLen)
			// The server Pod must be privileged to manipulate netns and links.
			if err := data.createPodOnNode(tt.fakeServer, testNamespace, egressNode, agnhostImage, []string{"sh", "-c", cmd}, nil, nil, nil, true, func(pod *v1.Pod) {
				privileged := true
				pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{Privileged: &privileged}
			}); err != nil {
				t.Fatalf("Failed to create server Pod: %v", err)
			}
			defer deletePodWrapper(t, data, testNamespace, tt.fakeServer)
			if err := data.podWaitForRunning(defaultTimeout, tt.fakeServer, testNamespace); err != nil {
				t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", tt.fakeServer)
			}
			localPod := fmt.Sprintf("localpod%s", tt.name)
			remotePod := fmt.Sprintf("remotepod%s", tt.name)
			if err := data.createBusyboxPodOnNode(localPod, testNamespace, egressNode, false); err != nil {
				t.Fatalf("Failed to create local Pod: %v", err)
			}
			defer deletePodWrapper(t, data, testNamespace, localPod)
			if err := data.podWaitForRunning(defaultTimeout, localPod, testNamespace); err != nil {
				t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", localPod)
			}
			if err := data.createBusyboxPodOnNode(remotePod, testNamespace, workerNodeName(1), false); err != nil {
				t.Fatalf("Failed to create remote Pod: %v", err)
			}
			defer deletePodWrapper(t, data, testNamespace, remotePod)
			if err := data.podWaitForRunning(defaultTimeout, remotePod, testNamespace); err != nil {
				t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", remotePod)
			}
			// getClientIP gets the translated client IP by accessing the API that replies the request's client IP.
			getClientIP := func(pod string) (string, string, error) {
				serverIPStr := tt.serverIP
				if utilnet.IsIPv6String(tt.localIP0) {
					serverIPStr = fmt.Sprintf("[%s]", tt.serverIP)
				}
				cmd := []string{"wget", "-T", "3", "-O", "-", fmt.Sprintf("%s:8080/clientip", serverIPStr)}
				return data.runCommandFromPod(testNamespace, pod, busyboxContainerName, cmd)
			}
			// assertClientIP asserts the Pod is translated to the provided client IP.
			assertClientIP := func(pod string, clientIPs ...string) {
				var exeErr error
				var stdout, stderr string
				if err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (done bool, err error) {
					stdout, stderr, exeErr = getClientIP(pod)
					if exeErr != nil {
						return false, nil
					}
					// The stdout return is in this format: x.x.x.x:port or [xx:xx:xx::x]:port
					host, _, err := net.SplitHostPort(stdout)
					if err != nil {
						return false, nil
					}
					for _, cip := range clientIPs {
						if cip == host {
							return true, nil
						}
					}
					return false, nil
				}); err != nil {
					t.Fatalf("Failed to get expected client IPs %s for Pod %s, stdout: %s, stderr: %s, err: %v", clientIPs, pod, stdout, stderr, exeErr)
				}
			}
			// assertConnError asserts the Pod is not able to access the API that replies the request's client IP.
			assertConnError := func(pod string) {
				var exeErr error
				var stdout, stderr string
				if err := wait.Poll(100*time.Millisecond, 2*time.Second, func() (done bool, err error) {
					stdout, stderr, exeErr = getClientIP(pod)
					if exeErr != nil {
						return true, nil
					}
					return false, nil
				}); err != nil {
					t.Fatalf("Failed to get expected error, stdout: %v, stderr: %v, err: %v", stdout, stderr, exeErr)
				}
			}
			// As the fake server runs in a netns of the Egress Node, only egress Node can reach the server, Pods running on
			// other Nodes cannot reach it before Egress is added.
			assertClientIP(localPod, tt.localIP0, tt.localIP1)
			assertConnError(remotePod)
			t.Logf("Creating an Egress applying to both Pods")
			matchExpressions := []metav1.LabelSelectorRequirement{
				{
					Key:      "antrea-e2e",
					Operator: metav1.LabelSelectorOpExists,
				},
			}
			egress := data.createEgress(t, "egress-", matchExpressions, nil, "", egressNodeIP)
			defer data.crdClient.CrdV1alpha2().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{})
			// With the Egress in place both Pods should be SNATed to the Egress Node IP.
			assertClientIP(localPod, egressNodeIP)
			assertClientIP(remotePod, egressNodeIP)
			var err error
			err = wait.Poll(time.Millisecond*100, time.Second, func() (bool, error) {
				egress, err = data.crdClient.CrdV1alpha2().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{})
				if err != nil {
					return false, err
				}
				return egress.Status.EgressNode == egressNode, nil
			})
			assert.NoError(t, err, "Egress failed to reach expected status")
			t.Log("Updating the Egress's AppliedTo to remotePod only")
			egress.Spec.AppliedTo = v1alpha2.AppliedTo{
				PodSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"antrea-e2e": remotePod},
				},
			}
			egress, err = data.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), egress, metav1.UpdateOptions{})
			if err != nil {
				t.Fatalf("Failed to update Egress %v: %v", egress, err)
			}
			// localPod is no longer selected, so it reverts to the Node's own IPs.
			assertClientIP(localPod, tt.localIP0, tt.localIP1)
			assertClientIP(remotePod, egressNodeIP)
			t.Log("Updating the Egress's AppliedTo to localPod only")
			egress.Spec.AppliedTo = v1alpha2.AppliedTo{
				PodSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"antrea-e2e": localPod},
				},
			}
			egress, err = data.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), egress, metav1.UpdateOptions{})
			if err != nil {
				t.Fatalf("Failed to update Egress %v: %v", egress, err)
			}
			assertClientIP(localPod, egressNodeIP)
			assertConnError(remotePod)
			t.Logf("Updating the Egress's EgressIP to %s", tt.localIP1)
			egress.Spec.EgressIP = tt.localIP1
			egress, err = data.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), egress, metav1.UpdateOptions{})
			if err != nil {
				t.Fatalf("Failed to update Egress %v: %v", egress, err)
			}
			assertClientIP(localPod, tt.localIP1)
			assertConnError(remotePod)
			t.Log("Deleting the Egress")
			err = data.crdClient.CrdV1alpha2().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{})
			if err != nil {
				t.Fatalf("Failed to delete Egress %v: %v", egress, err)
			}
			// After deletion both Pods return to pre-Egress behavior.
			assertClientIP(localPod, tt.localIP0, tt.localIP1)
			assertConnError(remotePod)
		})
	}
}
// testEgressCRUD exercises creating, realizing and deleting Egresses whose IP
// is allocated from an ExternalIPPool, and checks the pool's usage status
// (Used/Total) is kept up to date.
func testEgressCRUD(t *testing.T, data *TestData) {
	tests := []struct {
		name             string
		ipRange          v1alpha2.IPRange
		nodeSelector     metav1.LabelSelector
		expectedEgressIP string
		expectedNodes    sets.String // Nodes the Egress may legitimately land on; empty means "must not be assigned"
		expectedTotal    int         // expected pool capacity reported in status
	}{
		{
			name:    "single matching Node",
			ipRange: v1alpha2.IPRange{CIDR: "169.254.100.0/30"},
			nodeSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					v1.LabelHostname: nodeName(0),
				},
			},
			expectedEgressIP: "169.254.100.1",
			expectedNodes:    sets.NewString(nodeName(0)),
			expectedTotal:    2,
		},
		{
			name:    "single matching Node with IPv6 range",
			ipRange: v1alpha2.IPRange{CIDR: "2021:1::aaa0/124"},
			nodeSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					v1.LabelHostname: nodeName(0),
				},
			},
			expectedEgressIP: "2021:1::aaa1",
			expectedNodes:    sets.NewString(nodeName(0)),
			expectedTotal:    15,
		},
		{
			name:    "two matching Nodes",
			ipRange: v1alpha2.IPRange{Start: "169.254.101.10", End: "169.254.101.11"},
			nodeSelector: metav1.LabelSelector{
				MatchExpressions: []metav1.LabelSelectorRequirement{
					{
						Key:      v1.LabelHostname,
						Operator: metav1.LabelSelectorOpIn,
						Values:   []string{nodeName(0), nodeName(1)},
					},
				},
			},
			expectedEgressIP: "169.254.101.10",
			expectedNodes:    sets.NewString(nodeName(0), nodeName(1)),
			expectedTotal:    2,
		},
		{
			name:    "no matching Node",
			ipRange: v1alpha2.IPRange{CIDR: "169.254.102.0/30"},
			nodeSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"foo": "bar",
				},
			},
			expectedEgressIP: "169.254.102.1",
			expectedNodes:    sets.NewString(),
			expectedTotal:    2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if utilnet.IsIPv6String(tt.expectedEgressIP) {
				skipIfNotIPv6Cluster(t)
			} else {
				skipIfNotIPv4Cluster(t)
			}
			pool := data.createExternalIPPool(t, "crud-pool-", tt.ipRange, tt.nodeSelector.MatchExpressions, tt.nodeSelector.MatchLabels)
			defer data.crdClient.CrdV1alpha2().ExternalIPPools().Delete(context.TODO(), pool.Name, metav1.DeleteOptions{})
			egress := data.createEgress(t, "crud-egress-", nil, map[string]string{"foo": "bar"}, pool.Name, "")
			defer data.crdClient.CrdV1alpha2().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{})
			// Use Poll to wait the interval before the first run to detect the case that the IP is assigned to any Node
			// when it's not supposed to.
			err := wait.Poll(500*time.Millisecond, 3*time.Second, func() (done bool, err error) {
				egress, err = data.crdClient.CrdV1alpha2().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{})
				if err != nil {
					return false, err
				}
				if egress.Spec.EgressIP != tt.expectedEgressIP {
					return false, nil
				}
				if tt.expectedNodes.Len() == 0 {
					// Assignment to any Node here is a hard failure, not a retry.
					if egress.Status.EgressNode != "" {
						return false, fmt.Errorf("this Egress shouldn't be assigned to any Node")
					}
				} else {
					if !tt.expectedNodes.Has(egress.Status.EgressNode) {
						return false, nil
					}
				}
				return true, nil
			})
			require.NoError(t, err, "Expected egressIP=%s nodeName in %s, got egressIP=%s nodeName=%s", tt.expectedEgressIP, tt.expectedNodes.List(), egress.Spec.EgressIP, egress.Status.EgressNode)
			if egress.Status.EgressNode != "" {
				// The Egress IP must actually be configured on the assigned Node.
				exists, err := hasIP(data, egress.Status.EgressNode, egress.Spec.EgressIP)
				require.NoError(t, err, "Failed to check if IP exists on Node")
				assert.True(t, exists, "Didn't find desired IP on Node")
			}
			// checkEIPStatus polls until the pool status reports the expected
			// number of used IPs and the expected total capacity.
			checkEIPStatus := func(expectedUsed int) {
				var gotUsed, gotTotal int
				err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (done bool, err error) {
					pool, err := data.crdClient.CrdV1alpha2().ExternalIPPools().Get(context.TODO(), pool.Name, metav1.GetOptions{})
					if err != nil {
						return false, fmt.Errorf("failed to get ExternalIPPool: %v", err)
					}
					gotUsed, gotTotal = pool.Status.Usage.Used, pool.Status.Usage.Total
					if expectedUsed != pool.Status.Usage.Used {
						return false, nil
					}
					if tt.expectedTotal != pool.Status.Usage.Total {
						return false, nil
					}
					return true, nil
				})
				require.NoError(t, err, "ExternalIPPool status not match: expectedTotal=%d, got=%d, expectedUsed=%d, got=%d", tt.expectedTotal, gotTotal, expectedUsed, gotUsed)
			}
			checkEIPStatus(1)
			err = data.crdClient.CrdV1alpha2().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{})
			require.NoError(t, err, "Failed to delete Egress")
			if egress.Status.EgressNode != "" {
				// After deletion, the IP must be released from the Node.
				err := wait.PollImmediate(200*time.Millisecond, timeout, func() (done bool, err error) {
					exists, err := hasIP(data, egress.Status.EgressNode, egress.Spec.EgressIP)
					if err != nil {
						return false, fmt.Errorf("check ip error: %v", err)
					}
					return !exists, nil
				})
				require.NoError(t, err, "Found stale IP (%s) exists on Node (%s)", egress.Spec.EgressIP, egress.Status.EgressNode)
			}
			checkEIPStatus(0)
		})
	}
}
// testEgressUpdateEgressIP verifies that updating an Egress's ExternalIPPool
// and EgressIP moves the IP to the new pool's Node, and that the old IP is
// removed from the original Node.
func testEgressUpdateEgressIP(t *testing.T, data *TestData) {
	tests := []struct {
		name             string
		originalNode     string
		newNode          string
		originalIPRange  v1alpha2.IPRange
		originalEgressIP string
		newIPRange       v1alpha2.IPRange
		newEgressIP      string
	}{
		{
			name:             "same Node",
			originalNode:     nodeName(0),
			newNode:          nodeName(0),
			originalIPRange:  v1alpha2.IPRange{CIDR: "169.254.100.0/30"},
			originalEgressIP: "169.254.100.1",
			newIPRange:       v1alpha2.IPRange{CIDR: "169.254.101.0/30"},
			newEgressIP:      "169.254.101.1",
		},
		{
			name:             "different Nodes",
			originalNode:     nodeName(0),
			newNode:          nodeName(1),
			originalIPRange:  v1alpha2.IPRange{CIDR: "169.254.100.0/30"},
			originalEgressIP: "169.254.100.1",
			newIPRange:       v1alpha2.IPRange{CIDR: "169.254.101.0/30"},
			newEgressIP:      "169.254.101.1",
		},
		{
			name:             "different Nodes in IPv6 cluster",
			originalNode:     nodeName(0),
			newNode:          nodeName(1),
			originalIPRange:  v1alpha2.IPRange{CIDR: "2021:2::aaa0/124"},
			originalEgressIP: "2021:2::aaa1",
			newIPRange:       v1alpha2.IPRange{CIDR: "2021:2::bbb0/124"},
			newEgressIP:      "2021:2::bbb1",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if utilnet.IsIPv6String(tt.originalEgressIP) {
				skipIfNotIPv6Cluster(t)
			} else {
				skipIfNotIPv4Cluster(t)
			}
			// Each pool is pinned to a single Node via the hostname label.
			originalPool := data.createExternalIPPool(t, "originalpool-", tt.originalIPRange, nil, map[string]string{v1.LabelHostname: tt.originalNode})
			defer data.crdClient.CrdV1alpha2().ExternalIPPools().Delete(context.TODO(), originalPool.Name, metav1.DeleteOptions{})
			newPool := data.createExternalIPPool(t, "newpool-", tt.newIPRange, nil, map[string]string{v1.LabelHostname: tt.newNode})
			defer data.crdClient.CrdV1alpha2().ExternalIPPools().Delete(context.TODO(), newPool.Name, metav1.DeleteOptions{})
			egress := data.createEgress(t, "egress-", nil, map[string]string{"foo": "bar"}, originalPool.Name, "")
			defer data.crdClient.CrdV1alpha2().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{})
			egress, err := data.checkEgressState(egress.Name, tt.originalEgressIP, tt.originalNode, "", time.Second)
			require.NoError(t, err)
			// The Egress maybe has been modified.
			toUpdate := egress.DeepCopy()
			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
				toUpdate.Spec.ExternalIPPool = newPool.Name
				toUpdate.Spec.EgressIP = tt.newEgressIP
				_, err = data.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
				if err != nil && errors.IsConflict(err) {
					// Refresh the object so RetryOnConflict retries with the latest version.
					toUpdate, _ = data.crdClient.CrdV1alpha2().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{})
				}
				return err
			})
			require.NoError(t, err, "Failed to update Egress")
			_, err = data.checkEgressState(egress.Name, tt.newEgressIP, tt.newNode, "", time.Second)
			require.NoError(t, err)
			// The original IP must eventually be unassigned from the original Node.
			err = wait.PollImmediate(200*time.Millisecond, timeout, func() (done bool, err error) {
				exists, err := hasIP(data, tt.originalNode, tt.originalEgressIP)
				if err != nil {
					return false, fmt.Errorf("check ip error: %v", err)
				}
				return !exists, nil
			})
			require.NoError(t, err, "Found stale IP (%s) exists on Node (%s)", tt.originalEgressIP, tt.originalNode)
		})
	}
}
// testEgressUpdateNodeSelector verifies that shrinking an ExternalIPPool's
// nodeSelector evicts the Egress IP from the removed Node and moves it to a
// remaining candidate, and that restoring the selector moves it back.
func testEgressUpdateNodeSelector(t *testing.T, data *TestData) {
	tests := []struct {
		name      string
		ipRange   v1alpha2.IPRange
		ipVersion int
	}{
		{
			name:      "IPv4 cluster",
			ipRange:   v1alpha2.IPRange{CIDR: "169.254.100.0/30"},
			ipVersion: 4,
		},
		{
			name:      "IPv6 cluster",
			ipRange:   v1alpha2.IPRange{CIDR: "2021:3::aaa1/124"},
			ipVersion: 6,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			switch tt.ipVersion {
			case 4:
				skipIfNotIPv4Cluster(t)
			case 6:
				skipIfNotIPv6Cluster(t)
			}
			// updateNodeSelector adds or removes evictNode from the pool's
			// MatchExpressions value list.
			updateNodeSelector := func(poolName, evictNode string, ensureExists bool) {
				pool, err := data.crdClient.CrdV1alpha2().ExternalIPPools().Get(context.TODO(), poolName, metav1.GetOptions{})
				require.NoError(t, err, "Failed to get ExternalIPPool %v", pool)
				newNodes := sets.NewString(pool.Spec.NodeSelector.MatchExpressions[0].Values...)
				if ensureExists {
					newNodes.Insert(evictNode)
				} else {
					newNodes.Delete(evictNode)
				}
				pool.Spec.NodeSelector.MatchExpressions[0].Values = newNodes.List()
				_, err = data.crdClient.CrdV1alpha2().ExternalIPPools().Update(context.TODO(), pool, metav1.UpdateOptions{})
				require.NoError(t, err, "Failed to update ExternalIPPool %v", pool)
			}
			shrinkEgressNodes := func(poolName, evictNode string) {
				// Remove one Node from the node candidates.
				updateNodeSelector(poolName, evictNode, false)
			}
			restoreEgressNodes := func(poolName, evictNode string) {
				// Add the removed Node back to the node candidates.
				updateNodeSelector(poolName, evictNode, true)
			}
			// Egress IP migration should happen fast when it's caused by nodeSelector update.
			// No IP should be left on the evicted Node.
			testEgressMigration(t, data, shrinkEgressNodes, restoreEgressNodes, true, time.Second, &tt.ipRange)
		})
	}
}
// testEgressNodeFailure verifies that the Egress IP fails over to another
// candidate Node when the antrea-agent on the current Node stops responding
// (simulated by sending SIGSTOP), and fails back after SIGCONT.
func testEgressNodeFailure(t *testing.T, data *TestData) {
	tests := []struct {
		name      string
		ipRange   v1alpha2.IPRange
		ipVersion int
	}{
		{
			name:      "IPv4 cluster",
			ipRange:   v1alpha2.IPRange{CIDR: "169.254.100.0/30"},
			ipVersion: 4,
		},
		{
			name:      "IPv6 cluster",
			ipRange:   v1alpha2.IPRange{CIDR: "2021:4::aaa1/124"},
			ipVersion: 6,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			switch tt.ipVersion {
			case 4:
				skipIfNotIPv4Cluster(t)
			case 6:
				skipIfNotIPv6Cluster(t)
			}
			// signalAgent sends the given signal to the antrea-agent process
			// on the named Node via pkill.
			signalAgent := func(nodeName, signal string) {
				cmd := fmt.Sprintf("pkill -%s antrea-agent", signal)
				rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd)
				if rc != 0 || err != nil {
					t.Errorf("Error when running command '%s' on Node '%s', rc: %d, stdout: %s, stderr: %s, error: %v",
						cmd, nodeName, rc, stdout, stderr, err)
				}
			}
			pauseAgent := func(_, evictNode string) {
				// Send "STOP" signal to antrea-agent.
				signalAgent(evictNode, "STOP")
			}
			restoreAgent := func(_, evictNode string) {
				// Send "CONT" signal to antrea-agent.
				signalAgent(evictNode, "CONT")
			}
			// Egress IP migration may take a few seconds when it's caused by Node failure detection.
			// Skip checking Egress IP on the evicted Node because Egress IP will be left on it (no running antrea-agent).
			testEgressMigration(t, data, pauseAgent, restoreAgent, false, waitEgressRealizedTimeout, &tt.ipRange)
		})
	}
}
// testEgressMigration is the shared driver for Egress IP migration scenarios:
// it creates a two-Node pool and an Egress, runs triggerFunc to force the IP
// off its current Node, checks the IP moved to the other candidate, then runs
// revertFunc and checks the IP moved back. When checkEvictNode is false, the
// evicted Node is not checked for leftover IPs (e.g. its agent is paused).
func testEgressMigration(t *testing.T, data *TestData, triggerFunc, revertFunc func(poolName, evictNode string), checkEvictNode bool, timeout time.Duration, ipRange *v1alpha2.IPRange) {
	nodeCandidates := sets.NewString(nodeName(0), nodeName(1))
	matchExpressions := []metav1.LabelSelectorRequirement{
		{
			Key:      v1.LabelHostname,
			Operator: metav1.LabelSelectorOpIn,
			Values:   nodeCandidates.List(),
		},
	}
	externalIPPoolTwoNodes := data.createExternalIPPool(t, "pool-with-two-nodes-", *ipRange, matchExpressions, nil)
	defer data.crdClient.CrdV1alpha2().ExternalIPPools().Delete(context.TODO(), externalIPPoolTwoNodes.Name, metav1.DeleteOptions{})
	egress := data.createEgress(t, "migration-egress-", nil, map[string]string{"foo": "bar"}, externalIPPoolTwoNodes.Name, "")
	defer data.crdClient.CrdV1alpha2().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{})
	var err error
	egress, err = data.waitForEgressRealized(egress)
	require.NoError(t, err)
	assert.True(t, nodeCandidates.Has(egress.Status.EgressNode))
	// Orient fromNode/toNode so fromNode is wherever the IP currently lives.
	fromNode, toNode := nodeName(0), nodeName(1)
	if egress.Status.EgressNode != fromNode {
		fromNode, toNode = nodeName(1), nodeName(0)
	}
	// Trigger Egress IP migration. The EgressIP should be moved to the other Node.
	triggerFunc(externalIPPoolTwoNodes.Name, fromNode)
	// Defer revertFunc to restore the testbed regardless of success or failure.
	defer revertFunc(externalIPPoolTwoNodes.Name, fromNode)
	// Only check evictNode when checkEvictNode is true.
	var otherNodeToCheck string
	if checkEvictNode {
		otherNodeToCheck = fromNode
	}
	_, err = data.checkEgressState(egress.Name, egress.Spec.EgressIP, toNode, otherNodeToCheck, timeout)
	assert.NoError(t, err)
	// Revert the operation. The EgressIP should be moved back.
	revertFunc(externalIPPoolTwoNodes.Name, fromNode)
	_, err = data.checkEgressState(egress.Name, egress.Spec.EgressIP, fromNode, toNode, timeout)
	assert.NoError(t, err)
}
// checkEgressState polls until the named Egress reports expectedIP and
// expectedNode, the IP is actually configured on expectedNode, and (when
// otherNode is non-empty) the IP is absent from otherNode. It returns the
// last-observed Egress and a descriptive error if the state is not reached
// within timeout.
//
// Fixes vs. the previous version: the comparisons were inverted (== instead
// of !=, so an error was reported exactly when the state was correct);
// returning a non-nil error from the poll condition aborted polling on the
// first transient mismatch, defeating the retry; the "not assigned" message
// printed expectedNode instead of otherNode; and on timeout the function
// returned a never-assigned nil error instead of the poll error.
func (data *TestData) checkEgressState(egressName, expectedIP, expectedNode, otherNode string, timeout time.Duration) (*v1alpha2.Egress, error) {
	var egress *v1alpha2.Egress
	// mismatchErr remembers why the last poll iteration failed, so the caller
	// gets a specific message instead of a bare "timed out waiting".
	var mismatchErr error
	pollErr := wait.PollImmediate(200*time.Millisecond, timeout, func() (done bool, err error) {
		egress, err = data.crdClient.CrdV1alpha2().Egresses().Get(context.TODO(), egressName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if egress.Spec.EgressIP != expectedIP {
			mismatchErr = fmt.Errorf("expected EgressIP %s, got %s", expectedIP, egress.Spec.EgressIP)
			return false, nil
		}
		if egress.Status.EgressNode != expectedNode {
			mismatchErr = fmt.Errorf("expected Egress Node %s, got %s", expectedNode, egress.Status.EgressNode)
			return false, nil
		}
		// Make sure the IP is configured on the desired Node.
		exists, err := hasIP(data, expectedNode, expectedIP)
		if err != nil || !exists {
			mismatchErr = fmt.Errorf("expected EgressIP %s to be assigned to Node %s: %v", expectedIP, expectedNode, err)
			return false, nil
		}
		if otherNode != "" {
			// Make sure the IP is not configured on the other Node.
			exists, err := hasIP(data, otherNode, expectedIP)
			if err != nil || exists {
				mismatchErr = fmt.Errorf("expected EgressIP %s not to be assigned to Node %s: %v", expectedIP, otherNode, err)
				return false, nil
			}
		}
		mismatchErr = nil
		return true, nil
	})
	if pollErr != nil {
		if mismatchErr != nil {
			return egress, mismatchErr
		}
		return egress, pollErr
	}
	return egress, nil
}
// hasIP reports whether the given IP is present as a host address (/32 or
// /128) on the named Node, by listing addresses from inside the antrea-agent
// Pod running there.
func hasIP(data *TestData, nodeName string, ip string) (bool, error) {
	agentPod, err := data.getAntreaPodOnNode(nodeName)
	if err != nil {
		return false, err
	}
	stdout, _, err := data.runCommandFromPod(antreaNamespace, agentPod, agentContainerName, []string{"ip", "-br", "addr"})
	if err != nil {
		return false, err
	}
	for _, suffix := range []string{"/32", "/128"} {
		if strings.Contains(stdout, ip+suffix) {
			return true, nil
		}
	}
	return false, nil
}
// createExternalIPPool creates an ExternalIPPool with a generated name,
// covering the single given IP range and selecting Nodes with the provided
// match expressions and/or labels. The test fails immediately on error.
func (data *TestData) createExternalIPPool(t *testing.T, generateName string, ipRange v1alpha2.IPRange, matchExpressions []metav1.LabelSelectorRequirement, matchLabels map[string]string) *v1alpha2.ExternalIPPool {
	spec := v1alpha2.ExternalIPPoolSpec{
		IPRanges: []v1alpha2.IPRange{ipRange},
		NodeSelector: metav1.LabelSelector{
			MatchExpressions: matchExpressions,
			MatchLabels:      matchLabels,
		},
	}
	toCreate := &v1alpha2.ExternalIPPool{
		ObjectMeta: metav1.ObjectMeta{GenerateName: generateName},
		Spec:       spec,
	}
	pool, err := data.crdClient.CrdV1alpha2().ExternalIPPools().Create(context.TODO(), toCreate, metav1.CreateOptions{})
	require.NoError(t, err, "Failed to create ExternalIPPool")
	return pool
}
// createEgress creates an Egress with a generated name, applied to Pods
// matching the given selector, drawing its IP from externalPoolName or using
// the explicit egressIP. The test fails immediately on error.
func (data *TestData) createEgress(t *testing.T, generateName string, matchExpressions []metav1.LabelSelectorRequirement, matchLabels map[string]string, externalPoolName string, egressIP string) *v1alpha2.Egress {
	spec := v1alpha2.EgressSpec{
		AppliedTo: v1alpha2.AppliedTo{
			PodSelector: &metav1.LabelSelector{
				MatchExpressions: matchExpressions,
				MatchLabels:      matchLabels,
			},
		},
		ExternalIPPool: externalPoolName,
		EgressIP:       egressIP,
	}
	toCreate := &v1alpha2.Egress{
		ObjectMeta: metav1.ObjectMeta{GenerateName: generateName},
		Spec:       spec,
	}
	egress, err := data.crdClient.CrdV1alpha2().Egresses().Create(context.TODO(), toCreate, metav1.CreateOptions{})
	require.NoError(t, err, "Failed to create Egress")
	return egress
}
// waitForEgressRealized polls until the Egress has both an EgressIP and an
// EgressNode set, returning the realized Egress or an error on timeout.
func (data *TestData) waitForEgressRealized(egress *v1alpha2.Egress) (*v1alpha2.Egress, error) {
	pollFunc := func() (bool, error) {
		var err error
		egress, err = data.crdClient.CrdV1alpha2().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		realized := egress.Spec.EgressIP != "" && egress.Status.EgressNode != ""
		return realized, nil
	}
	if err := wait.PollImmediate(200*time.Millisecond, waitEgressRealizedTimeout, pollFunc); err != nil {
		return nil, fmt.Errorf("wait for Egress %#v realized failed: %v", egress, err)
	}
	return egress, nil
}
| 1 | 50,935 | Would membership take 10+ seconds to detect the failed node? | antrea-io-antrea | go |
@@ -227,11 +227,14 @@ def _sanitize_markdown(mdtext):
def _load_introduction(path):
"Loads the introduction text from a Markdown file"
+ if not os.path.exists(path):
+ return None
+
try:
with open(path) as f:
return _sanitize_markdown(f.read())
- except:
- return None
+ except Exception as err:
+ raise RuntimeError(f'Markdown file "{path}" could not be loaded: {err}')
def _load_skill(path, course): | 1 | import collections
from pathlib import Path
import os
import bleach
from librelingo_types import (
Course,
DictionaryItem,
Language,
License,
Module,
Phrase,
Skill,
Word,
Settings,
AudioSettings,
TextToSpeechSettings,
)
import markdown
from yaml import safe_load
from yaml.constructor import SafeConstructor
import html2markdown # type: ignore
from ._spelling import _run_skill_spellcheck, _convert_hunspell_settings
def add_bool(self, node):
    # NOTE(review): registered below for the YAML bool tag, so values such as
    # "Yes"/"No"/"true" are loaded as their literal strings rather than
    # Python booleans — presumably because course files use them as answer
    # text. Confirm against the course data format.
    return self.construct_scalar(node)


# Override boolean construction for every subsequent safe_load() call.
SafeConstructor.add_constructor("tag:yaml.org,2002:bool", add_bool)
def _load_yaml(path):
    """Helper function for reading a YAML file.

    The encoding is pinned to UTF-8 so course files parse identically
    regardless of the platform's default locale encoding.
    """
    with open(path, encoding="utf-8") as f:
        return safe_load(f)
def _convert_language(raw_language):
    """
    Turn a raw YAML language mapping into a Language() object.
    """
    language_name = raw_language["Name"]
    language_code = raw_language["IETF BCP 47"]
    return Language(name=language_name, code=language_code)
def _get_dictionary_items_from_new_words(skill):
"""
Extract new words in a skill as dictionary items
"""
for word in skill.words:
yield word.in_source_language[0], word.in_target_language[0], False
yield word.in_target_language[0], word.in_source_language[0], True
def _get_dictionary_items_from_skill_mini_dictionary(skill):
"""
Iterate over all dictionary items from the mini-dictionary of a skill
"""
for dictionary_item in skill.dictionary:
word, definitions, is_in_target_language = dictionary_item
for definition in definitions:
yield word, definition, is_in_target_language
def _get_all_skills(modules):
"""
Iterate over all skills in the supplied list of modules
"""
for module in modules:
for skill in module.skills:
yield skill
def _get_dictionary_items(modules):
    """
    Extract all dictionary items from every module in the supplied list.

    Yields (word, definition, is_in_target_language) tuples, combining each
    skill's new-word entries with its optional mini-dictionary.
    """
    for skill in _get_all_skills(modules):
        for item in _get_dictionary_items_from_new_words(skill):
            yield item

        # The mini-dictionary is optional; None means the skill has none.
        if skill.dictionary is not None:
            for item in _get_dictionary_items_from_skill_mini_dictionary(skill):
                yield item
def _merge_dictionary_definitions(items_generator):
"""
Merges dictionary items, meaning that multiple definitions of the same word
are compressed into one definition that has a multiple meanings listed.
"""
items = collections.defaultdict(set)
for word, definition, is_in_target_language in items_generator:
items[(word, is_in_target_language)].add(definition)
return list(items.items())
def _get_merged_dictionary_items(modules):
    """
    Produce merged dictionary items from every skill in every supplied
    module: multiple definitions of the same word are compressed into one
    entry listing all of its meanings.
    """
    all_items = _get_dictionary_items(modules)
    return _merge_dictionary_definitions(all_items)
def _load_dictionary(modules):
    """
    Build the course-level dictionary from every skill in every supplied
    module, one DictionaryItem per (word, direction) pair with all of its
    definitions joined by newlines in sorted order.
    """
    return [
        DictionaryItem(
            word=word,
            definition="\n".join(sorted(definitions)),
            is_in_target_language=is_in_target_language,
        )
        for (word, is_in_target_language), definitions in _get_merged_dictionary_items(
            modules
        )
    ]
def _alternatives_from_yaml(raw_object, key):
"""
Returns alternative solutions based on the key, or an empty list if
there are no alternative solutions specified
"""
return raw_object[key] if key in raw_object else []
def _solution_from_yaml(raw_object, solution_key, alternatives_key):
    """
    Combine the main solution and its (optional) alternatives into a single
    list; the main solution always comes first.
    """
    alternatives = _alternatives_from_yaml(raw_object, alternatives_key)
    return [raw_object[solution_key], *alternatives]
def _convert_word(raw_word):
    """
    Convert a raw YAML word mapping into a Word() object.

    >>> _convert_word({'Images': ["abc"], 'Word': "cat", 'Synonyms': ["kitten"], 'Translation': "gato"})
    Word(in_target_language=['cat', 'kitten'], in_source_language=['gato'], pictures=['abc'])
    """
    target = _solution_from_yaml(raw_word, "Word", "Synonyms")
    source = _solution_from_yaml(raw_word, "Translation", "Also accepted")
    return Word(
        in_target_language=target,
        in_source_language=source,
        pictures=raw_word.get("Images"),
    )
def _convert_words(raw_words):
    """Convert each raw YAML word mapping into a Word() object."""
    return [_convert_word(raw_word) for raw_word in raw_words]
def _convert_phrase(raw_phrase):
    """
    Convert a raw YAML phrase mapping into a Phrase() object.

    Raises RuntimeError with a user-facing message when a required key
    ("Phrase" or "Translation") is missing.
    """
    try:
        return Phrase(
            in_target_language=_solution_from_yaml(
                raw_phrase, "Phrase", "Alternative versions"
            ),
            in_source_language=_solution_from_yaml(
                raw_phrase, "Translation", "Alternative translations"
            ),
        )
    except KeyError as error:
        # Use .get here: the missing key may be "Phrase" itself, in which
        # case raw_phrase["Phrase"] would raise a second KeyError inside the
        # error handler and hide the user-facing message.
        raise RuntimeError(
            f'Phrase "{raw_phrase.get("Phrase", "<unknown>")}" needs to have a "Translation".'
        ) from error
def _convert_phrases(raw_phrases):
    """Convert each raw YAML phrase mapping into a Phrase() object."""
    return [_convert_phrase(raw_phrase) for raw_phrase in raw_phrases]
def _convert_mini_dictionary(raw_mini_dictionary, course):
"""
Handles loading the mini-dictionary form the YAML format
"""
configurations = (
(course.target_language.name, True),
(course.source_language.name, False),
)
for language_name, is_in_target_language in configurations:
for item in raw_mini_dictionary[language_name]:
word = list(item.keys())[0]
raw_definition = list(item.values())[0]
definition = (
raw_definition if type(raw_definition) == list else [raw_definition]
)
yield (word, tuple(definition), is_in_target_language)
def _sanitize_markdown(mdtext):
    """Render Markdown to HTML, strip unsafe content, and convert back to Markdown."""
    unsafe_html = markdown.markdown(mdtext)
    allowed_tags = [*bleach.sanitizer.ALLOWED_TAGS, "h1", "h2", "h3", "h4", "h5", "h6"]
    safe_html = bleach.clean(unsafe_html, strip=True, tags=allowed_tags)
    return html2markdown.convert(safe_html)
def _load_introduction(path):
"Loads the introduction text from a Markdown file"
try:
with open(path) as f:
return _sanitize_markdown(f.read())
except:
return None
def _load_skill(path, course):
    """
    Load a single skill YAML file (plus its optional sibling Markdown
    introduction) and convert it into a Skill() object.

    Raises RuntimeError with a user-facing message for every malformed or
    missing piece of the skill file.
    """
    try:
        data = _load_yaml(path)
        # The introduction lives in a Markdown file with the same stem.
        introduction = _load_introduction(str(path).replace(".yaml", ".md"))
        skill = data["Skill"]
        words = data["New words"]
        phrases = data["Phrases"]
    except TypeError:
        # safe_load returns None for an empty file; indexing None raises TypeError.
        raise RuntimeError(f'Skill file "{path}" is empty or does not exist')
    except KeyError as error:
        raise RuntimeError(f'Skill file "{path}" needs to have a "{error.args[0]}" key')
    try:
        name = skill["Name"]
    except Exception:
        raise RuntimeError(f'Skill file "{path}" needs to have skill name')
    try:
        skill_id = skill["Id"]
    except Exception:
        raise RuntimeError(f'Skill file "{path}" needs to have skill id')
    try:
        phrases = _convert_phrases(phrases)
    except TypeError:
        raise RuntimeError(f'Skill file "{path}" has an invalid phrase')
    try:
        words = _convert_words(words)
    except TypeError:
        raise RuntimeError(f'Skill file "{path}" has an invalid word')
    # Spellcheck runs after conversion so it sees the normalized objects.
    _run_skill_spellcheck(phrases, words, course)
    return Skill(
        name=name,
        filename=os.path.relpath(path, start=course.course_dir),
        id=skill_id,
        words=words,
        phrases=phrases,
        # Thumbnails and the mini-dictionary are both optional sections.
        image_set=skill["Thumbnails"] if "Thumbnails" in skill else [],
        dictionary=list(_convert_mini_dictionary(data["Mini-dictionary"], course))
        if "Mini-dictionary" in data
        else [],
        introduction=introduction,
    )
def _load_skills(path, skills, course):
    """Load every YAML skill listed for a module."""
    skills_dir = Path(path) / "skills"
    try:
        return [_load_skill(skills_dir / skill, course) for skill in skills]
    except TypeError:
        raise RuntimeError(
            f'Module file "{path}/module.yaml" needs to have a list of skills'
        )
def _load_module(path, course):
    """
    Load a YAML module (a directory containing module.yaml) into a Module()
    object, including all of its skills.

    Raises RuntimeError with a user-facing message for a missing/empty
    module file or for missing required keys.
    """
    filepath = Path(path) / "module.yaml"
    data = _load_yaml(filepath)
    try:
        module = data["Module"]
        skills = data["Skills"]
    except TypeError:
        # safe_load returns None for an empty file; indexing None raises TypeError.
        raise RuntimeError(f'Module file "{filepath}" is empty or does not exist')
    except KeyError as error:
        raise RuntimeError(
            f'Module file "{filepath}" needs to have a "{error.args[0]}" key'
        )
    try:
        title = module["Name"]
    except Exception:
        raise RuntimeError(f'Module file "{filepath}" needs to have module name')
    return Module(
        title=title,
        filename=os.path.relpath(path, start=course.course_dir),
        skills=_load_skills(path, skills, course),
    )
def _load_modules(path, modules, course):
    """Load every YAML module listed for the course."""
    base = Path(path)
    return [_load_module(base / module_name, course) for module_name in modules]
def _convert_license(raw_license):
    """Build a License() object from its raw YAML mapping."""
    short_name = raw_license["Short name"]
    full_name = raw_license["Name"]
    link = raw_license["Link"]
    return License(name=short_name, full_name=full_name, link=link)
def _convert_text_to_speech_settings_list(raw_audio_settings):
    """
    Build the list of TextToSpeechSettings() from the raw audio settings,
    falling back to the AudioSettings() default when no "TTS" key exists.
    """
    if "TTS" not in raw_audio_settings:
        return AudioSettings().text_to_speech_settings_list
    return [
        TextToSpeechSettings(tts["Provider"], tts["Voice"], tts["Engine"])
        for tts in raw_audio_settings["TTS"]
    ]
def _convert_audio_settings(raw_settings):
    """
    Create an AudioSettings() object based on the data structure in the YAML
    file, falling back to the defaults when no "Audio" section exists.
    """
    if "Audio" not in raw_settings:
        return AudioSettings()
    raw_audio_settings = raw_settings["Audio"]
    # YAML booleans are loaded as strings (see the SafeConstructor override
    # at the top of this module), so compare against the literal "True":
    # relying on truthiness would treat the string "False" as enabled.
    is_enabled = raw_audio_settings["Enabled"] == "True"
    if is_enabled:
        text_to_speech_settings_list = _convert_text_to_speech_settings_list(
            raw_audio_settings
        )
    else:
        text_to_speech_settings_list = []
    return AudioSettings(
        enabled=is_enabled,
        text_to_speech_settings_list=text_to_speech_settings_list,
    )
def _convert_settings(data, course):
    """Build the course Settings() from the YAML data, using defaults when absent."""
    if "Settings" not in data:
        return Settings()
    raw_settings = data["Settings"]
    audio_settings = _convert_audio_settings(raw_settings)
    hunspell = _convert_hunspell_settings(raw_settings, course)
    return Settings(audio_settings=audio_settings, hunspell=hunspell)
def load_course(path):
    """
    Load a YAML-based course into a Course() object.

    The course is built in stages: first a "dumb" course carrying only the
    language/license metadata, then the settings, then the modules (which
    need the settings for spellchecking), and finally the full course with
    the generated dictionary.
    """
    data = _load_yaml(Path(path) / "course.yaml")
    course = data["Course"]
    raw_modules = data["Modules"]
    dumb_course = Course(
        target_language=_convert_language(course["Language"]),
        source_language=_convert_language(course["For speakers of"]),
        license=_convert_license(course["License"]),
        special_characters=course["Special characters"],
        dictionary=[],
        modules=[],
        settings=None,
        repository_url=course["Repository"],
        course_dir=path,
    )
    dumb_course = Course(
        **{
            **dumb_course._asdict(),
            "settings": _convert_settings(data, dumb_course),
        }
    )
    modules = _load_modules(path, raw_modules, dumb_course)
    # NOTE(review): _convert_settings is computed a second time here even
    # though dumb_course already carries it -- looks redundant; confirm
    # before simplifying.
    return Course(
        **{
            **dumb_course._asdict(),
            "settings": _convert_settings(data, dumb_course),
            "dictionary": _load_dictionary(modules),
            "modules": modules,
        }
    )
| 1 | 11,449 | hmm, in this case perhaps the whole try-except could be removed altogether? because if the file does not exist, it's already returning `None`. In any other case it should actually probably fail with the exception, no? | kantord-LibreLingo | py |
@@ -0,0 +1,14 @@
+package cmd
+
+import (
+ "github.com/mitchellh/go-homedir"
+ "path/filepath"
+)
+
+func GetDirectory(paths ...string) string {
+ dir, _ := homedir.Dir()
+
+ dir = filepath.Join(dir, ".mysterium", filepath.Join(paths...))
+
+ return dir
+} | 1 | 1 | 9,577 | This function does not need to be public | mysteriumnetwork-node | go |
|
@@ -60,14 +60,14 @@ export default function SetupForm( { finishSetup } ) {
>
<ErrorNotice />
+ <ExistingTagNotice />
+
{ ( !! accounts.length && ! hasExistingTag ) && (
- <p>
+ <p style={ { marginBottom: 0 } }>
{ __( 'Please select the account information below. You can change this view later in your settings.', 'google-site-kit' ) }
</p>
) }
- <ExistingTagNotice />
-
<div className="googlesitekit-setup-module__inputs">
<AccountSelect />
| 1 | /**
* Analytics Setup form.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { useCallback } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import Button from '../../../components/button';
import { STORE_NAME } from '../datastore/constants';
import {
AccountSelect,
ErrorNotice,
ExistingTagNotice,
ProfileSelect,
PropertySelect,
} from '../common/';
import { trackEvent } from '../../../util';
const { useSelect, useDispatch } = Data;
/**
 * Renders the Analytics setup form: account/property/profile selectors plus
 * the submit button that saves the selection through the datastore.
 *
 * @param {Object}   props             Component props.
 * @param {Function} props.finishSetup Callback invoked once changes are saved without error.
 * @return {JSX.Element} The setup form.
 */
export default function SetupForm( { finishSetup } ) {
	const accounts = useSelect( ( select ) => select( STORE_NAME ).getAccounts() ) || [];
	const hasExistingTag = useSelect( ( select ) => select( STORE_NAME ).hasExistingTag() );
	const canSubmitChanges = useSelect( ( select ) => select( STORE_NAME ).canSubmitChanges() );
	const { submitChanges } = useDispatch( STORE_NAME );

	// Persist the selected settings; only finish setup and record the
	// analytics event when the save succeeded.
	const submitForm = useCallback( async ( event ) => {
		event.preventDefault();

		const { error } = await submitChanges() || {};
		if ( ! error ) {
			finishSetup();
			trackEvent( 'analytics_setup', 'analytics_configured' );
		}
	}, [ canSubmitChanges, finishSetup ] );

	return (
		<form
			className="googlesitekit-analytics-setup__form"
			onSubmit={ submitForm }
		>
			<ErrorNotice />

			{ /* Account-selection hint: only shown when accounts exist and no tag is already present. */ }
			{ ( !! accounts.length && ! hasExistingTag ) && (
				<p>
					{ __( 'Please select the account information below. You can change this view later in your settings.', 'google-site-kit' ) }
				</p>
			) }

			<ExistingTagNotice />

			<div className="googlesitekit-setup-module__inputs">
				<AccountSelect />
				<PropertySelect />
				<ProfileSelect />
			</div>

			<div className="googlesitekit-setup-module__action">
				<Button disabled={ ! canSubmitChanges }>
					{ __( 'Configure Analytics', 'google-site-kit' ) }
				</Button>
			</div>
		</form>
	);
}
| 1 | 28,441 | Any chance we can get a CSS class for this? I know it's just in one place but feels like a dangerous precedent to set. | google-site-kit-wp | js |
@@ -119,7 +119,7 @@ namespace AutoRest.Ruby.Azure.TemplateModels
if (ReturnType.Body is CompositeType)
{
CompositeType compositeType = (CompositeType)ReturnType.Body;
- if (compositeType.Extensions.ContainsKey(AzureExtensions.PageableExtension))
+ if (compositeType.Extensions.ContainsKey(AzureExtensions.PageableExtension) && this.Extensions.ContainsKey("nextMethodName"))
{
bool isNextLinkMethod = this.Extensions.ContainsKey("nextLinkMethod") && (bool)this.Extensions["nextLinkMethod"];
bool isPageable = (bool)compositeType.Extensions[AzureExtensions.PageableExtension]; | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Net;
using System.Text;
using AutoRest.Core.ClientModel;
using AutoRest.Core.Logging;
using AutoRest.Extensions.Azure;
using AutoRest.Extensions.Azure.Model;
using AutoRest.Ruby.Azure.Properties;
using AutoRest.Ruby.TemplateModels;
using IndentedStringBuilder = AutoRest.Core.Utilities.IndentedStringBuilder;
using Newtonsoft.Json;
namespace AutoRest.Ruby.Azure.TemplateModels
{
/// <summary>
/// The model object for Azure methods.
/// </summary>
public class AzureMethodTemplateModel : MethodTemplateModel
{
/// <summary>
/// Initializes a new instance of the AzureMethodTemplateModel class.
/// </summary>
/// <param name="source">The method current model is built for.</param>
/// <param name="serviceClient">The service client - main point of access to the SDK.</param>
public AzureMethodTemplateModel(Method source, ServiceClient serviceClient)
: base(source, serviceClient)
{
if (source == null)
{
throw new ArgumentNullException("source");
}
ParameterTemplateModels.Clear();
source.Parameters.ForEach(p => ParameterTemplateModels.Add(new AzureParameterTemplateModel(p)));
this.ClientRequestIdString = AzureExtensions.GetClientRequestIdString(source);
this.RequestIdString = AzureExtensions.GetRequestIdString(source);
}
public string ClientRequestIdString { get; private set; }
public string RequestIdString { get; private set; }
/// <summary>
/// Returns true if method has x-ms-long-running-operation extension.
/// </summary>
public bool IsLongRunningOperation
{
get { return Extensions.ContainsKey(AzureExtensions.LongRunningExtension); }
}
/// <summary>
/// Returns true if method has x-ms-pageable extension.
/// </summary>
public bool IsPageable
{
get { return Extensions.ContainsKey(AzureExtensions.PageableExtension); }
}
/// <summary>
/// Returns the Ruby invocation string for the async "next page" method of a
/// pageable operation, forwarding grouped and header parameters as needed.
/// </summary>
public string InvokeNextMethodAsync()
{
    StringBuilder builder = new StringBuilder();
    string nextMethodName;
    // Prefer the operation name from the x-ms-pageable extension when present;
    // otherwise fall back to the "nextMethodName" extension value.
    PageableExtension pageableExtension = JsonConvert.DeserializeObject<PageableExtension>(Extensions[AzureExtensions.PageableExtension].ToString());
    Method nextMethod = null;
    if (pageableExtension != null && !string.IsNullOrEmpty(pageableExtension.OperationName))
    {
        nextMethod = ServiceClient.Methods.FirstOrDefault(m =>
            pageableExtension.OperationName.Equals(m.SerializedName, StringComparison.OrdinalIgnoreCase));
        nextMethodName = nextMethod.Name;
    }
    else
    {
        nextMethodName = (string)Extensions["nextMethodName"];
        nextMethod = ServiceClient.Methods.Where(m => m.Name == nextMethodName).FirstOrDefault();
    }

    // Forward grouped parameters of the original method to the next-page
    // method by re-assigning them under the next method's parameter names.
    IEnumerable<Parameter> origMethodGroupedParameters = Parameters.Where(p => p.Name.Contains(Name));
    if (origMethodGroupedParameters.Count() > 0)
    {
        foreach (Parameter param in nextMethod.Parameters)
        {
            if (param.Name.Contains(nextMethod.Name) && (param.Name.Length > nextMethod.Name.Length)) //parameter that contains the method name + postfix, it's a grouped param
            {
                //assigning grouped parameter passed to the lazy method, to the parameter used in the invocation to the next method
                string argumentName = param.Name.Replace(nextMethodName, Name);
                builder.AppendLine(string.Format(CultureInfo.InvariantCulture, "{0} = {1}", param.Name, argumentName));
            }
        }
    }

    // Header (and client-level) parameters are passed through to the *_async next call.
    IList<string> headerParams = nextMethod.Parameters.Where(p => (p.Location == ParameterLocation.Header || p.Location == ParameterLocation.None) && !p.IsConstant && p.ClientProperty == null).Select(p => p.Name).ToList();
    headerParams.Add("custom_headers");

    string nextMethodParamaterInvocation = string.Join(", ", headerParams);
    builder.AppendLine(string.Format(CultureInfo.InvariantCulture, "{0}_async(next_link, {1})", nextMethodName, nextMethodParamaterInvocation));

    return builder.ToString();
}
/// <summary>
/// Returns generated response or body of the auto-paginated method.
/// For pageable (non next-link) operations this emits Ruby code that fetches
/// the first page lazily and then collects all items; otherwise it defers to
/// the base implementation.
/// </summary>
public override string ResponseGeneration()
{
    IndentedStringBuilder builder = new IndentedStringBuilder();
    if (ReturnType.Body != null)
    {
        if (ReturnType.Body is CompositeType)
        {
            CompositeType compositeType = (CompositeType)ReturnType.Body;
            if (compositeType.Extensions.ContainsKey(AzureExtensions.PageableExtension))
            {
                bool isNextLinkMethod = this.Extensions.ContainsKey("nextLinkMethod") && (bool)this.Extensions["nextLinkMethod"];
                bool isPageable = (bool)compositeType.Extensions[AzureExtensions.PageableExtension];
                // Only the "lazy" entry point paginates; next-link methods
                // fall through to the base generation.
                if (isPageable && !isNextLinkMethod)
                {
                    builder.AppendLine("first_page = {0}_as_lazy({1})", Name, MethodParameterInvocation);
                    builder.AppendLine("first_page.get_all_items");
                    return builder.ToString();
                }
            }
        }
    }
    return base.ResponseGeneration();
}
/// <summary>
/// Gets the Get method model.
/// </summary>
public AzureMethodTemplateModel GetMethod
{
get
{
var getMethod = ServiceClient.Methods.FirstOrDefault(m => m.Url == Url
&& m.HttpMethod == HttpMethod.Get &&
m.Group == Group);
if (getMethod == null)
{
throw new InvalidOperationException(
string.Format(CultureInfo.InvariantCulture, Resources.InvalidLongRunningOperationForCreateOrUpdate,
Name, Group));
}
return new AzureMethodTemplateModel(getMethod, ServiceClient);
}
}
/// <summary>
/// Generates Ruby code in form of string for deserializing polling response.
/// </summary>
/// <param name="variableName">Variable name which keeps the response.</param>
/// <param name="type">Type of response.</param>
/// <returns>Ruby code in form of string for deserializing polling response.</returns>
public string DeserializePollingResponse(string variableName, IType type)
{
var builder = new IndentedStringBuilder(" ");
string serializationLogic = GetDeserializationString(type, variableName, variableName);
return builder.AppendLine(serializationLogic).ToString();
}
/// <summary>
/// Gets the logic required to preprocess response body when required.
/// </summary>
public override string InitializeResponseBody
{
get
{
var sb = new IndentedStringBuilder();
if (this.HttpMethod == HttpMethod.Head && this.ReturnType.Body != null)
{
HttpStatusCode code = this.Responses.Keys.FirstOrDefault(AzureExtensions.HttpHeadStatusCodeSuccessFunc);
sb.AppendLine("result.body = (status_code == {0})", (int)code);
}
sb.AppendLine(
"result.request_id = http_response['{0}'] unless http_response['{0}'].nil?", this.RequestIdString);
sb.AppendLine(base.InitializeResponseBody);
return sb.ToString();
}
}
/// <summary>
/// Gets the list of namespaces where we look for classes that need to
/// be instantiated dynamically due to polymorphism.
/// </summary>
public override List<string> ClassNamespaces
{
get
{
return new List<string>
{
"MsRestAzure"
};
}
}
/// <summary>
/// Gets the expression for default header setting.
/// </summary>
public override string SetDefaultHeaders
{
get
{
IndentedStringBuilder sb = new IndentedStringBuilder();
sb.AppendLine("request_headers['{0}'] = SecureRandom.uuid", this.ClientRequestIdString)
.AppendLine(base.SetDefaultHeaders);
return sb.ToString();
}
}
/// <summary>
/// Gets AzureOperationResponse generic type declaration.
/// </summary>
public override string OperationResponseReturnTypeString
{
get
{
return "MsRestAzure::AzureOperationResponse";
}
}
/// <summary>
/// Gets the list of middlewares required for HTTP requests.
/// </summary>
public override IList<string> FaradayMiddlewares
{
    get
    {
        // Retry transient failures up to 3 times and keep cookies across requests.
        return new List<string>()
        {
            "[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02]",
            "[:cookie_jar]"
        };
    }
}
/// <summary>
/// Gets the type for operation exception.
/// </summary>
public override string OperationExceptionTypeString
{
get
{
if (DefaultResponse.Body == null || DefaultResponse.Body.Name == "CloudError")
{
return "MsRestAzure::AzureOperationError";
}
return base.OperationExceptionTypeString;
}
}
/// <summary>
/// Gets the type for operation result.
/// For methods that carry a "nextMethodName" extension but are not pageable,
/// the return type is an array of the element type of the first sequence
/// property found on the composite return body.
/// </summary>
public override string OperationReturnTypeString
{
    get
    {
        if (Extensions.ContainsKey("nextMethodName") && !Extensions.ContainsKey(AzureExtensions.PageableExtension))
        {
            try
            {
                SequenceType sequenceType = ((CompositeType)ReturnType.Body).Properties.Select(p => p.Type).FirstOrDefault(t => t is SequenceType) as SequenceType;
                return string.Format(CultureInfo.InvariantCulture, "Array<{0}>", sequenceType.ElementType.Name);
            }
            // NOTE(review): catching NullReferenceException as control flow is
            // fragile -- an explicit null check on sequenceType would be clearer.
            // Left as-is to preserve behavior.
            catch (NullReferenceException nr)
            {
                throw ErrorManager.CreateError(string.Format(CultureInfo.InvariantCulture, "No collection type exists in pageable operation return type: {0}", nr.StackTrace));
            }
        }
        return base.OperationReturnTypeString;
    }
}
}
} | 1 | 22,670 | > && this.Extensions.ContainsKey("nextMethodName") [](start = 96, length = 48) From line 124, looks like we don't need `&& this.Extensions.ContainsKey("nextMethodName")` condition or we don't need line 124 #Closed | Azure-autorest | java |
@@ -1,16 +1,5 @@
-import { options } from 'preact';
import { assign } from './util';
-let oldVNodeHook = options.vnode;
-options.vnode = vnode => {
- if (vnode.type && vnode.type._forwarded && vnode.ref) {
- vnode.props.ref = vnode.ref;
- vnode.ref = null;
- }
-
- if (oldVNodeHook) oldVNodeHook(vnode);
-};
-
/**
* Pass ref down to a child. This is mainly used in libraries with HOCs that
* wrap components. Using `forwardRef` there is an easy way to get a reference | 1 | import { options } from 'preact';
import { assign } from './util';
let oldVNodeHook = options.vnode;
// Intercept every vnode: for components created via forwardRef (marked with
// `_forwarded`), move the `ref` onto props so the wrapped component receives
// it instead of the wrapper.
options.vnode = vnode => {
	if (vnode.type && vnode.type._forwarded && vnode.ref) {
		vnode.props.ref = vnode.ref;
		vnode.ref = null;
	}

	// Chain to any previously installed options.vnode hook.
	if (oldVNodeHook) oldVNodeHook(vnode);
};
/**
 * Pass a ref through to a wrapped child component. Mainly useful for HOC
 * libraries that need a reference to the wrapped component rather than the
 * wrapper itself.
 * @param {import('./index').ForwardFn} fn
 * @returns {import('./internal').FunctionalComponent}
 */
export function forwardRef(fn) {
	function Forwarded(props) {
		const childProps = assign({}, props);
		delete childProps.ref;
		return fn(childProps, props.ref);
	}

	Forwarded.prototype.isReactComponent = true;
	Forwarded._forwarded = true;
	Forwarded.displayName = `ForwardRef(${fn.displayName || fn.name})`;
	return Forwarded;
}
| 1 | 14,775 | Moving this code from compat to core shaves 47 bytes out of compat and only adds 6 bytes to core so I thought it was worth it. | preactjs-preact | js |
@@ -52,11 +52,11 @@ namespace NLog.LayoutRenderers
private readonly string _baseDir;
#if !SILVERLIGHT && !NETSTANDARD1_3
-
/// <summary>
/// cached
/// </summary>
private string _processDir;
+#endif
/// <summary>
/// Use base dir of current process. | 1 | //
// Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.LayoutRenderers
{
using System;
using System.IO;
using System.Text;
using NLog.Internal.Fakeables;
using NLog.Config;
using NLog.Internal;
/// <summary>
/// The current application domain's base directory.
/// </summary>
[LayoutRenderer("basedir")]
[AppDomainFixedOutput]
[ThreadAgnostic]
[ThreadSafe]
public class BaseDirLayoutRenderer : LayoutRenderer
{
    // Application domain base directory, captured once at construction time.
    private readonly string _baseDir;

#if !SILVERLIGHT && !NETSTANDARD1_3
    /// <summary>
    /// Cached directory of the current process executable (resolved lazily on first render).
    /// </summary>
    private string _processDir;

    /// <summary>
    /// Use base dir of current process.
    /// </summary>
    /// <docgen category='Rendering Options' order='10' />
    public bool ProcessDir { get; set; }
#endif

    /// <summary>
    /// Initializes a new instance of the <see cref="BaseDirLayoutRenderer" /> class.
    /// </summary>
    public BaseDirLayoutRenderer() : this(LogFactory.CurrentAppDomain)
    {
    }

    /// <summary>
    /// Initializes a new instance of the <see cref="BaseDirLayoutRenderer" /> class.
    /// </summary>
    public BaseDirLayoutRenderer(IAppDomain appDomain)
    {
        _baseDir = appDomain.BaseDirectory;
    }

    /// <summary>
    /// Gets or sets the name of the file to be Path.Combine()'d with the base directory.
    /// </summary>
    /// <docgen category='Advanced Options' order='10' />
    public string File { get; set; }

    /// <summary>
    /// Gets or sets the name of the directory to be Path.Combine()'d with the base directory.
    /// </summary>
    /// <docgen category='Advanced Options' order='10' />
    public string Dir { get; set; }

    /// <summary>
    /// Renders the application base directory and appends it to the specified <see cref="StringBuilder" />.
    /// </summary>
    /// <param name="builder">The <see cref="StringBuilder"/> to append the rendered data to.</param>
    /// <param name="logEvent">Logging event.</param>
    protected override void Append(StringBuilder builder, LogEventInfo logEvent)
    {
        var dir = _baseDir;
#if !SILVERLIGHT && !NETSTANDARD1_3
        if (ProcessDir)
        {
            // Lazily resolve and cache the process directory on first use.
            dir = _processDir ?? (_processDir = Path.GetDirectoryName(ProcessIDHelper.Instance.CurrentProcessFilePath));
        }
#endif

        if (dir != null)
        {
            var path = PathHelpers.CombinePaths(dir, Dir, File);
            builder.Append(path);
        }
    }
}
}
| 1 | 20,102 | Please use ".NET Core 3" - i'm trying hard to use one form, and this is the one MS advices | NLog-NLog | .cs |
@@ -87,6 +87,11 @@ func StartSession(params *TelemetrySessionParams, statsEngine stats.Engine) erro
seelog.Errorf("Error: lost websocket connection with ECS Telemetry service (TCS): %v", tcsError)
params.time().Sleep(backoff.Duration())
}
+ select {
+ case <-params.Ctx.Done():
+ return nil
+ default:
+ }
}
}
| 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package tcshandler
import (
"io"
"net/url"
"strings"
"time"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/engine"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/stats"
tcsclient "github.com/aws/amazon-ecs-agent/agent/tcs/client"
"github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs"
"github.com/aws/amazon-ecs-agent/agent/utils/retry"
"github.com/aws/amazon-ecs-agent/agent/version"
"github.com/aws/amazon-ecs-agent/agent/wsclient"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/cihub/seelog"
)
const (
// defaultPublishMetricsInterval is the interval at which utilization
// metrics from stats engine are published to the backend.
defaultPublishMetricsInterval = 20 * time.Second
// The maximum time to wait between heartbeats without disconnecting
defaultHeartbeatTimeout = 1 * time.Minute
defaultHeartbeatJitter = 1 * time.Minute
// wsRWTimeout is the duration of read and write deadline for the
// websocket connection
wsRWTimeout = 2*defaultHeartbeatTimeout + defaultHeartbeatJitter
deregisterContainerInstanceHandler = "TCSDeregisterContainerInstanceHandler"
)
// StartMetricsSession starts a metric session. It initializes the stats engine
// and invokes StartSession. It returns early (without opening a session) when
// container health metrics are disabled or the stats engine fails to
// initialize.
func StartMetricsSession(params *TelemetrySessionParams) {
	ok, err := params.isContainerHealthMetricsDisabled()
	if err != nil {
		seelog.Warnf("Error starting metrics session: %v", err)
		return
	}
	if ok {
		seelog.Warnf("Metrics were disabled, not starting the telemetry session")
		return
	}

	err = params.StatsEngine.MustInit(params.Ctx, params.TaskEngine, params.Cfg.Cluster,
		params.ContainerInstanceArn)
	if err != nil {
		seelog.Warnf("Error initializing metrics engine: %v", err)
		return
	}

	err = StartSession(params, params.StatsEngine)
	if err != nil {
		seelog.Warnf("Error starting metrics session with backend: %v", err)
	}
}
// StartSession creates a session with the backend and handles requests
// using the passed in arguments, reconnecting with exponential backoff when
// the websocket connection is lost. It returns nil once the supplied context
// is cancelled (agent shutdown).
// The engine is expected to be initialized and gathering container metrics by
// the time the websocket client starts using it.
func StartSession(params *TelemetrySessionParams, statsEngine stats.Engine) error {
	backoff := retry.NewExponentialBackoff(time.Second, 1*time.Minute, 0.2, 2)
	for {
		tcsError := startTelemetrySession(params, statsEngine)
		if tcsError == nil || tcsError == io.EOF {
			seelog.Info("TCS Websocket connection closed for a valid reason")
			backoff.Reset()
		} else {
			seelog.Errorf("Error: lost websocket connection with ECS Telemetry service (TCS): %v", tcsError)
			params.time().Sleep(backoff.Duration())
		}
		// Exit cleanly when the agent is shutting down instead of retrying forever.
		select {
		case <-params.Ctx.Done():
			return nil
		default:
		}
	}
}
// startTelemetrySession discovers the TCS endpoint for this container
// instance and opens a single telemetry session against it, blocking until
// that session ends.
func startTelemetrySession(params *TelemetrySessionParams, statsEngine stats.Engine) error {
	tcsEndpoint, err := params.ECSClient.DiscoverTelemetryEndpoint(params.ContainerInstanceArn)
	if err != nil {
		seelog.Errorf("tcs: unable to discover poll endpoint: %v", err)
		return err
	}
	url := formatURL(tcsEndpoint, params.Cfg.Cluster, params.ContainerInstanceArn, params.TaskEngine)
	return startSession(url, params.Cfg, params.CredentialProvider, statsEngine,
		defaultHeartbeatTimeout, defaultHeartbeatJitter, defaultPublishMetricsInterval,
		params.DeregisterInstanceEventStream)
}
// startSession connects a TCS websocket client to the given url, wires up
// the heartbeat/ack handlers and a disconnect-on-inactivity timer, and
// serves the connection until it is closed.
func startSession(url string,
	cfg *config.Config,
	credentialProvider *credentials.Credentials,
	statsEngine stats.Engine,
	heartbeatTimeout, heartbeatJitter,
	publishMetricsInterval time.Duration,
	deregisterInstanceEventStream *eventstream.EventStream) error {
	client := tcsclient.New(url, cfg, credentialProvider, statsEngine,
		publishMetricsInterval, wsRWTimeout, cfg.DisableMetrics)
	defer client.Close()

	// Disconnect the websocket when the container instance is deregistered.
	err := deregisterInstanceEventStream.Subscribe(deregisterContainerInstanceHandler, client.Disconnect)
	if err != nil {
		return err
	}
	defer deregisterInstanceEventStream.Unsubscribe(deregisterContainerInstanceHandler)

	err = client.Connect()
	if err != nil {
		seelog.Errorf("Error connecting to TCS: %v", err.Error())
		return err
	}
	seelog.Info("Connected to TCS endpoint")
	// start a timer and listens for tcs heartbeats/acks. The timer is reset when
	// we receive a heartbeat from the server or when a publish metrics message
	// is acked.
	timer := time.AfterFunc(retry.AddJitter(heartbeatTimeout, heartbeatJitter), func() {
		// Close the connection if there haven't been any messages received from backend
		// for a long time.
		seelog.Info("TCS Connection hasn't had any activity for too long; disconnecting")
		client.Disconnect()
	})
	defer timer.Stop()
	client.AddRequestHandler(heartbeatHandler(timer))
	client.AddRequestHandler(ackPublishMetricHandler(timer))
	client.AddRequestHandler(ackPublishHealthMetricHandler(timer))
	client.SetAnyRequestHandler(anyMessageHandler(client))
	return client.Serve()
}
// heartbeatHandler resets the heartbeat timer when HeartbeatMessage message is received from tcs.
func heartbeatHandler(timer *time.Timer) func(*ecstcs.HeartbeatMessage) {
	return func(*ecstcs.HeartbeatMessage) {
		seelog.Debug("Received HeartbeatMessage from tcs")
		// NOTE(review): resets with the package-level default timeout/jitter,
		// not the values passed to startSession — confirm this is intentional.
		timer.Reset(retry.AddJitter(defaultHeartbeatTimeout, defaultHeartbeatJitter))
	}
}
// ackPublishMetricHandler consumes the ack message from the backend. The backend sends
// the ack each time it processes a metric message.  Receiving an ack counts as
// connection activity, so the inactivity timer is reset.
func ackPublishMetricHandler(timer *time.Timer) func(*ecstcs.AckPublishMetric) {
	return func(*ecstcs.AckPublishMetric) {
		seelog.Debug("Received AckPublishMetric from tcs")
		timer.Reset(retry.AddJitter(defaultHeartbeatTimeout, defaultHeartbeatJitter))
	}
}
// ackPublishHealthMetricHandler consumes the ack message from backend. The backend sends
// the ack each time it processes a health message.  Receiving an ack counts as
// connection activity, so the inactivity timer is reset.
func ackPublishHealthMetricHandler(timer *time.Timer) func(*ecstcs.AckPublishHealth) {
	return func(*ecstcs.AckPublishHealth) {
		seelog.Debug("Received ACKPublishHealth from tcs")
		timer.Reset(retry.AddJitter(defaultHeartbeatTimeout, defaultHeartbeatJitter))
	}
}
// anyMessageHandler handles any server message. Any server message means the
// connection is active, so the websocket read deadline is pushed out by
// wsRWTimeout.  Failure to set the deadline is logged but not fatal.
func anyMessageHandler(client wsclient.ClientServer) func(interface{}) {
	return func(interface{}) {
		seelog.Trace("TCS activity occurred")
		// Reset read deadline as there's activity on the channel
		if err := client.SetReadDeadline(time.Now().Add(wsRWTimeout)); err != nil {
			seelog.Warnf("Unable to extend read deadline for TCS connection: %v", err)
		}
	}
}
// formatURL returns the TCS websocket URL: the discovered endpoint with a
// trailing slash, the "ws" path, and a query string carrying the cluster,
// container instance ARN, agent version/hash and — when the task engine can
// report it — the Docker version.
func formatURL(endpoint string, cluster string, containerInstance string, taskEngine engine.TaskEngine) string {
	// Normalize to exactly one trailing slash on the base URL.
	base := strings.TrimSuffix(endpoint, "/") + "/"
	params := url.Values{}
	params.Set("cluster", cluster)
	params.Set("containerInstance", containerInstance)
	params.Set("agentVersion", version.Version)
	params.Set("agentHash", version.GitHashString())
	if dockerVersion, err := taskEngine.Version(); err == nil {
		params.Set("dockerVersion", dockerVersion)
	}
	return base + "ws?" + params.Encode()
}
| 1 | 24,330 | Unrelated to this change, but this is a fix for when TestDoStartCgroupInitHappyPath has a failure after the test goroutine has already exited. | aws-amazon-ecs-agent | go |
@@ -29,6 +29,7 @@ import qutebrowser
from qutebrowser.utils import docutils
from qutebrowser.browser import pdfjs
+from end2end.features.test_scroll_bdd import check_scrolled, check_not_scrolled
bdd.scenarios('misc.feature')
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import sys
import json
import os.path
import subprocess
import pytest
import pytest_bdd as bdd
import qutebrowser
from qutebrowser.utils import docutils
from qutebrowser.browser import pdfjs
bdd.scenarios('misc.feature')
@bdd.when("the documentation is up to date")
def update_documentation():
    """Update the docs before testing :help.

    Regenerates the HTML docs via scripts/asciidoc2html.py when they exist
    but are stale; skips the test when asciidoc is not installed.
    """
    base_path = os.path.dirname(os.path.abspath(qutebrowser.__file__))
    doc_path = os.path.join(base_path, 'html', 'doc')
    script_path = os.path.join(base_path, '..', 'scripts')
    if not os.path.exists(doc_path):
        # On CI, we can test this without actually building the docs
        return
    # NOTE(review): p is a bare filename from os.listdir(); confirm
    # docutils.docs_up_to_date() resolves it relative to doc_path.
    if all(docutils.docs_up_to_date(p) for p in os.listdir(doc_path)):
        return
    try:
        # Probe for the asciidoc binary; its output is discarded.
        subprocess.call(['asciidoc'], stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL)
    except OSError:
        pytest.skip("Docs outdated and asciidoc unavailable!")
    update_script = os.path.join(script_path, 'asciidoc2html.py')
    subprocess.call([sys.executable, update_script])
@bdd.given('pdfjs is available')
def pdfjs_available():
    """Skip the scenario when no pdf.js installation can be found."""
    if pdfjs.is_available():
        return
    pytest.skip("No pdfjs installation found.")
@bdd.then(bdd.parsers.parse('the cookie {name} should be set to {value}'))
def check_cookie(quteproc, name, value):
    """Verify a cookie's value on the httpbin cookies page.

    The page body is expected to be JSON with a top-level "cookies" mapping.
    """
    cookie_data = json.loads(quteproc.get_content())
    print(cookie_data)  # captured by pytest; shown on failure for debugging
    assert cookie_data['cookies'][name] == value
| 1 | 15,026 | Hmm, I'd really expect this to work, and yet it doesn't. I'll investigate later, though it might get Monday until I get the time. | qutebrowser-qutebrowser | py |
@@ -356,7 +356,7 @@ TEST(svm_thunder_dense_test, can_classify_any_two_labels) {
auto support_indices_table = result_train.get_support_indices();
const auto support_indices = row_accessor<const float>(support_indices_table).pull();
- for (size_t i = 0; i < support_indices.get_count(); i++) {
+ for (std::int64_t i = 0; i < support_indices.get_count(); i++) {
ASSERT_EQ(support_indices[i], i);
}
| 1 | /*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "gtest/gtest.h"
#include "oneapi/dal/algo/svm/infer.hpp"
#include "oneapi/dal/algo/svm/train.hpp"
#include "oneapi/dal/table/homogen.hpp"
#include "oneapi/dal/table/row_accessor.hpp"
using namespace oneapi::dal;
using std::int32_t;
// Trains a linear SVM on two linearly separable 3-point clusters and checks
// that the two points nearest the separating hyperplane become the support
// vectors, with decision-function values of exactly -1/+1 (on the margin).
TEST(svm_thunder_dense_test, can_classify_linear_separable_surface) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, -1.f, -1.f, -1.f, -1.f, -2.f, +1.f, +1.f, +1.f, +2.f, +2.f, +1.f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, +1.f, +1.f, +1.f,
    };
    // Rows (-1,-1) and (+1,+1): closest points of each class to the boundary.
    constexpr std::int64_t support_index_negative = 1;
    constexpr std::int64_t support_index_positive = 3;
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto svm_desc = svm::descriptor{}.set_c(1.0);
    const auto result_train = train(svm_desc, x_train_table, y_train_table);
    ASSERT_EQ(result_train.get_support_vector_count(), 2);
    auto support_indices_table = result_train.get_support_indices();
    const auto support_indices = row_accessor<const float>(support_indices_table).pull();
    ASSERT_EQ(support_indices[0], support_index_negative);
    ASSERT_EQ(support_indices[1], support_index_positive);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_train_table);
    auto decision_function_table = result_infer.get_decision_function();
    const auto decision_function = row_accessor<const float>(decision_function_table).pull();
    // Support vectors sit exactly on the margin: decision function is +/-1.
    ASSERT_FLOAT_EQ(decision_function[support_index_negative], -1.f);
    ASSERT_FLOAT_EQ(decision_function[support_index_positive], +1.f);
}
// Same separable data as above, but with an explicit linear kernel
// (scale=0.1, shift=0) and C=10: the scaled kernel combined with a 10x larger
// C should yield the same support vectors and margin values.
TEST(svm_thunder_dense_test, can_classify_linear_separable_surface_with_not_default_linear_kernel) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, -1.f, -1.f, -1.f, -1.f, -2.f, +1.f, +1.f, +1.f, +2.f, +2.f, +1.f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, +1.f, +1.f, +1.f,
    };
    constexpr std::int64_t support_index_negative = 1;
    constexpr std::int64_t support_index_positive = 3;
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto kernel_desc = linear_kernel::descriptor{}.set_scale(0.1).set_shift(0.0);
    const auto svm_desc = svm::descriptor{ kernel_desc }.set_c(10.0);
    const auto result_train = train(svm_desc, x_train_table, y_train_table);
    ASSERT_EQ(result_train.get_support_vector_count(), 2);
    auto support_indices_table = result_train.get_support_indices();
    const auto support_indices = row_accessor<const float>(support_indices_table).pull();
    ASSERT_EQ(support_indices[0], support_index_negative);
    ASSERT_EQ(support_indices[1], support_index_positive);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_train_table);
    auto decision_function_table = result_infer.get_decision_function();
    const auto decision_function = row_accessor<const float>(decision_function_table).pull();
    ASSERT_FLOAT_EQ(decision_function[support_index_negative], -1.f);
    ASSERT_FLOAT_EQ(decision_function[support_index_positive], +1.f);
}
// Small C (0.1) widens the margin enough that every training point becomes a
// support vector; labels must still come out correct for both halves.
TEST(svm_thunder_dense_test, can_classify_linear_separable_surface_with_big_margin) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, -1.f, -1.f, -1.f, -1.f, -2.f, +1.f, +1.f, +1.f, +2.f, +2.f, +1.f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, +1.f, +1.f, +1.f,
    };
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto svm_desc = svm::descriptor{}.set_c(1e-1);
    const auto result_train = train(svm_desc, x_train_table, y_train_table);
    ASSERT_EQ(result_train.get_support_vector_count(), row_count_train);
    auto support_indices_table = result_train.get_support_indices();
    const auto support_indices = row_accessor<const float>(support_indices_table).pull();
    // All rows are support vectors, so indices are simply 0..row_count_train-1.
    for (std::int64_t i = 0; i < support_indices.get_count(); i++)
        ASSERT_EQ(support_indices[i], i);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_train_table);
    auto labels_table = result_infer.get_labels();
    const auto labels = row_accessor<const float>(labels_table).pull();
    // First half of the rows is the -1 class, second half the +1 class.
    for (std::int64_t i = 0; i < row_count_train / 2; i++) {
        ASSERT_FLOAT_EQ(labels[i], -1.f);
    }
    for (std::int64_t i = row_count_train / 2; i < row_count_train; i++) {
        ASSERT_FLOAT_EQ(labels[i], +1.f);
    }
}
// Adds two deliberately mislabeled outliers ((-3,-3) labeled +1 and (+3,+3)
// labeled -1) to the separable set: with C=1 the soft margin should still
// classify the six clean points correctly.  The outliers (rows 6-7) are
// intentionally left unchecked.
TEST(svm_thunder_dense_test, can_classify_linear_not_separable_surface) {
    constexpr std::int64_t row_count_train = 8;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = { -2.f, -1.f, -1.f, -1.f, -1.f, -2.f, +1.f, +1.f,
                              +1.f, +2.f, +2.f, +1.f, -3.f, -3.f, +3.f, +3.f };
    const float y_train[] = { -1.f, -1.f, -1.f, +1.f, +1.f, +1.f, +1.f, -1.f };
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto svm_desc = svm::descriptor{}.set_c(1.0);
    const auto result_train = train(svm_desc, x_train_table, y_train_table);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_train_table);
    auto labels_table = result_infer.get_labels();
    const auto labels = row_accessor<const float>(labels_table).pull();
    // Only the six clean points: rows 0-2 are -1, rows 3-5 are +1.
    for (int i = 0; i < 3; i++) {
        ASSERT_FLOAT_EQ(labels[i], -1.f);
    }
    for (int i = 3; i < 6; i++) {
        ASSERT_FLOAT_EQ(labels[i], +1.f);
    }
}
// Two classes separable only by a quadric surface (outer band vs. inner
// band); an RBF kernel (sigma=1) must classify all rows, and with this data
// every point ends up a support vector.
TEST(svm_thunder_dense_test, can_classify_quadric_separable_surface_with_rbf_kernel) {
    constexpr std::int64_t row_count_train = 12;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, 0.f,  -2.f, -1.f,  -2.f, +1.f, +2.f, 0.f,  +2.f, -1.f,  +2.f, +1.f,
        -1.f, 0.f,  -1.f, -0.5f, -1.f, +0.5f, +1.f, 0.5f, +1.f, -0.5f, +1.f, +0.5f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, -1.f, -1.f, -1.f, +1.f, +1.f, +1.f, +1.f, +1.f, +1.f,
    };
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto kernel_desc = rbf_kernel::descriptor{}.set_sigma(1.0);
    const auto svm_desc = svm::descriptor{ kernel_desc }.set_c(1.0);
    const auto result_train = train(svm_desc, x_train_table, y_train_table);
    ASSERT_EQ(result_train.get_support_vector_count(), row_count_train);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_train_table);
    auto labels_table = result_infer.get_labels();
    const auto labels = row_accessor<const float>(labels_table).pull();
    for (std::int64_t i = 0; i < row_count_train / 2; i++) {
        ASSERT_FLOAT_EQ(labels[i], -1.f);
    }
    for (std::int64_t i = row_count_train / 2; i < row_count_train; i++) {
        ASSERT_FLOAT_EQ(labels[i], +1.f);
    }
}
// With all observation weights equal, training must match the unweighted
// case: the midpoint (-1, 1) between the symmetric classes should land
// exactly on the decision boundary (decision function ~ 0).
TEST(svm_thunder_dense_test, can_classify_linear_separable_surface_with_equal_weights) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, 0.f, -1.f, -1.f, 0.f, -2.f, 0.f, +2.f, +1.f, +1.f, +2.f, +0.f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, +1.f, +1.f, +1.f,
    };
    const float weights[] = {
        +1.f, +1.f, +1.f, +1.f, +1.f, +1.f,
    };
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto weights_table = homogen_table::wrap(weights, row_count_train, 1);
    const auto svm_desc = svm::descriptor{}.set_c(0.1);
    const auto result_train = train(svm_desc, x_train_table, y_train_table, weights_table);
    const float x_test[] = {
        -1.f,
        1.f,
    };
    const auto x_test_table = homogen_table::wrap(x_test, 1, column_count);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_test_table);
    auto decision_function_table = result_infer.get_decision_function();
    const auto decision_function = row_accessor<const float>(decision_function_table).pull();
    ASSERT_NEAR(decision_function[0], 0.f, 1e-6);
}
// Heavy weights (10) on the two outermost points pull the boundary so the
// midpoint (-1, 1) falls on the negative side: decision function < 0.
TEST(svm_thunder_dense_test, can_classify_linear_separable_surface_with_boundary_weights) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, 0.f, -1.f, -1.f, 0.f, -2.f, 0.f, +2.f, +1.f, +1.f, +2.f, +0.f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, +1.f, +1.f, +1.f,
    };
    const float weights[] = {
        +10.f, +0.1f, +0.1f, +0.1f, +.1f, 10.f,
    };
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto weights_table = homogen_table::wrap(weights, row_count_train, 1);
    const auto svm_desc = svm::descriptor{}.set_c(0.1);
    const auto result_train = train(svm_desc, x_train_table, y_train_table, weights_table);
    const float x_test[] = {
        -1.f,
        1.f,
    };
    const auto x_test_table = homogen_table::wrap(x_test, 1, column_count);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_test_table);
    auto decision_function_table = result_infer.get_decision_function();
    const auto decision_function = row_accessor<const float>(decision_function_table).pull();
    ASSERT_LT(decision_function[0], 0.f);
}
// Heavy weights (10) on the two innermost points shift the boundary the
// other way: the midpoint (-1, 1) lands on the positive side (> 0).
TEST(svm_thunder_dense_test, can_classify_linear_separable_surface_with_center_weights) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, 0.f, -1.f, -1.f, 0.f, -2.f, 0.f, +2.f, +1.f, +1.f, +2.f, +0.f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, +1.f, +1.f, +1.f,
    };
    const float weights[] = {
        +0.1f, +0.1f, +10.f, +10.f, +0.1f, +0.1f,
    };
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto weights_table = homogen_table::wrap(weights, row_count_train, 1);
    const auto svm_desc = svm::descriptor{}.set_c(0.1);
    const auto result_train = train(svm_desc, x_train_table, y_train_table, weights_table);
    const float x_test[] = {
        -1.f,
        1.f,
    };
    const auto x_test_table = homogen_table::wrap(x_test, 1, column_count);
    const auto result_infer = infer(svm_desc, result_train.get_model(), x_test_table);
    auto decision_function_table = result_infer.get_decision_function();
    const auto decision_function = row_accessor<const float>(decision_function_table).pull();
    ASSERT_GT(decision_function[0], 0.f);
}
// Trains with a float descriptor but infers with a double descriptor:
// results must be unaffected by the descriptor's floating-point type.
TEST(svm_thunder_dense_test, can_classify_linear_separable_surface_with_different_type_desc) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    const float x_train[] = {
        -2.f, -1.f, -1.f, -1.f, -1.f, -2.f, +1.f, +1.f, +1.f, +2.f, +2.f, +1.f,
    };
    const float y_train[] = {
        -1.f, -1.f, -1.f, +1.f, +1.f, +1.f,
    };
    const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
    const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
    const auto svm_desc_train = svm::descriptor<float>{}.set_c(1e-1);
    const auto result_train = train(svm_desc_train, x_train_table, y_train_table);
    // Small C: every row becomes a support vector (indices 0..n-1).
    ASSERT_EQ(result_train.get_support_vector_count(), row_count_train);
    auto support_indices_table = result_train.get_support_indices();
    const auto support_indices = row_accessor<const float>(support_indices_table).pull();
    for (std::int64_t i = 0; i < support_indices.get_count(); i++)
        ASSERT_EQ(support_indices[i], i);
    const auto svm_desc_infer = svm::descriptor<double>{}.set_c(1e-1);
    const auto result_infer = infer(svm_desc_infer, result_train.get_model(), x_train_table);
    auto labels_table = result_infer.get_labels();
    const auto labels = row_accessor<const float>(labels_table).pull();
    for (std::int64_t i = 0; i < row_count_train / 2; i++) {
        ASSERT_FLOAT_EQ(labels[i], -1.f);
    }
    for (std::int64_t i = row_count_train / 2; i < row_count_train; i++) {
        ASSERT_FLOAT_EQ(labels[i], +1.f);
    }
}
// Trains the same separable data under four different label pairs and checks
// that the model reports the correct first/second class labels each time.
//
// Fixes vs. the previous revision:
//  - the inner loop counter was `size_t`, compared against the signed
//    std::int64_t returned by get_count() (signed/unsigned mismatch warning;
//    every other loop in this file uses std::int64_t),
//  - the inner loop variable `i` shadowed the outer range loop's `i`.
TEST(svm_thunder_dense_test, can_classify_any_two_labels) {
    constexpr std::int64_t row_count_train = 6;
    constexpr std::int64_t column_count = 2;
    constexpr std::int64_t range_count = 4;
    const float x_train[] = {
        -2.f, -1.f, -1.f, -1.f, -1.f, -2.f, +1.f, +1.f, +1.f, +2.f, +2.f, +1.f,
    };
    // Expected (first, second) class labels reported by the trained model.
    const float expected_labels_range[range_count][2] = {
        { -1.f, +1.f },
        { +0.f, +1.f },
        { +0.f, +2.f },
        { -1.f, +0.f },
    };
    // One row of labels per case; first three rows one class, last three the other.
    const float y_train_range[range_count][row_count_train] = {
        { -1.f, -1.f, -1.f, +1.f, +1.f, +1.f },
        { +0.f, +0.f, +0.f, +1.f, +1.f, +1.f },
        { +0.f, +0.f, +0.f, +2.f, +2.f, +2.f },
        { -1.f, -1.f, -1.f, +0.f, +0.f, +0.f },
    };
    for (std::int64_t i = 0; i < range_count; ++i) {
        const auto y_train = y_train_range[i];
        const auto expected_labels = expected_labels_range[i];
        const auto x_train_table = homogen_table::wrap(x_train, row_count_train, column_count);
        const auto y_train_table = homogen_table::wrap(y_train, row_count_train, 1);
        const auto svm_desc_train = svm::descriptor<float>{}.set_c(1e-1);
        const auto result_train = train(svm_desc_train, x_train_table, y_train_table);
        // Small C: every row becomes a support vector (indices 0..n-1).
        ASSERT_EQ(result_train.get_support_vector_count(), row_count_train);
        auto support_indices_table = result_train.get_support_indices();
        const auto support_indices = row_accessor<const float>(support_indices_table).pull();
        for (std::int64_t j = 0; j < support_indices.get_count(); j++) {
            ASSERT_EQ(support_indices[j], j);
        }
        ASSERT_EQ(result_train.get_model().get_first_class_label(), expected_labels[0]);
        ASSERT_EQ(result_train.get_model().get_second_class_label(), expected_labels[1]);
    }
}
| 1 | 24,326 | Does this changes affect process building dynamic libraries anyhow? | oneapi-src-oneDAL | cpp |
@@ -26,7 +26,7 @@ import struct
import time
from scapy.packet import *
from scapy.fields import *
-from scapy.contrib.ppi import PPIGenericFldHdr, addPPIType
+from scapy.layers.dot11 import *
from scapy.error import warning
import scapy.modules.six as six
from scapy.modules.six.moves import range | 1 | # This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# author: <[email protected]>
# scapy.contrib.description = PPI GEOLOCATION
# scapy.contrib.status = loads
"""
PPI-GEOLOCATION tags
"""
from __future__ import absolute_import
import struct
import time
from scapy.packet import *
from scapy.fields import *
from scapy.contrib.ppi import PPIGenericFldHdr, addPPIType
from scapy.error import warning
import scapy.modules.six as six
from scapy.modules.six.moves import range
CURR_GEOTAG_VER = 2  # Major revision of specification

# PPI field-type numbers assigned to the geolocation tags.
PPI_GPS = 30002
PPI_VECTOR = 30003
PPI_SENSOR = 30004
PPI_ANTENNA = 30005
# The FixedX_Y Fields are used to store fixed point numbers in a variety of fields in the GEOLOCATION-TAGS specification
class Fixed3_6Field(LEIntField):
    """Fixed-point 3.6 value stored as micro-units in a little-endian uint32.

    Internal/machine representation is an integer count of 1e-6 units in
    [0, 999999999]; the human representation is the float in [0, 999.999999].
    Out-of-range values are clamped with a warning.
    """

    def i2h(self, pkt, x):
        # internal -> human: clamp to the valid counter range, then scale down.
        if x is None:
            return x
        if x < 0:
            warning("Fixed3_6: Internal value too negative: %d", x)
            x = 0
        elif x > 999999999:
            warning("Fixed3_6: Internal value too positive: %d", x)
            x = 999999999
        return x * 1e-6

    def h2i(self, pkt, x):
        # human -> internal: clamp to the representable float range, then
        # round to the nearest micro-unit.
        if x is None:
            return x
        if x <= -0.5e-6:
            warning("Fixed3_6: Input value too negative: %.7f", x)
            x = 0
        elif x >= 999.9999995:
            warning("Fixed3_6: Input value too positive: %.7f", x)
            x = 999.999999
        return int(round(x * 1e6))

    def i2m(self, pkt, x):
        """Convert internal value to machine value"""
        # Undefined values serialize as zero.
        return self.h2i(pkt, 0) if x is None else x

    def i2repr(self, pkt, x):
        return "%3.6f" % (0 if x is None else self.i2h(pkt, x))
class Fixed3_7Field(LEIntField):
    # Fixed-point 3.7 value (e.g. latitude/longitude in degrees) stored as a
    # little-endian uint32: internal = (human + 180.0) * 1e7, giving a human
    # range of [-180, +180] with 1e-7 resolution.  Out-of-range values are
    # clamped with a warning.
    def i2h(self, pkt, x):
        if x is not None:
            if (x < 0):
                warning("Fixed3_7: Internal value too negative: %d", x)
                x = 0
            elif (x > 3600000000):
                warning("Fixed3_7: Internal value too positive: %d", x)
                x = 3600000000
            # Remove the +180-degree offset and scale back to degrees.
            x = (x - 1800000000) * 1e-7
        return x

    def h2i(self, pkt, x):
        if x is not None:
            if (x <= -180.00000005):
                warning("Fixed3_7: Input value too negative: %.8f", x)
                x = -180.0
            elif (x >= 180.00000005):
                warning("Fixed3_7: Input value too positive: %.8f", x)
                x = 180.0
            # Bias by +180 degrees so the stored counter is non-negative.
            x = int(round((x + 180.0) * 1e7))
        return x

    def i2m(self, pkt, x):
        """Convert internal value to machine value"""
        if x is None:
            # Try to return zero if undefined
            x = self.h2i(pkt, 0)
        return x

    def i2repr(self, pkt, x):
        if x is None:
            y = 0
        else:
            y = self.i2h(pkt, x)
        return "%3.7f" % (y)
class Fixed6_4Field(LEIntField):
    # Fixed-point 6.4 value (e.g. altitude) stored as a little-endian uint32:
    # internal = (human + 180000.0) * 1e4, giving a human range of
    # [-180000, +180000] with 1e-4 resolution.  Out-of-range values are
    # clamped with a warning.
    def i2h(self, pkt, x):
        if x is not None:
            if (x < 0):
                warning("Fixed6_4: Internal value too negative: %d", x)
                x = 0
            elif (x > 3600000000):
                warning("Fixed6_4: Internal value too positive: %d", x)
                x = 3600000000
            # Remove the +180000 offset and scale back to the human value.
            x = (x - 1800000000) * 1e-4
        return x

    def h2i(self, pkt, x):
        if x is not None:
            if (x <= -180000.00005):
                warning("Fixed6_4: Input value too negative: %.5f", x)
                x = -180000.0
            elif (x >= 180000.00005):
                warning("Fixed6_4: Input value too positive: %.5f", x)
                x = 180000.0
            # Bias by +180000 so the stored counter is non-negative.
            x = int(round((x + 180000.0) * 1e4))
        return x

    def i2m(self, pkt, x):
        """Convert internal value to machine value"""
        if x is None:
            # Try to return zero if undefined
            x = self.h2i(pkt, 0)
        return x

    def i2repr(self, pkt, x):
        if x is None:
            y = 0
        else:
            y = self.i2h(pkt, x)
        return "%6.4f" % (y)
# The GPS timestamps fractional time counter is stored in a 32-bit unsigned ns counter.
# The ept field is as well,
class NSCounter_Field(LEIntField):
def i2h(self, pkt, x): # converts nano-seconds to seconds for output
if x is not None:
if (x < 0):
warning("NSCounter_Field: Internal value too negative: %d", x)
x = 0
elif (x >= 2**32):
warning("NSCounter_Field: Internal value too positive: %d", x)
x = 2**32 - 1
x = (x / 1e9)
return x
def h2i(self, pkt, x): # converts input in seconds into nano-seconds for storage
if x is not None:
if (x < 0):
warning("NSCounter_Field: Input value too negative: %.10f", x)
x = 0
elif (x >= (2**32) / 1e9):
warning("NSCounter_Field: Input value too positive: %.10f", x)
x = (2**32 - 1) / 1e9
x = int(round((x * 1e9)))
return x
def i2repr(self, pkt, x):
if x is None:
y = 0
else:
y = self.i2h(pkt, x)
return "%1.9f" % (y)
class LETimeField(UTCTimeField, LEIntField):
    """Little-endian 32-bit UTC timestamp rendered via ``strf``."""
    __slots__ = ["epoch", "delta", "strf"]

    def __init__(self, name, default, epoch=None, strf="%a, %d %b %Y %H:%M:%S +0000"):
        # Initialize both bases explicitly (cooperative diamond on Field).
        LEIntField.__init__(self, name, default)
        UTCTimeField.__init__(self, name, default, epoch=epoch, strf=strf)
class SignedByteField(Field):
    """Single signed byte (struct format "b")."""

    def __init__(self, name, default):
        Field.__init__(self, name, default, "b")

    def randval(self):
        # Fuzzing support: random signed byte.
        return RandSByte()
class XLEShortField(LEShortField, XShortField):
    """Little-endian short shown in hexadecimal."""

    def i2repr(self, pkt, x):
        return XShortField.i2repr(self, pkt, x)
class XLEIntField(LEIntField, XIntField):
    """Little-endian int shown in hexadecimal."""

    def i2repr(self, pkt, x):
        return XIntField.i2repr(self, pkt, x)
class GPSTime_Field(LETimeField):
    """GPS timestamp: little-endian 32-bit UTC seconds, rendered like
    ``Mon, 01 Jan 2018 00:00:00 UTC``."""

    def __init__(self, name, default):
        # No ``return`` here: __init__ must return None; the parent
        # constructor is called purely for its side effects.
        LETimeField.__init__(self, name, default, strf="%a, %d %b %Y %H:%M:%S UTC")
class VectorFlags_Field(XLEIntField):
    """Represents te VectorFlags field. Handles the RelativeTo:sub-field"""
    # Bit layout: bit 0 = "DefinesForward"; bits 1-2 = RelativeTo enum;
    # bits 3-31 are reserved.
    _fwdstr = "DefinesForward"
    _resmask = 0xfffffff8   # mask of the reserved bits
    _relmask = 0x6          # mask of the two RelativeTo bits
    _relnames = ["RelativeToForward", "RelativeToEarth", "RelativeToCurrent", "RelativeToReserved"]
    _relvals = [0x00, 0x02, 0x04, 0x06]

    def i2repr(self, pkt, x):
        # Render as "+"-joined flag names, e.g. "DefinesForward+RelativeToEarth".
        if x is None:
            return str(x)
        r = []
        if (x & 0x1):
            r.append(self._fwdstr)
        i = (x & self._relmask) >> 1
        r.append(self._relnames[i])
        i = x & self._resmask
        if (i):
            # Reserved bits are preserved and shown verbatim.
            r.append("ReservedBits:%08X" % i)
        sout = "+".join(r)
        return sout

    def any2i(self, pkt, x):
        # Accept either the raw integer or a "+"-joined string of flag names;
        # unknown names in the string are silently ignored.
        if isinstance(x, str):
            r = x.split("+")
            y = 0
            for value in r:
                if (value == self._fwdstr):
                    y |= 0x1
                elif (value in self._relnames):
                    i = self._relnames.index(value)
                    y &= (~self._relmask)
                    y |= self._relvals[i]
                else:
                    # logging.warning("Unknown VectorFlags Argument: %s", value)
                    pass
        else:
            y = x
        # print "any2i: %s --> %s" % (str(x), str(y))
        return y
class HCSIFlagsField(FlagsField):
    """ A FlagsField where each bit/flag turns a conditional field on or off.
    If the value is None when building a packet, i2m() will check the value of
    every field in self.names. If the field's value is not None, the corresponding
    flag will be set. """

    def i2m(self, pkt, val):
        if val is None:
            val = 0
            if (pkt):
                # Derive the mask from which optional fields carry a value.
                for i, name in enumerate(self.names):
                    value = pkt.getfieldval(name)
                    if value is not None:
                        val |= 1 << i
        return val
class HCSINullField(StrFixedLenField):
    """Zero-length placeholder used for reserved HCSI slots."""

    def __init__(self, name, default):
        # No ``return``: __init__ must return None.
        StrFixedLenField.__init__(self, name, default, length=0)
class HCSIDescField(StrFixedLenField):
    """Fixed 32-byte description string (the HCSI DescString slot)."""

    def __init__(self, name, default):
        # No ``return``: __init__ must return None.
        StrFixedLenField.__init__(self, name, default, length=32)
class HCSIAppField(StrFixedLenField):
    """Fixed 60-byte application-data blob (the HCSI AppData slot)."""

    def __init__(self, name, default):
        # No ``return``: __init__ must return None.
        StrFixedLenField.__init__(self, name, default, length=60)
def _FlagsList(myfields):
    """Build a 32-entry flag-name list.

    Entries default to ``ReservedNN`` and are overridden by ``myfields``,
    a mapping of bit index -> flag name.
    """
    flags = ["Reserved%02d" % i for i in range(32)]
    # dict.items() behaves identically on Python 2 and 3 here; six is not needed.
    for i, value in myfields.items():
        flags[i] = value
    return flags
# Define all geolocation-tag flags lists
# GPS fix type/source flags (bit index -> name).
_hcsi_gps_flags = _FlagsList({0: "No Fix Available", 1: "GPS", 2: "Differential GPS",
                              3: "Pulse Per Second", 4: "Real Time Kinematic",
                              5: "Float Real Time Kinematic", 6: "Estimated (Dead Reckoning)",
                              7: "Manual Input", 8: "Simulation"})
# _hcsi_vector_flags = _FlagsList({0:"ForwardFrame", 1:"RotationsAbsoluteXYZ", 5:"OffsetFromGPS_XYZ"})
# This has been replaced with the VectorFlags_Field class, in order to handle the RelativeTo:subfield
# Vector characteristics: what the vector points at and how it was derived.
_hcsi_vector_char_flags = _FlagsList({0: "Antenna", 1: "Direction of Travel",
                                      2: "Front of Vehicle", 3: "Angle of Arrival", 4: "Transmitter Position",
                                      8: "GPS Derived", 9: "INS Derived", 10: "Compass Derived",
                                      11: "Acclerometer Derived", 12: "Human Derived"})
# Antenna polarization and steerability flags.
_hcsi_antenna_flags = _FlagsList({1: "Horizontal Polarization", 2: "Vertical Polarization",
                                  3: "Circular Polarization Left", 4: "Circular Polarization Right",
                                  16: "Electronically Steerable", 17: "Mechanically Steerable"})
""" HCSI PPI Fields are similar to RadioTap. A mask field called "present" specifies if each field
is present. All other fields are conditional. When dissecting a packet, each field is present if
"present" has the corresponding bit set. When building a packet, if "present" is None, the mask is
set to include every field that does not have a value of None. Otherwise, if the mask field is
not None, only the fields specified by "present" will be added to the packet.
To build each Packet type, build a list of the fields normally, excluding the present bitmask field.
The code will then construct conditional versions of each field and add the present field.
See GPS_Fields as an example. """
# Conditional test for all HCSI Fields
def _HCSITest(pkt, ibit, name):
    """Decide whether the optional HCSI field ``name`` is present in ``pkt``."""
    if pkt.present is not None:
        # Dissecting: honor the explicit present bitmask.
        return pkt.present & ibit
    # Building: a field is present whenever it carries a value.
    return pkt.getfieldval(name) is not None
# Wrap optional fields in ConditionalField, add HCSIFlagsField
def _HCSIBuildFields(fields):
names = [f.name for f in fields]
cond_fields = [HCSIFlagsField('present', None, -len(names), names)]
for i, name in enumerate(names):
ibit = 1 << i
seval = "lambda pkt:_HCSITest(pkt,%s,'%s')" % (ibit, name)
test = eval(seval)
cond_fields.append(ConditionalField(fields[i], test))
return cond_fields
class HCSIPacket(Packet):
    # Common header for all HCSI geolocation tags: PPI field header
    # (type/length) followed by the geotag base header (version/pad/length).
    name = "PPI HCSI"
    fields_desc = [LEShortField('pfh_type', None),
                   LEShortField('pfh_length', None),
                   ByteField('geotag_ver', CURR_GEOTAG_VER),
                   ByteField('geotag_pad', 0),
                   LEShortField('geotag_len', None)]

    def post_build(self, p, pay):
        # Backfill the two length fields once the full header is assembled:
        # pfh_length (bytes 2-3) excludes the 4-byte PPI field header;
        # geotag_len (bytes 6-7) also excludes those same 4 bytes.
        if self.pfh_length is None:
            l = len(p) - 4
            sl = struct.pack('<H', l)
            p = p[:2] + sl + p[4:]
        if self.geotag_len is None:
            l_g = len(p) - 4
            sl_g = struct.pack('<H', l_g)
            p = p[:6] + sl_g + p[8:]
        p += pay
        return p

    def extract_padding(self, p):
        # Everything after this tag belongs to the next layer, not padding.
        return b"", p
# GPS Fields
# Optional-field layout of the PPI GPS tag, in present-bitmask bit order.
GPS_Fields = [FlagsField("GPSFlags", None, -32, _hcsi_gps_flags),
              Fixed3_7Field("Latitude", None),
              Fixed3_7Field("Longitude", None), Fixed6_4Field("Altitude", None),
              Fixed6_4Field("Altitude_g", None), GPSTime_Field("GPSTime", None),
              NSCounter_Field("FractionalTime", None), Fixed3_6Field("eph", None),
              Fixed3_6Field("epv", None), NSCounter_Field("ept", None),
              HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
              HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
              HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
              HCSINullField("Reserved16", None), HCSINullField("Reserved17", None),
              HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
              HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
              HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
              HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
              HCSINullField("Reserved26", None), HCSINullField("Reserved27", None),
              HCSIDescField("DescString", None), XLEIntField("AppId", None),
              HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class GPS(HCSIPacket):
    """PPI geotag carrying GPS position/time data."""
    name = "PPI GPS"
    fields_desc = [LEShortField('pfh_type', PPI_GPS),  # pfh_type
                   LEShortField('pfh_length', None),  # pfh_len
                   ByteField('geotag_ver', CURR_GEOTAG_VER),  # base_geotag_header.ver
                   ByteField('geotag_pad', 0),  # base_geotag_header.pad
                   LEShortField('geotag_len', None)] + _HCSIBuildFields(GPS_Fields)
# Vector Fields
# List position defines the bit index of each field in the leading
# 'present' bitmask built by _HCSIBuildFields.
VEC_Fields = [VectorFlags_Field("VectorFlags", None),
              FlagsField("VectorChars", None, -32, _hcsi_vector_char_flags),
              Fixed3_6Field("Pitch", None), Fixed3_6Field("Roll", None),
              Fixed3_6Field("Heading", None), Fixed6_4Field("Off_X", None),
              Fixed6_4Field("Off_Y", None), Fixed6_4Field("Off_Z", None),
              HCSINullField("Reserved08", None), HCSINullField("Reserved09", None),
              HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
              HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
              HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
              Fixed3_6Field("Err_Rot", None), Fixed6_4Field("Err_Off", None),
              HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
              HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
              HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
              HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
              HCSINullField("Reserved26", None), HCSINullField("Reserved27", None),
              HCSIDescField("DescString", None), XLEIntField("AppId", None),
              HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class Vector(HCSIPacket):
    """PPI geotag carrying orientation/offset vector data."""
    name = "PPI Vector"
    fields_desc = [LEShortField('pfh_type', PPI_VECTOR),  # pfh_type
                   LEShortField('pfh_length', None),  # pfh_len
                   ByteField('geotag_ver', CURR_GEOTAG_VER),  # base_geotag_header.ver
                   ByteField('geotag_pad', 0),  # base_geotag_header.pad
                   LEShortField('geotag_len', None)] + _HCSIBuildFields(VEC_Fields)
# Sensor Fields
# Mapping of PPI sensor type codes to human-readable names.
# NOTE(review): the original comment linked to the IANA ICMP parameters
# registry, which looks like a copy-paste leftover - these are PPI sensor
# type codes, not ICMP values.
sensor_types = {1: "Velocity",
                2: "Acceleration",
                3: "Jerk",
                100: "Rotation",
                101: "Magnetic",
                1000: "Temperature",
                1001: "Barometer",
                1002: "Humidity",
                2000: "TDOA_Clock",
                2001: "Phase"
                }
# List position defines the bit index of each field in the leading
# 'present' bitmask built by _HCSIBuildFields.
SENS_Fields = [LEShortEnumField('SensorType', None, sensor_types),
               SignedByteField('ScaleFactor', None),
               Fixed6_4Field('Val_X', None),
               Fixed6_4Field('Val_Y', None),
               Fixed6_4Field('Val_Z', None),
               Fixed6_4Field('Val_T', None),
               Fixed6_4Field('Val_E', None),
               HCSINullField("Reserved07", None), HCSINullField("Reserved08", None),
               HCSINullField("Reserved09", None), HCSINullField("Reserved10", None),
               HCSINullField("Reserved11", None), HCSINullField("Reserved12", None),
               HCSINullField("Reserved13", None), HCSINullField("Reserved14", None),
               HCSINullField("Reserved15", None), HCSINullField("Reserved16", None),
               HCSINullField("Reserved17", None), HCSINullField("Reserved18", None),
               HCSINullField("Reserved19", None), HCSINullField("Reserved20", None),
               HCSINullField("Reserved21", None), HCSINullField("Reserved22", None),
               HCSINullField("Reserved23", None), HCSINullField("Reserved24", None),
               HCSINullField("Reserved25", None), HCSINullField("Reserved26", None),
               HCSINullField("Reserved27", None),
               HCSIDescField("DescString", None), XLEIntField("AppId", None),
               HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class Sensor(HCSIPacket):
    """PPI geotag carrying sensor measurement data."""
    name = "PPI Sensor"
    fields_desc = [LEShortField('pfh_type', PPI_SENSOR),  # pfh_type
                   LEShortField('pfh_length', None),  # pfh_len
                   ByteField('geotag_ver', CURR_GEOTAG_VER),  # base_geotag_header.ver
                   ByteField('geotag_pad', 0),  # base_geotag_header.pad
                   LEShortField('geotag_len', None)] + _HCSIBuildFields(SENS_Fields)
# HCSIAntenna Fields
# List position defines the bit index of each field in the leading
# 'present' bitmask built by _HCSIBuildFields.
ANT_Fields = [FlagsField("AntennaFlags", None, -32, _hcsi_antenna_flags),
              ByteField("Gain", None),
              Fixed3_6Field("HorizBw", None), Fixed3_6Field("VertBw", None),
              Fixed3_6Field("PrecisionGain", None), XLEShortField("BeamID", None),
              HCSINullField("Reserved06", None), HCSINullField("Reserved07", None),
              HCSINullField("Reserved08", None), HCSINullField("Reserved09", None),
              HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
              HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
              HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
              HCSINullField("Reserved16", None), HCSINullField("Reserved17", None),
              HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
              HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
              HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
              HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
              HCSIDescField("SerialNumber", None), HCSIDescField("ModelName", None),
              HCSIDescField("DescString", None), XLEIntField("AppId", None),
              HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class Antenna(HCSIPacket):
    """PPI geotag carrying antenna characteristics."""
    name = "PPI Antenna"
    fields_desc = [LEShortField('pfh_type', PPI_ANTENNA),  # pfh_type
                   LEShortField('pfh_length', None),  # pfh_len
                   ByteField('geotag_ver', CURR_GEOTAG_VER),  # base_geotag_header.ver
                   ByteField('geotag_pad', 0),  # base_geotag_header.pad
                   LEShortField('geotag_len', None)] + _HCSIBuildFields(ANT_Fields)
# Register the geotag packet classes with the PPI dissector
addPPIType(PPI_GPS, GPS)
addPPIType(PPI_VECTOR, Vector)
addPPIType(PPI_SENSOR, Sensor)
addPPIType(PPI_ANTENNA, Antenna)
| 1 | 13,185 | Could you limit what is imported here? | secdev-scapy | py |
@@ -1078,12 +1078,15 @@ def do_set_function_code(lambda_function: LambdaFunction):
if not lambda_cwd:
return
- # get local lambda working directory
+ # get local lambda code archive path
tmp_file = os.path.join(lambda_cwd, LAMBDA_ZIP_FILE_NAME)
if not zip_file_content:
zip_file_content = load_file(tmp_file, mode="rb")
+ # override lambda archive with fresh code if we got an update
+ save_file(tmp_file, zip_file_content)
+
# Set the appropriate lambda handler.
lambda_handler = generic_handler
is_java = lambda_executors.is_java_lambda(runtime) | 1 | import base64
import functools
import hashlib
import importlib.machinery
import json
import logging
import os
import re
import sys
import threading
import time
import traceback
import uuid
from datetime import datetime
from io import BytesIO
from threading import BoundedSemaphore
from typing import Any, Dict, List
from flask import Flask, Response, jsonify, request
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import urlparse
from localstack import config
from localstack.constants import APPLICATION_JSON, TEST_AWS_ACCOUNT_ID
from localstack.services.awslambda import lambda_executors
from localstack.services.awslambda.lambda_utils import (
DOTNET_LAMBDA_RUNTIMES,
LAMBDA_DEFAULT_HANDLER,
LAMBDA_DEFAULT_RUNTIME,
LAMBDA_DEFAULT_STARTING_POSITION,
get_handler_file_from_name,
get_lambda_runtime,
multi_value_dict_for_list,
)
from localstack.services.generic_proxy import RegionBackend
from localstack.services.install import install_go_lambda_runtime
from localstack.utils.analytics import event_publisher
from localstack.utils.aws import aws_responses, aws_stack
from localstack.utils.aws.aws_models import CodeSigningConfig, LambdaFunction
from localstack.utils.common import (
TMP_FILES,
empty_context_manager,
ensure_readable,
first_char_to_lower,
get_all_subclasses,
is_zip_file,
isoformat_milliseconds,
json_safe,
load_file,
long_uid,
mkdir,
now_utc,
parse_request_data,
run,
run_for_max_seconds,
run_safe,
safe_requests,
save_file,
short_uid,
start_worker_thread,
synchronized,
timestamp,
timestamp_millis,
to_bytes,
to_str,
unzip,
)
from localstack.utils.docker import DOCKER_CLIENT
from localstack.utils.http_utils import canonicalize_headers, parse_chunked_data
from localstack.utils.run import FuncThread
# logger
LOG = logging.getLogger(__name__)

# name pattern of IAM policies associated with Lambda functions
LAMBDA_POLICY_NAME_PATTERN = "lambda_policy_%s"

# constants
APP_NAME = "lambda_api"
PATH_ROOT = "/2015-03-31"
ARCHIVE_FILE_PATTERN = "%s/lambda.handler.*.jar" % config.TMP_FOLDER
LAMBDA_SCRIPT_PATTERN = "%s/lambda_script_*.py" % config.TMP_FOLDER
LAMBDA_ZIP_FILE_NAME = "original_lambda_archive.zip"
LAMBDA_JAR_FILE_NAME = "original_lambda_archive.jar"

# default timeout in seconds
LAMBDA_DEFAULT_TIMEOUT = 3

INVALID_PARAMETER_VALUE_EXCEPTION = "InvalidParameterValueException"
VERSION_LATEST = "$LATEST"
# maximum function payload size in bytes (presumably mirrors the AWS
# request size limit - TODO confirm)
FUNCTION_MAX_SIZE = 69905067

# allowed (min, max) BatchSize ranges per event source type
BATCH_SIZE_RANGES = {
    "kafka": (100, 10000),
    "kinesis": (100, 10000),
    "dynamodb": (100, 1000),
    "sqs": (10, 10),
}
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f+00:00"

app = Flask(APP_NAME)

# mutex for access to CWD and ENV
EXEC_MUTEX = threading.RLock()

# whether to use Docker for execution
DO_USE_DOCKER = None

# start characters indicating that a lambda result should be parsed as JSON
JSON_START_CHAR_MAP = {
    list: ("[",),
    tuple: ("[",),
    dict: ("{",),
    str: ('"',),
    bytes: ('"',),
    bool: ("t", "f"),
    type(None): ("n",),
    int: ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"),
    float: ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"),
}
POSSIBLE_JSON_TYPES = (str, bytes)
JSON_START_TYPES = tuple(set(JSON_START_CHAR_MAP.keys()) - set(POSSIBLE_JSON_TYPES))
JSON_START_CHARS = tuple(set(functools.reduce(lambda x, y: x + y, JSON_START_CHAR_MAP.values())))

# lambda executor instance
LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(
    config.LAMBDA_EXECUTOR, lambda_executors.DEFAULT_EXECUTOR
)

# IAM policy constants
IAM_POLICY_VERSION = "2012-10-17"

# Whether to check if the handler function exists while creating lambda function
CHECK_HANDLER_ON_CREATION = False
class LambdaRegion(RegionBackend):
    """Per-region in-memory state store for the Lambda API."""

    # map ARN strings to lambda function objects
    lambdas: Dict[str, LambdaFunction]
    # map ARN strings to CodeSigningConfig object
    code_signing_configs: Dict[str, CodeSigningConfig]
    # list of event source mappings for the API
    event_source_mappings: List[Dict]

    def __init__(self):
        self.lambdas = {}
        self.code_signing_configs = {}
        self.event_source_mappings = []
class ClientError(Exception):
    """API error carrying an HTTP status code and a message (or prebuilt Response)."""

    def __init__(self, msg, code=400):
        super().__init__(msg)
        self.code = code
        self.msg = msg

    def get_response(self):
        """Return msg unchanged if it is already a Response, else build an error response."""
        msg = self.msg
        return msg if isinstance(msg, Response) else error_response(msg, self.code)
class LambdaContext(object):
    """Execution context passed to Lambda handlers, mirroring the AWS runtime context."""

    DEFAULT_MEMORY_LIMIT = 1536

    def __init__(self, func_details, qualifier=None, context=None):
        """
        :param func_details: LambdaFunction details object
        :param qualifier: optional version/alias qualifier of this invocation
        :param context: optional dict with "client_context" / "identity" keys
        """
        # fix: the default of None was dereferenced below (context.get),
        # which raised AttributeError when no context was passed
        context = context or {}
        self.function_name = func_details.name()
        self.function_version = func_details.get_qualifier_version(qualifier)
        self.client_context = context.get("client_context")
        self.invoked_function_arn = func_details.arn()
        if qualifier:
            self.invoked_function_arn += ":" + qualifier
        self.cognito_identity = context.get("identity")
        self.aws_request_id = str(uuid.uuid4())
        self.memory_limit_in_mb = func_details.memory_size or self.DEFAULT_MEMORY_LIMIT
        self.log_group_name = "/aws/lambda/%s" % self.function_name
        self.log_stream_name = "%s/[1]%s" % (timestamp(format="%Y/%m/%d"), short_uid())

    def get_remaining_time_in_millis(self):
        # TODO implement!
        return 1000 * 60
class EventSourceListener:
    """Base class for listeners that feed event-source messages into Lambdas."""

    # registry of listener instances, keyed by source type (e.g., "sqs")
    INSTANCES: Dict[str, "EventSourceListener"] = {}

    @classmethod
    def get(cls, source_type):
        # TODO: potentially to be replaced with new plugin loading mechanism...
        # lazily instantiate one listener per known subclass on first access
        if not cls.INSTANCES:
            for clazz in get_all_subclasses(EventSourceListener):
                cls.INSTANCES[clazz.source_type()] = clazz()
        return cls.INSTANCES.get(source_type)

    @staticmethod
    def source_type() -> str:
        """Type discriminator - to be implemented by subclasses."""
        raise NotImplementedError

    def start(self):
        """Start listener in the background (for polling mode) - to be implemented by subclasses."""
        pass

    def process_event(self, event: Any):
        """Process the given event (for reactive mode)"""
        pass

    @staticmethod
    def start_listeners(event_source_mapping: Dict):
        # Derive the service type from the source ARN (third ARN segment), or
        # fall back to "kafka" for self-managed Kafka endpoints, then start
        # the matching listener if one exists.
        source_arn = event_source_mapping.get("EventSourceArn") or ""
        parts = source_arn.split(":")
        service_type = parts[2] if len(parts) > 2 else ""
        if not service_type:
            self_managed_endpoints = event_source_mapping.get("SelfManagedEventSource", {}).get(
                "Endpoints", {}
            )
            if self_managed_endpoints.get("KAFKA_BOOTSTRAP_SERVERS"):
                service_type = "kafka"
        instance = EventSourceListener.get(service_type)
        if instance:
            instance.start()

    @staticmethod
    def process_event_via_listener(service_type: str, event: Any):
        """Process event for the given service type (for reactive mode)"""
        instance = EventSourceListener.get(service_type)
        if not instance:
            return

        def _process(*args):
            instance.process_event(event)

        # start processing in background
        start_worker_thread(_process)
class EventSourceListenerSQS(EventSourceListener):
    """Listener that forwards SQS messages to mapped Lambda functions."""

    # SQS listener thread settings
    SQS_LISTENER_THREAD: Dict = {}
    SQS_POLL_INTERVAL_SEC: float = 1
    # Whether to use polling via SQS API (or, alternatively, reactive mode with SQS updates received directly in-memory)
    # Advantage of polling is that we can delete messages directly from the queue (via 'ReceiptHandle') after processing
    USE_POLLING = True

    @staticmethod
    def source_type():
        return "sqs"

    def start(self):
        # Start the background polling thread (no-op in reactive mode or if
        # a thread is already running).
        if not self.USE_POLLING:
            return
        if self.SQS_LISTENER_THREAD:
            return
        LOG.debug("Starting SQS message polling thread for Lambda API")
        self.SQS_LISTENER_THREAD["_thread_"] = thread = FuncThread(self._listener_loop)
        thread.start()

    def get_matching_event_sources(self) -> List[Dict]:
        # All event source mappings whose source ARN points to an SQS queue
        return get_event_sources(source_arn=r".*:sqs:.*")

    def process_event(self, event: Any):
        # Reactive mode only: handle an in-memory SQS event notification.
        if self.USE_POLLING:
            return
        # feed message into the first listening lambda (message should only get processed once)
        queue_url = event["QueueUrl"]
        try:
            queue_name = queue_url.rpartition("/")[2]
            queue_arn = aws_stack.sqs_queue_arn(queue_name)
            sources = get_event_sources(source_arn=queue_arn)
            arns = [s.get("FunctionArn") for s in sources]
            source = (sources or [None])[0]
            if not source:
                return False
            LOG.debug(
                "Found %s source mappings for event from SQS queue %s: %s"
                % (len(arns), queue_arn, arns)
            )
            # TODO: support message BatchSize here, same as for polling mode below
            messages = event["Messages"]
            self._process_messages_for_event_source(source, messages)
        except Exception:
            LOG.exception(f"Unable to run Lambda function on SQS messages from queue {queue_url}")

    def _listener_loop(self, *args):
        # Poll all mapped SQS queues in a loop and dispatch received messages.
        while True:
            try:
                sources = self.get_matching_event_sources()
                if not sources:
                    # Temporarily disable polling if no event sources are configured
                    # anymore. The loop will get restarted next time a message
                    # arrives and if an event source is configured.
                    self.SQS_LISTENER_THREAD.pop("_thread_")
                    return
                unprocessed_messages = {}
                sqs_client = aws_stack.connect_to_service(
                    "sqs",
                )
                for source in sources:
                    queue_arn = source["EventSourceArn"]
                    # clamp BatchSize to the SQS receive_message limit of 1..10
                    batch_size = max(min(source.get("BatchSize", 1), 10), 1)
                    try:
                        queue_url = aws_stack.sqs_queue_url_for_arn(queue_arn)
                        # retry messages that failed in the previous iteration first
                        messages = unprocessed_messages.pop(queue_arn, None)
                        if not messages:
                            result = sqs_client.receive_message(
                                QueueUrl=queue_url,
                                MessageAttributeNames=["All"],
                                MaxNumberOfMessages=batch_size,
                            )
                            messages = result.get("Messages")
                            if not messages:
                                continue
                        res = self._process_messages_for_event_source(source, messages)
                        if not res:
                            unprocessed_messages[queue_arn] = messages
                    except Exception as e:
                        LOG.debug("Unable to poll SQS messages for queue %s: %s" % (queue_arn, e))
            except Exception:
                pass
            finally:
                time.sleep(self.SQS_POLL_INTERVAL_SEC)

    def _process_messages_for_event_source(self, source, messages):
        # Forward a batch of SQS messages to the Lambda configured in the mapping.
        lambda_arn = source["FunctionArn"]
        queue_arn = source["EventSourceArn"]
        region_name = queue_arn.split(":")[3]
        queue_url = aws_stack.sqs_queue_url_for_arn(queue_arn)
        LOG.debug("Sending event from event source %s to Lambda %s" % (queue_arn, lambda_arn))
        res = self._send_event_to_lambda(
            queue_arn,
            queue_url,
            lambda_arn,
            messages,
            region=region_name,
        )
        return res

    def _send_event_to_lambda(self, queue_arn, queue_url, lambda_arn, messages, region):
        # Build an SQS Lambda event from the messages and invoke the function
        # asynchronously; successfully processed messages are deleted via callback.
        def delete_messages(result, func_arn, event, error=None, dlq_sent=None, **kwargs):
            if error and not dlq_sent:
                # Skip deleting messages from the queue in case of processing errors AND if
                # the message has not yet been sent to a dead letter queue (DLQ).
                # We'll pick them up and retry next time they become available on the queue.
                return
            sqs_client = aws_stack.connect_to_service("sqs")
            entries = [
                {"Id": r["receiptHandle"], "ReceiptHandle": r["receiptHandle"]} for r in records
            ]
            try:
                sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=entries)
            except Exception as e:
                LOG.info(
                    "Unable to delete Lambda events from SQS queue "
                    + "(please check SQS visibility timeout settings): %s - %s" % (entries, e)
                )

        records = []
        for msg in messages:
            message_attrs = message_attributes_to_lower(msg.get("MessageAttributes"))
            records.append(
                {
                    "body": msg.get("Body", "MessageBody"),
                    "receiptHandle": msg.get("ReceiptHandle"),
                    "md5OfBody": msg.get("MD5OfBody") or msg.get("MD5OfMessageBody"),
                    "eventSourceARN": queue_arn,
                    "eventSource": lambda_executors.EVENT_SOURCE_SQS,
                    "awsRegion": region,
                    "messageId": msg["MessageId"],
                    "attributes": msg.get("Attributes", {}),
                    "messageAttributes": message_attrs,
                    "md5OfMessageAttributes": msg.get("MD5OfMessageAttributes"),
                    "sqs": True,
                }
            )
        event = {"Records": records}
        # TODO implement retries, based on "RedrivePolicy.maxReceiveCount" in the queue settings
        res = run_lambda(
            func_arn=lambda_arn,
            event=event,
            context={},
            asynchronous=True,
            callback=delete_messages,
        )
        # treat HTTP >= 400 results as failure so the caller can retry the batch
        if isinstance(res, lambda_executors.InvocationResult):
            status_code = getattr(res.result, "status_code", 0)
            if status_code >= 400:
                return False
        return True
def cleanup():
    """Reset all Lambda state in the current region and clean up the executor."""
    current_region = LambdaRegion.get()
    current_region.lambdas = {}
    current_region.event_source_mappings = []
    LAMBDA_EXECUTOR.cleanup()
def func_arn(function_name, remove_qualifier=True):
    """Return the Lambda ARN for a function name, optionally stripping any qualifier."""
    segments = function_name.split(":function:")
    if remove_qualifier and len(segments) > 1:
        base_name = segments[1].split(":")[0]
        function_name = "%s:function:%s" % (segments[0], base_name)
    return aws_stack.lambda_function_arn(function_name)
def func_qualifier(function_name, qualifier=None):
    """Return '<arn>:<qualifier>' if the qualifier exists, the plain ARN otherwise
    (or None if the function is unknown)."""
    arn = aws_stack.lambda_function_arn(function_name)
    func_details = LambdaRegion.get().lambdas.get(arn)
    if not func_details:
        return func_details
    if func_details.qualifier_exists(qualifier):
        return "%s:%s" % (arn, qualifier)
    return arn
def check_batch_size_range(source_arn, batch_size=None):
    """Validate and normalize the BatchSize for the source type encoded in the ARN.

    :returns: the effective batch size (source-type minimum when not given)
    :raises ValueError: for unsupported source types or an out-of-range size
    """
    service = source_arn.split(":")[2].lower()
    if "secretsmanager" in service:
        service = "kafka"
    size_range = BATCH_SIZE_RANGES.get(service)
    if not size_range:
        raise ValueError(INVALID_PARAMETER_VALUE_EXCEPTION, "Unsupported event source type")
    min_size, max_size = size_range
    batch_size = batch_size or min_size
    if batch_size > max_size:
        raise ValueError(
            INVALID_PARAMETER_VALUE_EXCEPTION,
            "BatchSize {} exceeds the max of {}".format(batch_size, max_size),
        )
    return batch_size
def build_mapping_obj(data) -> Dict:
    """Build an event source mapping dict from a create-request payload.

    Supports both ARN-based sources (EventSourceArn) and self-managed
    sources (SelfManagedEventSource, e.g. Kafka clusters).
    """
    mapping = {}
    function_name = data["FunctionName"]
    enabled = data.get("Enabled", True)
    batch_size = data.get("BatchSize")
    mapping["UUID"] = str(uuid.uuid4())
    mapping["FunctionArn"] = func_arn(function_name)
    mapping["LastProcessingResult"] = "OK"
    mapping["StateTransitionReason"] = "User action"
    mapping["LastModified"] = format_timestamp_for_event_source_mapping()
    mapping["State"] = "Enabled" if enabled in [True, None] else "Disabled"
    mapping["ParallelizationFactor"] = data.get("ParallelizationFactor") or 1
    mapping["Topics"] = data.get("Topics") or []
    if "SelfManagedEventSource" in data:
        # self-managed source: batch size is validated against the first URI
        source_arn = data["SourceAccessConfigurations"][0]["URI"]
        mapping["SelfManagedEventSource"] = data["SelfManagedEventSource"]
        mapping["SourceAccessConfigurations"] = data["SourceAccessConfigurations"]
    else:
        source_arn = data["EventSourceArn"]
        mapping["EventSourceArn"] = source_arn
        mapping["StartingPosition"] = LAMBDA_DEFAULT_STARTING_POSITION
    batch_size = check_batch_size_range(source_arn, batch_size)
    mapping["BatchSize"] = batch_size
    return mapping
def format_timestamp(timestamp=None):
    """Format the given datetime (default: utcnow) as ISO-8601 with ms and '+0000' suffix."""
    ts = timestamp or datetime.utcnow()
    return isoformat_milliseconds(ts) + "+0000"
def format_timestamp_for_event_source_mapping():
    # event source mappings seem to use a different time format (required for Terraform compat.)
    now = datetime.utcnow()
    return now.timestamp()
def add_event_source(data):
    """Create a new event source mapping, register it, and start its listener."""
    new_mapping = build_mapping_obj(data)
    LambdaRegion.get().event_source_mappings.append(new_mapping)
    EventSourceListener.start_listeners(new_mapping)
    return new_mapping
def update_event_source(uuid_value, data):
    """Update an existing event source mapping identified by its UUID.

    :param uuid_value: UUID of the mapping to update
    :param data: request payload with the attributes to change
    :returns: the updated mapping dict, or {} if no mapping matches
    """
    region = LambdaRegion.get()
    function_name = data.get("FunctionName") or ""
    enabled = data.get("Enabled", True)
    for mapping in region.event_source_mappings:
        if uuid_value == mapping["UUID"]:
            if function_name:
                mapping["FunctionArn"] = func_arn(function_name)
            batch_size = data.get("BatchSize")
            if "SelfManagedEventSource" in mapping:
                batch_size = check_batch_size_range(
                    mapping["SourceAccessConfigurations"][0]["URI"],
                    batch_size or mapping["BatchSize"],
                )
            else:
                batch_size = check_batch_size_range(
                    mapping["EventSourceArn"], batch_size or mapping["BatchSize"]
                )
            mapping["State"] = "Enabled" if enabled in [True, None] else "Disabled"
            mapping["LastModified"] = format_timestamp_for_event_source_mapping()
            mapping["BatchSize"] = batch_size
            # fix: the previous condition `in (mapping and data)` only ever
            # checked membership in `data` (since `mapping` is truthy here);
            # make that intent explicit
            if "SourceAccessConfigurations" in data:
                mapping["SourceAccessConfigurations"] = data["SourceAccessConfigurations"]
            return mapping
    return {}
def delete_event_source(uuid_value):
    """Remove and return the event source mapping with the given UUID ({} if absent)."""
    mappings = LambdaRegion.get().event_source_mappings
    for index, mapping in enumerate(mappings):
        if mapping["UUID"] == uuid_value:
            return mappings.pop(index)
    return {}
@synchronized(lock=EXEC_MUTEX)
def use_docker():
    """Determine (once, cached in DO_USE_DOCKER) whether Lambdas run in Docker.

    Returns True only if a docker-based executor is configured AND the
    Docker daemon is actually reachable.
    """
    global DO_USE_DOCKER
    if DO_USE_DOCKER is None:
        DO_USE_DOCKER = False
        if "docker" in config.LAMBDA_EXECUTOR:
            has_docker = DOCKER_CLIENT.has_docker()
            if not has_docker:
                LOG.warning(
                    (
                        "Lambda executor configured as LAMBDA_EXECUTOR=%s but Docker "
                        "is not accessible. Please make sure to mount the Docker socket "
                        "/var/run/docker.sock into the container."
                    )
                    % config.LAMBDA_EXECUTOR
                )
            DO_USE_DOCKER = has_docker
    return DO_USE_DOCKER
def fix_proxy_path_params(path_params):
    """Rename the greedy 'proxy+' path parameter to 'proxy', in place."""
    value = path_params.get("proxy+")
    if value:
        path_params.pop("proxy+")
        path_params["proxy"] = value
def message_attributes_to_lower(message_attrs):
    """Convert message attribute details (first characters) to lower case (e.g., stringValue, dataType)."""
    message_attrs = message_attrs or {}
    for attr in message_attrs.values():
        if not isinstance(attr, dict):
            continue
        # snapshot the keys, since entries are re-inserted under new names
        for key in list(attr.keys()):
            attr[first_char_to_lower(key)] = attr.pop(key)
    return message_attrs
def process_apigateway_invocation(
    func_arn,
    path,
    payload,
    stage,
    api_id,
    headers=None,
    is_base64_encoded=False,
    resource_path=None,
    method=None,
    path_params=None,
    query_string_params=None,
    stage_variables=None,
    request_context=None,
    event_context=None,
):
    """Invoke the Lambda behind an API Gateway integration and return its result.

    Fix: the optional dict parameters previously used shared mutable
    defaults ({}); they now default to None and are normalized below
    (behavior for all existing callers is unchanged).
    NOTE(review): `stage` and `api_id` are unused in this body - presumably
    kept for interface compatibility; confirm against callers.
    """
    try:
        headers = headers or {}
        stage_variables = stage_variables or {}
        request_context = request_context or {}
        event_context = event_context or {}
        resource_path = resource_path or path
        event = construct_invocation_event(
            method, path, headers, payload, query_string_params, is_base64_encoded
        )
        path_params = dict(path_params or {})
        fix_proxy_path_params(path_params)
        event["pathParameters"] = path_params
        event["resource"] = resource_path
        event["requestContext"] = request_context
        event["stageVariables"] = stage_variables
        LOG.debug(
            "Running Lambda function %s from API Gateway invocation: %s %s"
            % (func_arn, method or "GET", path)
        )
        asynchronous = not config.SYNCHRONOUS_API_GATEWAY_EVENTS
        inv_result = run_lambda(
            func_arn=func_arn,
            event=event,
            context=event_context,
            asynchronous=asynchronous,
        )
        return inv_result.result
    except Exception as e:
        LOG.warning(
            "Unable to run Lambda function on API Gateway message: %s %s"
            % (e, traceback.format_exc())
        )
def construct_invocation_event(
    method, path, headers, data, query_string_params=None, is_base64_encoded=False
):
    """Build the API Gateway proxy-integration event dict for a Lambda invocation.

    Fix: `query_string_params` previously defaulted to a shared mutable {};
    it now defaults to None - behavior is identical, since {} is falsy and
    was always replaced by the parsed request data below.
    """
    query_string_params = query_string_params or parse_request_data(method, path, "")
    # AWS canonicalizes header names, converting them to lower-case
    headers = canonicalize_headers(headers)
    event = {
        "path": path,
        "headers": dict(headers),
        "multiValueHeaders": multi_value_dict_for_list(headers),
        "body": data,
        "isBase64Encoded": is_base64_encoded,
        "httpMethod": method,
        "queryStringParameters": query_string_params,
        "multiValueQueryStringParameters": multi_value_dict_for_list(query_string_params),
    }
    return event
def process_sns_notification(
    func_arn,
    topic_arn,
    subscription_arn,
    message,
    message_id,
    message_attributes,
    unsubscribe_url,
    subject="",
):
    """Build an SNS notification event and invoke the subscribed Lambda.

    :returns: the result of the (possibly asynchronous) Lambda invocation
    """
    event = {
        "Records": [
            {
                "EventSource": "localstack:sns",
                "EventVersion": "1.0",
                "EventSubscriptionArn": subscription_arn,
                "Sns": {
                    "Type": "Notification",
                    "MessageId": message_id,
                    "TopicArn": topic_arn,
                    "Subject": subject,
                    "Message": message,
                    "Timestamp": timestamp_millis(),
                    "SignatureVersion": "1",
                    # TODO Add a more sophisticated solution with an actual signature
                    # Hardcoded
                    "Signature": "EXAMPLEpH+..",
                    "SigningCertUrl": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-000000000.pem",
                    "UnsubscribeUrl": unsubscribe_url,
                    "MessageAttributes": message_attributes,
                },
            }
        ]
    }
    inv_result = run_lambda(
        func_arn=func_arn,
        event=event,
        context={},
        asynchronous=not config.SYNCHRONOUS_SNS_EVENTS,
    )
    return inv_result.result
def process_kinesis_records(records, stream_name):
    """Feed Kinesis records (in BatchSize chunks) into all mapped Lambdas."""

    def chunks(lst, n):
        # Yield successive n-sized chunks from lst.
        for i in range(0, len(lst), n):
            yield lst[i : i + n]

    # feed records into listening lambdas
    try:
        stream_arn = aws_stack.kinesis_stream_arn(stream_name)
        sources = get_event_sources(source_arn=stream_arn)
        for source in sources:
            arn = source["FunctionArn"]
            for chunk in chunks(records, source["BatchSize"]):
                shard_id = "shardId-000000000000"
                event = {
                    "Records": [
                        {
                            "eventID": "{0}:{1}".format(shard_id, rec["sequenceNumber"]),
                            "eventSourceARN": stream_arn,
                            "eventSource": "aws:kinesis",
                            "eventVersion": "1.0",
                            "eventName": "aws:kinesis:record",
                            "invokeIdentityArn": "arn:aws:iam::{0}:role/lambda-role".format(
                                TEST_AWS_ACCOUNT_ID
                            ),
                            "awsRegion": aws_stack.get_region(),
                            "kinesis": rec,
                        }
                        for rec in chunk
                    ]
                }
                lock_discriminator = None
                if not config.SYNCHRONOUS_KINESIS_EVENTS:
                    # bound concurrency of async invocations per stream/shard
                    # by the mapping's ParallelizationFactor
                    lock_discriminator = f"{stream_arn}/{shard_id}"
                    lambda_executors.LAMBDA_ASYNC_LOCKS.assure_lock_present(
                        lock_discriminator, BoundedSemaphore(source["ParallelizationFactor"])
                    )
                run_lambda(
                    func_arn=arn,
                    event=event,
                    context={},
                    asynchronous=not config.SYNCHRONOUS_KINESIS_EVENTS,
                    lock_discriminator=lock_discriminator,
                )
    except Exception as e:
        LOG.warning(
            "Unable to run Lambda function on Kinesis records: %s %s" % (e, traceback.format_exc())
        )
def get_event_sources(func_name=None, source_arn=None):
    """Return event source mappings, filtered by function name/ARN and source ARN pattern."""
    matches = []
    for mapping in LambdaRegion.get().event_source_mappings:
        if func_name and mapping["FunctionArn"] not in [func_name, func_arn(func_name)]:
            continue
        if _arn_match(mapped=mapping.get("EventSourceArn"), searched=source_arn):
            matches.append(mapping)
    return matches
def _arn_match(mapped, searched):
if not mapped:
return False
if not searched or mapped == searched:
return True
# Some types of ARNs can end with a path separated by slashes, for
# example the ARN of a DynamoDB stream is tableARN/stream/ID. It's
# a little counterintuitive that a more specific mapped ARN can
# match a less specific ARN on the event, but some integration tests
# rely on it for things like subscribing to a stream and matching an
# event labeled with the table ARN.
if re.match(r"^%s$" % searched, mapped):
return True
if mapped.startswith(searched):
suffix = mapped[len(searched) :]
return suffix[0] == "/"
return False
def get_function_version(arn, version):
    """Return the formatted details of the given function version."""
    func_details = LambdaRegion.get().lambdas.get(arn)
    return format_func_details(func_details, version=version, always_add_version=True)
def publish_new_function_version(arn):
    """Publish a new numbered version from $LATEST if its code hash changed.

    :returns: formatted details of the newest (possibly newly created) version
    """
    region = LambdaRegion.get()
    func_details = region.lambdas.get(arn)
    versions = func_details.versions
    max_version_number = func_details.max_version()
    next_version_number = max_version_number + 1
    latest_hash = versions.get(VERSION_LATEST).get("CodeSha256")
    max_version = versions.get(str(max_version_number))
    max_version_hash = max_version.get("CodeSha256") if max_version else ""
    # only create a new version if $LATEST differs from the newest published one
    if latest_hash != max_version_hash:
        versions[str(next_version_number)] = {
            "CodeSize": versions.get(VERSION_LATEST).get("CodeSize"),
            "CodeSha256": versions.get(VERSION_LATEST).get("CodeSha256"),
            "Function": versions.get(VERSION_LATEST).get("Function"),
            "RevisionId": str(uuid.uuid4()),
        }
        max_version_number = next_version_number
    return get_function_version(arn, str(max_version_number))
def do_list_versions(arn):
    """Return all versions of the function, sorted by version string."""
    func_versions = LambdaRegion.get().lambdas.get(arn).versions
    versions = [get_function_version(arn, version) for version in func_versions.keys()]
    return sorted(versions, key=lambda entry: str(entry.get("Version")))
def do_update_alias(arn, alias, version, description=None):
    """Create or update an alias for the given function ARN and return it."""
    alias_details = {
        "AliasArn": arn + ":" + alias,
        "FunctionVersion": version,
        "Name": alias,
        "Description": description or "",
        "RevisionId": str(uuid.uuid4()),
    }
    LambdaRegion.get().lambdas.get(arn).aliases[alias] = alias_details
    return alias_details
def run_lambda(
    func_arn,
    event,
    context={},
    version=None,
    suppress_output=False,
    asynchronous=False,
    callback=None,
    lock_discriminator: str = None,
):
    """Run the Lambda function identified by func_arn with the given event.

    :param func_arn: function ARN (the region is parsed from the ARN)
    :param event: event payload passed to the handler
    :param context: client context dict (keys: client_context, identity);
        NOTE(review): mutable default, but never mutated in this function
    :param version: optional function version/qualifier
    :param suppress_output: if True, redirect stdout/stderr during execution
    :param asynchronous: invoke as "Event" (async) instead of "RequestResponse"
    :param callback: completion callback forwarded to the executor
    :param lock_discriminator: key for per-source concurrency locking
    :returns: lambda_executors.InvocationResult (also for lookup/exec errors)
    """
    region_name = func_arn.split(":")[3]
    region = LambdaRegion.get(region_name)
    if suppress_output:
        stdout_ = sys.stdout
        stderr_ = sys.stderr
        stream = StringIO()
        sys.stdout = stream
        sys.stderr = stream
    try:
        func_arn = aws_stack.fix_arn(func_arn)
        func_details = region.lambdas.get(func_arn)
        if not func_details:
            LOG.debug("Unable to find details for Lambda %s in region %s" % (func_arn, region_name))
            result = not_found_error(msg="The resource specified in the request does not exist.")
            return lambda_executors.InvocationResult(result)

        context = LambdaContext(func_details, version, context)

        # forward invocation to external endpoint, if configured
        invocation_type = "Event" if asynchronous else "RequestResponse"
        invoke_result = forward_to_external_url(func_details, event, context, invocation_type)
        if invoke_result is not None:
            return invoke_result

        result = LAMBDA_EXECUTOR.execute(
            func_arn,
            func_details,
            event,
            context=context,
            version=version,
            asynchronous=asynchronous,
            callback=callback,
            lock_discriminator=lock_discriminator,
        )
    except Exception as e:
        # wrap any execution error into a 500 InvocationResult
        exc_type, exc_value, exc_traceback = sys.exc_info()
        response = {
            "errorType": str(exc_type.__name__),
            "errorMessage": str(e),
            "stackTrace": traceback.format_tb(exc_traceback),
        }
        LOG.info(
            "Error executing Lambda function %s: %s %s" % (func_arn, e, traceback.format_exc())
        )
        log_output = e.log_output if isinstance(e, lambda_executors.InvocationException) else ""
        return lambda_executors.InvocationResult(
            Response(json.dumps(response), status=500), log_output
        )
    finally:
        if suppress_output:
            sys.stdout = stdout_
            sys.stderr = stderr_
    return result
def load_source(name, file):
    # Load a Python module from the given file path.
    # NOTE(review): SourceFileLoader.load_module() is deprecated in newer
    # Python versions; kept here for behavior compatibility (it registers
    # the module in sys.modules, which exec_lambda_code relies on).
    return importlib.machinery.SourceFileLoader(name, file).load_module()
def exec_lambda_code(script, handler_function="handler", lambda_cwd=None, lambda_env=None):
    """Execute the given Lambda script source and return its handler function.

    :param script: Python source code of the Lambda module
    :param handler_function: name of the handler to extract from the module
    :param lambda_cwd: optional working directory to switch into during load
    :param lambda_env: optional environment variables to apply during load
    """
    # TODO: The code in this function is generally not thread-safe and potentially insecure
    # (e.g., mutating environment variables, and globally loaded modules). Should be redesigned.

    def _do_exec_lambda_code():
        # Temporarily switch CWD / sys.path / environment to the lambda's context
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                previous_cwd = os.getcwd()
                os.chdir(lambda_cwd)
                sys.path = [lambda_cwd] + sys.path
            if lambda_env:
                previous_env = dict(os.environ)
                os.environ.update(lambda_env)
        # generate lambda file name
        lambda_id = "l_%s" % short_uid()
        lambda_file = LAMBDA_SCRIPT_PATTERN.replace("*", lambda_id)
        save_file(lambda_file, script)
        # delete temporary .py and .pyc files on exit
        TMP_FILES.append(lambda_file)
        TMP_FILES.append("%sc" % lambda_file)
        try:
            pre_sys_modules_keys = set(sys.modules.keys())
            # set default env variables required for most Lambda handlers
            env_vars_before = lambda_executors.LambdaExecutorLocal.set_default_env_variables()
            try:
                handler_module = load_source(lambda_id, lambda_file)
                module_vars = handler_module.__dict__
            finally:
                lambda_executors.LambdaExecutorLocal.reset_default_env_variables(env_vars_before)
            # the above import can bring files for the function
            # (eg settings.py) into the global namespace. subsequent
            # calls can pick up file from another function, causing
            # general issues.
            post_sys_modules_keys = set(sys.modules.keys())
            for key in post_sys_modules_keys:
                if key not in pre_sys_modules_keys:
                    sys.modules.pop(key)
        except Exception as e:
            LOG.error("Unable to exec: %s %s" % (script, traceback.format_exc()))
            raise e
        finally:
            # restore the previous CWD / sys.path / environment
            if lambda_cwd or lambda_env:
                if lambda_cwd:
                    os.chdir(previous_cwd)
                    sys.path.pop(0)
                if lambda_env:
                    os.environ = previous_env
        return module_vars[handler_function]

    # guard with the exec mutex only when global state (CWD/env) is mutated
    lock = EXEC_MUTEX if lambda_cwd or lambda_env else empty_context_manager()
    with lock:
        return _do_exec_lambda_code()
def get_handler_function_from_name(handler_name, runtime=None):
    """Extract the bare function name from a Lambda handler identifier.

    .NET handlers use the form "Assembly::Namespace.Class::Method" (split on ":"),
    all other runtimes use "module.function" (split on ".").
    """
    effective_runtime = runtime or LAMBDA_DEFAULT_RUNTIME
    separator = ":" if effective_runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)) else "."
    return handler_name.split(separator)[-1]
def error_response(msg, code=500, error_type="InternalFailure"):
    """Log ``msg`` at debug level and return a Flask JSON error response.

    :param msg: error message included in the response body
    :param code: HTTP status code (default 500)
    :param error_type: AWS-style error type string (default "InternalFailure")
    """
    LOG.debug(msg)
    return aws_responses.flask_error_response_json(msg, code=code, error_type=error_type)
def get_zip_bytes(function_code):
    """Returns the ZIP file contents from a FunctionCode dict.
    :type function_code: dict
    :param function_code: https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionCode.html
    :returns: bytes of the Zip file.
    """
    function_code = function_code or {}

    if "S3Bucket" in function_code:
        # download the archive from S3 into an in-memory buffer
        buffer = BytesIO()
        s3_client = aws_stack.connect_to_service("s3")
        try:
            s3_client.download_fileobj(function_code["S3Bucket"], function_code["S3Key"], buffer)
        except Exception as e:
            raise ClientError("Unable to fetch Lambda archive from S3: %s" % e, 404)
        return buffer.getvalue()

    if "ZipFile" in function_code:
        # inline code is transmitted base64-encoded
        return base64.b64decode(function_code["ZipFile"])

    if "ImageUri" in function_code:
        # container-image based functions carry no ZIP content
        return None

    raise ClientError("No valid Lambda archive specified: %s" % list(function_code.keys()))
def get_java_handler(zip_file_content, main_file, func_details=None):
    """Creates a Java handler from an uploaded ZIP or JAR.
    :type zip_file_content: bytes
    :param zip_file_content: ZIP file bytes.
    :type main_file: str
    :param main_file: Filepath to the uploaded ZIP or JAR file.
    :returns: function or flask.Response
    """
    # reject anything that is not a valid zip/jar archive up front
    if not is_zip_file(zip_file_content):
        raise ClientError(
            error_response(
                "Unable to extract Java Lambda handler - file is not a valid zip/jar file (%s, %s bytes)"
                % (main_file, len(zip_file_content or "")),
                400,
                error_type="ValidationError",
            )
        )

    def execute(event, context):
        # delegate execution to the local Java Lambda executor
        return lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
            event, context, main_file=main_file, func_details=func_details
        )

    return execute
def set_archive_code(code, lambda_name, zip_file_content=None):
    """Store the code archive for a Lambda function and return its working directory.

    :param code: FunctionCode dict (S3 location, inline ZipFile, or local mount marker)
    :param lambda_name: name of the Lambda function
    :param zip_file_content: optional pre-fetched ZIP bytes (fetched from ``code`` if absent)
    :returns: path of the local directory holding the code, or None if no content available
    """
    region = LambdaRegion.get()
    # get metadata
    lambda_arn = func_arn(lambda_name)
    lambda_details = region.lambdas[lambda_arn]
    is_local_mount = code.get("S3Bucket") == config.BUCKET_MARKER_LOCAL

    if is_local_mount and config.LAMBDA_REMOTE_DOCKER:
        # local mounts cannot work when the Docker daemon runs on a remote host
        msg = 'Please note that Lambda mounts (bucket name "%s") cannot be used with LAMBDA_REMOTE_DOCKER=1'
        raise Exception(msg % config.BUCKET_MARKER_LOCAL)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(lambda_arn)

    if is_local_mount:
        # Mount or use a local folder lambda executors can reference
        # WARNING: this means we're pointing lambda_cwd to a local path in the user's
        # file system! We must ensure that there is no data loss (i.e., we must *not* add
        # this folder to TMP_FILES or similar).
        lambda_details.cwd = code.get("S3Key")
        return code["S3Key"]

    # get file content
    zip_file_content = zip_file_content or get_zip_bytes(code)
    if not zip_file_content:
        return

    # Save the zip file to a temporary file that the lambda executors can reference
    code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest())
    latest_version = lambda_details.get_version(VERSION_LATEST)
    latest_version["CodeSize"] = len(zip_file_content)
    latest_version["CodeSha256"] = code_sha_256.decode("utf-8")
    tmp_dir = "%s/zipfile.%s" % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = "%s/%s" % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    # temp dir (and archive) is cleaned up on shutdown
    TMP_FILES.append(tmp_dir)
    lambda_details.cwd = tmp_dir
    return tmp_dir
def set_function_code(lambda_function: LambdaFunction):
    """Deploy the code of ``lambda_function`` and initialize executor plugins.

    :returns: dict with the function name, suitable for merging into API responses
    """

    def _set_and_configure():
        handler = do_set_function_code(lambda_function)
        lambda_function.versions.get(VERSION_LATEST)["Function"] = handler
        # let executor plugins post-process the freshly deployed code
        for plugin in lambda_executors.LambdaExecutorPlugin.get_plugins():
            plugin.init_function_code(lambda_function)

    # unzipping can take some time - limit the execution time to avoid client/network timeout issues
    run_for_max_seconds(config.LAMBDA_CODE_EXTRACT_TIME, _set_and_configure)
    return {"FunctionName": lambda_function.name()}
def do_set_function_code(lambda_function: LambdaFunction):
    """Deploy the code of a Lambda function and return a Python handler callable.

    Resolves the function's code (S3, inline ZIP, or local mount), extracts the
    archive, and - for runtimes executed in-process (python/node/go without
    Docker) - constructs a handler function. Falls back to ``generic_handler``
    (which raises on invocation) when no local executor applies.

    :returns: handler callable, or None if no working directory could be determined
    """

    def generic_handler(*_):
        # fallback handler: only ever invoked if no suitable executor was found
        raise ClientError(
            (
                'Unable to find executor for Lambda function "%s". Note that '
                + "Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker"
            )
            % lambda_name
        )

    region = LambdaRegion.get()
    lambda_name = lambda_function.name()
    arn = lambda_function.arn()
    lambda_details = region.lambdas[arn]
    runtime = get_lambda_runtime(lambda_details)
    lambda_environment = lambda_details.envvars
    handler_name = lambda_details.handler = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
    code_passed = lambda_function.code
    is_local_mount = code_passed.get("S3Bucket") == config.BUCKET_MARKER_LOCAL
    zip_file_content = None
    lambda_cwd = lambda_function.cwd

    # Stop/remove any containers currently running for this function
    LAMBDA_EXECUTOR.cleanup(arn)

    if code_passed:
        lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
        if not is_local_mount:
            # Save the zip file to a temporary file that the lambda executors can reference
            zip_file_content = get_zip_bytes(code_passed)
    else:
        lambda_cwd = lambda_cwd or lambda_details.cwd

    if not lambda_cwd:
        return

    # get local lambda working directory
    tmp_file = os.path.join(lambda_cwd, LAMBDA_ZIP_FILE_NAME)

    if not zip_file_content:
        zip_file_content = load_file(tmp_file, mode="rb")

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    is_java = lambda_executors.is_java_lambda(runtime)

    if is_java:
        # The Lambda executors for Docker subclass LambdaExecutorContainers, which
        # runs Lambda in Docker by passing all *.jar files in the function working
        # directory as part of the classpath. Obtain a Java handler function below.
        try:
            lambda_handler = get_java_handler(
                zip_file_content, tmp_file, func_details=lambda_details
            )
        except Exception as e:
            # this can happen, e.g., for Lambda code mounted via __local__ -> ignore
            LOG.debug("Unable to determine Lambda Java handler: %s", e)

    if not is_local_mount:
        # Lambda code must be uploaded in Zip format
        if not is_zip_file(zip_file_content):
            raise ClientError(
                "Uploaded Lambda code for runtime ({}) is not in Zip format".format(runtime)
            )
        # Unzip the Lambda archive contents
        unzip(tmp_file, lambda_cwd)

    # Obtain handler details for any non-Java Lambda function
    if not is_java:
        handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
        main_file = "%s/%s" % (lambda_cwd, handler_file)

        if CHECK_HANDLER_ON_CREATION and not os.path.exists(main_file):
            # Raise an error if (1) this is not a local mount lambda, or (2) we're
            # running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            # -> We do *not* want to raise an error if we're using local mount in non-remote Docker
            if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
                config_debug = 'Config for local mount, docker, remote: "%s", "%s", "%s"' % (
                    is_local_mount,
                    use_docker(),
                    config.LAMBDA_REMOTE_DOCKER,
                )
                LOG.debug("Lambda archive content:\n%s" % file_list)
                raise ClientError(
                    error_response(
                        "Unable to find handler script (%s) in Lambda archive. %s"
                        % (main_file, config_debug),
                        400,
                        error_type="ValidationError",
                    )
                )

        # Python Lambdas running locally are loaded and executed in-process
        if runtime.startswith("python") and not use_docker():
            try:
                # make sure the file is actually readable, then read contents
                ensure_readable(main_file)
                zip_file_content = load_file(main_file, mode="rb")
                # extract handler
                handler_function = get_handler_function_from_name(handler_name, runtime=runtime)
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment,
                )
            except Exception as e:
                raise ClientError("Unable to get handler function from lambda code: %s" % e)

        # Node.js Lambdas running locally are delegated to the local JS executor
        if runtime.startswith("node") and not use_docker():
            ensure_readable(main_file)

            def execute(event, context):
                result = lambda_executors.EXECUTOR_LOCAL.execute_javascript_lambda(
                    event, context, main_file=main_file, func_details=lambda_details
                )
                return result

            lambda_handler = execute

        # Go Lambdas running locally require the Go runtime to be installed first
        if runtime.startswith("go1") and not use_docker():
            install_go_lambda_runtime()
            ensure_readable(main_file)

            def execute_go(event, context):
                result = lambda_executors.EXECUTOR_LOCAL.execute_go_lambda(
                    event, context, main_file=main_file, func_details=lambda_details
                )
                return result

            lambda_handler = execute_go

    return lambda_handler
def do_list_functions():
    """Return formatted details of all Lambda functions in the current region."""
    region = LambdaRegion.get()
    current_region = aws_stack.get_region()
    results = []
    for f_arn, func in region.lambdas.items():
        if type(func) != LambdaFunction:
            continue
        # skip functions registered under other regions (ARN format: arn:aws:lambda:<region>:...)
        if f_arn.split(":")[3] != current_region:
            continue
        func_name = f_arn.split(":function:")[-1]
        func_details = region.lambdas.get(func_arn(func_name))
        if not func_details:
            # this can happen if we're accessing Lambdas from a different region (ARN mismatch)
            continue
        details = format_func_details(func_details)
        details["Tags"] = func.tags
        results.append(details)
    return results
def format_func_details(func_details, version=None, always_add_version=False):
    """Render an AWS-compatible function configuration dict for the given version.

    :param func_details: LambdaFunction instance to describe
    :param version: version identifier (defaults to $LATEST)
    :param always_add_version: append the version suffix to the ARN even for $LATEST
    """
    version = version or VERSION_LATEST
    version_details = func_details.get_version(version)
    result = {
        "CodeSha256": version_details.get("CodeSha256"),
        "Role": func_details.role,
        "KMSKeyArn": func_details.kms_key_arn,
        "Version": version,
        "VpcConfig": func_details.vpc_config,
        "FunctionArn": func_details.arn(),
        "FunctionName": func_details.name(),
        "CodeSize": version_details.get("CodeSize"),
        "Handler": func_details.handler,
        "Runtime": func_details.runtime,
        "Timeout": func_details.timeout,
        "Description": func_details.description,
        "MemorySize": func_details.memory_size,
        "LastModified": format_timestamp(func_details.last_modified),
        "TracingConfig": func_details.tracing_config or {"Mode": "PassThrough"},
        "RevisionId": version_details.get("RevisionId"),
        "State": "Active",
        "LastUpdateStatus": "Successful",
        "PackageType": func_details.package_type,
        "ImageConfig": getattr(func_details, "image_config", None),
    }
    if func_details.dead_letter_config:
        result["DeadLetterConfig"] = func_details.dead_letter_config
    if func_details.envvars:
        result["Environment"] = {"Variables": func_details.envvars}
    # append the version suffix unless the ARN already carries a qualifier
    arn_is_unqualified = len(result["FunctionArn"].split(":")) <= 7
    if (always_add_version or version != VERSION_LATEST) and arn_is_unqualified:
        result["FunctionArn"] += ":%s" % version
    return result
def forward_to_external_url(func_details, event, context, invocation_type):
    """If LAMBDA_FORWARD_URL is configured, forward the invocation of this Lambda to the configured URL."""
    func_forward_url = (
        func_details.envvars.get("LOCALSTACK_LAMBDA_FORWARD_URL") or config.LAMBDA_FORWARD_URL
    )
    if not func_forward_url:
        return

    func_name = func_details.name()
    url = "%s%s/functions/%s/invocations" % (
        func_forward_url,
        PATH_ROOT,
        func_name,
    )

    # pass env vars along, pointing LOCALSTACK_HOSTNAME/EDGE_PORT at the external endpoint
    copied_env_vars = func_details.envvars.copy()
    copied_env_vars["LOCALSTACK_HOSTNAME"] = config.HOSTNAME_EXTERNAL
    copied_env_vars["LOCALSTACK_EDGE_PORT"] = str(config.EDGE_PORT)

    # encode function metadata and invocation context as X-Amz-* request headers
    headers = aws_stack.mock_aws_request_headers("lambda")
    headers["X-Amz-Region"] = func_details.region()
    headers["X-Amz-Request-Id"] = context.aws_request_id
    headers["X-Amz-Handler"] = func_details.handler
    headers["X-Amz-Function-ARN"] = context.invoked_function_arn
    headers["X-Amz-Function-Name"] = context.function_name
    headers["X-Amz-Function-Version"] = context.function_version
    headers["X-Amz-Role"] = func_details.role
    headers["X-Amz-Runtime"] = func_details.runtime
    headers["X-Amz-Timeout"] = str(func_details.timeout)
    headers["X-Amz-Memory-Size"] = str(context.memory_limit_in_mb)
    headers["X-Amz-Log-Group-Name"] = context.log_group_name
    headers["X-Amz-Log-Stream-Name"] = context.log_stream_name
    headers["X-Amz-Env-Vars"] = json.dumps(copied_env_vars)
    headers["X-Amz-Last-Modified"] = str(int(func_details.last_modified.timestamp() * 1000))
    headers["X-Amz-Invocation-Type"] = invocation_type
    headers["X-Amz-Log-Type"] = "Tail"
    if context.client_context:
        headers["X-Amz-Client-Context"] = context.client_context
    if context.cognito_identity:
        headers["X-Amz-Cognito-Identity"] = context.cognito_identity

    data = json.dumps(json_safe(event)) if isinstance(event, dict) else str(event)
    LOG.debug("Forwarding Lambda invocation to LAMBDA_FORWARD_URL: %s" % config.LAMBDA_FORWARD_URL)
    result = safe_requests.post(url, data, headers=headers)
    if result.status_code >= 400:
        raise Exception(
            "Received error status code %s from external Lambda invocation" % result.status_code
        )
    # fall back to the raw bytes if the response body cannot be decoded to str
    content = run_safe(lambda: to_str(result.content)) or result.content
    LOG.debug(
        "Received result from external Lambda endpoint (status %s): %s"
        % (result.status_code, content)
    )
    result = lambda_executors.InvocationResult(content)
    return result
def forward_to_fallback_url(func_arn, data):
    """If LAMBDA_FALLBACK_URL is configured, forward the invocation of this non-existing
    Lambda to the configured URL."""
    if not config.LAMBDA_FALLBACK_URL:
        return

    lambda_name = aws_stack.lambda_function_name(func_arn)
    fallback_url = config.LAMBDA_FALLBACK_URL

    if fallback_url.startswith("dynamodb://"):
        # record the invocation as an item in the configured DynamoDB table
        table_name = urlparse(fallback_url.replace("dynamodb://", "http://")).netloc
        dynamodb = aws_stack.connect_to_service("dynamodb")
        aws_stack.create_dynamodb_table(table_name, partition_key="id")
        dynamodb.put_item(
            TableName=table_name,
            Item={
                "id": {"S": short_uid()},
                "timestamp": {"N": str(now_utc())},
                "payload": {"S": data},
                "function_name": {"S": lambda_name},
            },
        )
        return ""

    if re.match(r"^https?://.+", fallback_url):
        # POST the payload to the external HTTP(S) endpoint
        headers = {
            "lambda-function-name": lambda_name,
            "Content-Type": APPLICATION_JSON,
        }
        response = safe_requests.post(fallback_url, data, headers=headers)
        content = response.content
        try:
            # parse the response into a dictionary to get details
            # like function error etc.
            content = json.loads(content)
        except Exception:
            pass
        return content

    raise ClientError("Unexpected value for LAMBDA_FALLBACK_URL: %s" % fallback_url)
def get_lambda_policy(function, qualifier=None):
    """Return the IAM policy document attached to the given Lambda function, or None.

    :param function: Lambda function name
    :param qualifier: optional version/alias qualifier used to match the policy resource
    """
    iam_client = aws_stack.connect_to_service("iam")
    policies = iam_client.list_policies(Scope="Local", MaxItems=500)["Policies"]
    docs = []
    for p in policies:
        # !TODO: Cache policy documents instead of running N+1 API calls here!
        versions = iam_client.list_policy_versions(PolicyArn=p["Arn"])["Versions"]
        default_version = [v for v in versions if v.get("IsDefaultVersion")]
        versions = default_version or versions
        doc = versions[0]["Document"]
        doc = doc if isinstance(doc, dict) else json.loads(doc)
        if not isinstance(doc["Statement"], list):
            # normalize single-statement policies to a list of statements
            doc["Statement"] = [doc["Statement"]]
        for stmt in doc["Statement"]:
            stmt["Principal"] = stmt.get("Principal") or {"AWS": TEST_AWS_ACCOUNT_ID}
        doc["PolicyArn"] = p["Arn"]
        doc["Id"] = "default"
        docs.append(doc)
    # match the policy whose first statement targets this (qualified) function ARN
    res_qualifier = func_qualifier(function, qualifier)
    policy = [d for d in docs if d["Statement"][0]["Resource"] == res_qualifier]
    return (policy or [None])[0]
def lookup_function(function, region, request_url):
    """Build the GetFunction response (configuration, code location, tags).

    :param function: formatted function details dict (as returned by do_list_functions)
    :param region: LambdaRegion state holding the function registry
    :param request_url: request URL used to derive the code download location
    """
    result = {
        "Configuration": function,
        "Code": {"Location": "%s/code" % request_url},
        "Tags": function["Tags"],
    }
    lambda_details = region.lambdas.get(function["FunctionArn"])
    # guard against a missing registry entry (e.g., qualified ARN mismatch) to
    # avoid an AttributeError on None; previously this crashed with a 500
    if lambda_details is not None and lambda_details.concurrency is not None:
        result["Concurrency"] = lambda_details.concurrency
    return jsonify(result)
def not_found_error(ref=None, msg=None):
    """Return a 404 ResourceNotFoundException response.

    If ``ref`` is given it overrides ``msg`` with a "<kind> not found" message.
    """
    if ref:
        kind = "Function" if ":function:" in ref else "Resource"
        msg = "%s not found: %s" % (kind, ref)
    elif not msg:
        msg = "The resource you requested does not exist."
    return error_response(msg, 404, error_type="ResourceNotFoundException")
# ------------
# API METHODS
# ------------
@app.before_request
def before_request():
    # fix to enable chunked encoding, as this is used by some Lambda clients
    if request.headers.get("Transfer-Encoding", "").lower() == "chunked":
        request.environ["wsgi.input_terminated"] = True
@app.route("%s/functions" % PATH_ROOT, methods=["POST"])
def create_function():
    """Create new function
    ---
    operationId: 'createFunction'
    parameters:
      - name: 'request'
        in: body
    """
    region = LambdaRegion.get()
    arn = "n/a"
    try:
        # reject oversized payloads before attempting to parse them
        if len(request.data) > FUNCTION_MAX_SIZE:
            return error_response(
                "Request size (%s) must be smaller than %s bytes for the CreateFunction operation"
                % (len(request.data), FUNCTION_MAX_SIZE),
                413,
                error_type="RequestEntityTooLargeException",
            )
        data = json.loads(to_str(request.data))
        lambda_name = data["FunctionName"]
        event_publisher.fire_event(
            event_publisher.EVENT_LAMBDA_CREATE_FUNC,
            payload={"n": event_publisher.get_hash(lambda_name)},
        )
        arn = func_arn(lambda_name)
        if arn in region.lambdas:
            return error_response(
                "Function already exist: %s" % lambda_name,
                409,
                error_type="ResourceConflictException",
            )
        # register the new function and populate it from the request attributes
        region.lambdas[arn] = func_details = LambdaFunction(arn)
        func_details.versions = {VERSION_LATEST: {"RevisionId": str(uuid.uuid4())}}
        func_details.vpc_config = data.get("VpcConfig", {})
        func_details.last_modified = datetime.utcnow()
        func_details.description = data.get("Description", "")
        func_details.handler = data.get("Handler")
        func_details.runtime = data.get("Runtime")
        func_details.envvars = data.get("Environment", {}).get("Variables", {})
        func_details.tags = data.get("Tags", {})
        func_details.timeout = data.get("Timeout", LAMBDA_DEFAULT_TIMEOUT)
        func_details.role = data["Role"]
        func_details.kms_key_arn = data.get("KMSKeyArn")
        # Oddity in Lambda API (discovered when testing against Terraform test suite)
        # See https://github.com/hashicorp/terraform-provider-aws/issues/6366
        if not func_details.envvars:
            func_details.kms_key_arn = None
        func_details.memory_size = data.get("MemorySize")
        func_details.code_signing_config_arn = data.get("CodeSigningConfigArn")
        func_details.code = data["Code"]
        func_details.package_type = data.get("PackageType") or "Zip"
        func_details.image_config = data.get("ImageConfig", {})
        func_details.tracing_config = data.get("TracingConfig", {})
        func_details.set_dead_letter_config(data)
        result = set_function_code(func_details)
        if isinstance(result, Response):
            # code deployment failed -> roll back the registry entry
            del region.lambdas[arn]
            return result
        # remove content from code attribute, if present
        func_details.code.pop("ZipFile", None)
        # prepare result
        result.update(format_func_details(func_details))
        if data.get("Publish"):
            result["Version"] = publish_new_function_version(arn)["Version"]
        return jsonify(result or {})
    except Exception as e:
        # roll back any partially created state before reporting the error
        region.lambdas.pop(arn, None)
        if isinstance(e, ClientError):
            return e.get_response()
        return error_response("Unknown error: %s %s" % (e, traceback.format_exc()))
@app.route("%s/functions/<function>" % PATH_ROOT, methods=["GET"])
def get_function(function):
    """Get details for a single function
    ---
    operationId: 'getFunction'
    parameters:
      - name: 'request'
        in: body
      - name: 'function'
        in: path
    """
    region = LambdaRegion.get()
    for func in do_list_functions():
        # match either by exact function name or by (partial) ARN
        if function == func["FunctionName"] or function in func["FunctionArn"]:
            return lookup_function(func, region, request.url)
    return not_found_error(func_arn(function))
@app.route("%s/functions/" % PATH_ROOT, methods=["GET"])
def list_functions():
    """List functions
    ---
    operationId: 'listFunctions'
    parameters:
      - name: 'request'
        in: body
    """
    return jsonify({"Functions": do_list_functions()})
@app.route("%s/functions/<function>" % PATH_ROOT, methods=["DELETE"])
def delete_function(function):
    """Delete an existing function
    ---
    operationId: 'deleteFunction'
    parameters:
      - name: 'request'
        in: body
    """
    region = LambdaRegion.get()
    arn = func_arn(function)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    try:
        region.lambdas.pop(arn)
    except KeyError:
        return not_found_error(func_arn(function))

    event_publisher.fire_event(
        event_publisher.EVENT_LAMBDA_DELETE_FUNC,
        payload={"n": event_publisher.get_hash(function)},
    )

    # Remove all event source mappings pointing at the deleted function.
    # In-place slice assignment keeps the list object's identity (other references
    # stay valid) and replaces the previous error-prone manual index bookkeeping.
    mappings = region.event_source_mappings
    mappings[:] = [m for m in mappings if m["FunctionArn"] != arn]

    return jsonify({})
@app.route("%s/functions/<function>/code" % PATH_ROOT, methods=["PUT"])
def update_function_code(function):
    """Update the code of an existing function
    ---
    operationId: 'updateFunctionCode'
    parameters:
      - name: 'request'
        in: body
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    lambda_function = region.lambdas.get(arn)
    if not lambda_function:
        return not_found_error("Function not found: %s" % arn)
    data = json.loads(to_str(request.data))
    lambda_function.code = data
    result = set_function_code(lambda_function)
    # a Response return value indicates a deployment error
    if isinstance(result, Response):
        return result
    lambda_function.last_modified = datetime.utcnow()
    result.update(format_func_details(lambda_function))
    # optionally publish a new version of the updated function
    if data.get("Publish"):
        result["Version"] = publish_new_function_version(arn)["Version"]
    return jsonify(result or {})
@app.route("%s/functions/<function>/code" % PATH_ROOT, methods=["GET"])
def get_function_code(function):
    """Get the code of an existing function
    ---
    operationId: 'getFunctionCode'
    parameters:
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    func_details = region.lambdas.get(arn)
    if not func_details:
        return not_found_error(arn)
    # serve the stored deployment package from the function's working directory
    archive_path = "%s/%s" % (func_details.cwd, LAMBDA_ZIP_FILE_NAME)
    return Response(
        load_file(archive_path, mode="rb"),
        mimetype="application/zip",
        headers={"Content-Disposition": "attachment; filename=lambda_archive.zip"},
    )
@app.route("%s/functions/<function>/configuration" % PATH_ROOT, methods=["GET"])
def get_function_configuration(function):
    """Get the configuration of an existing function
    ---
    operationId: 'getFunctionConfiguration'
    parameters:
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    lambda_details = region.lambdas.get(arn)
    if not lambda_details:
        return not_found_error(arn)
    return jsonify(format_func_details(lambda_details))
@app.route("%s/functions/<function>/configuration" % PATH_ROOT, methods=["PUT"])
def update_function_configuration(function):
    """Update the configuration of an existing function
    ---
    operationId: 'updateFunctionConfiguration'
    parameters:
      - name: 'request'
        in: body
    """
    region = LambdaRegion.get()
    data = json.loads(to_str(request.data))
    arn = func_arn(function)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    lambda_details = region.lambdas.get(arn)
    if not lambda_details:
        return not_found_error('Unable to find Lambda function ARN "%s"' % arn)

    # apply each provided configuration attribute; absent keys leave values unchanged
    if data.get("Handler"):
        lambda_details.handler = data["Handler"]
    if data.get("Runtime"):
        lambda_details.runtime = data["Runtime"]
    lambda_details.set_dead_letter_config(data)
    env_vars = data.get("Environment", {}).get("Variables")
    # NOTE: an explicitly empty Variables dict clears the env vars (hence `is not None`)
    if env_vars is not None:
        lambda_details.envvars = env_vars
    if data.get("Timeout"):
        lambda_details.timeout = data["Timeout"]
    if data.get("Role"):
        lambda_details.role = data["Role"]
    if data.get("MemorySize"):
        lambda_details.memory_size = data["MemorySize"]
    if data.get("Description"):
        lambda_details.description = data["Description"]
    if data.get("VpcConfig"):
        lambda_details.vpc_config = data["VpcConfig"]
    if data.get("KMSKeyArn"):
        lambda_details.kms_key_arn = data["KMSKeyArn"]
    if data.get("TracingConfig"):
        lambda_details.tracing_config = data["TracingConfig"]
    lambda_details.last_modified = datetime.utcnow()

    # echo the request data back, merged with the full function details
    result = data
    func_details = region.lambdas.get(arn)
    result.update(format_func_details(func_details))

    # initialize plugins
    for plugin in lambda_executors.LambdaExecutorPlugin.get_plugins():
        plugin.init_function_configuration(func_details)

    return jsonify(result)
def generate_policy_statement(sid, action, arn, sourcearn, principal):
    """Build a single IAM policy statement granting ``action`` on ``arn``.

    :param sid: statement id
    :param action: allowed action (e.g. "lambda:InvokeFunction")
    :param arn: resource ARN the statement applies to
    :param sourcearn: optional source ARN restriction (added as an ArnLike condition)
    :param principal: optional service principal (added as a Service principal)
    """
    statement = {
        "Sid": sid,
        "Effect": "Allow",
        "Action": action,
        "Resource": arn,
    }
    # restrict by source ARN only when one was supplied
    if sourcearn:
        statement["Condition"] = {"ArnLike": {"AWS:SourceArn": sourcearn}}
    # attach the principal only when present
    if principal:
        statement["Principal"] = {"Service": principal}
    return statement
def generate_policy(sid, action, arn, sourcearn, principal):
    """Build a resource policy document containing a single generated statement."""
    return {
        "Version": IAM_POLICY_VERSION,
        "Id": "LambdaFuncAccess-%s" % sid,
        "Statement": [generate_policy_statement(sid, action, arn, sourcearn, principal)],
    }
@app.route("%s/functions/<function>/policy" % PATH_ROOT, methods=["POST"])
def add_permission(function):
    """Add a permission statement to the resource policy of a Lambda function."""
    arn = func_arn(function)
    qualifier = request.args.get("Qualifier")
    qualified_arn = func_qualifier(function, qualifier)
    return add_permission_policy_statement(function, arn, qualified_arn)
def add_permission_policy_statement(resource_name, resource_arn, resource_arn_qualified):
    """Add a statement to the resource-based policy of a Lambda function.

    Reads the statement parameters from the current Flask request body. The policy
    is persisted via IAM by deleting and re-creating the policy with the merged
    statements (IAM managed policies cannot be edited in place here).

    :param resource_name: Lambda function name
    :param resource_arn: unqualified function ARN (used for existence check)
    :param resource_arn_qualified: qualified ARN the new statement targets
    """
    region = LambdaRegion.get()
    data = json.loads(to_str(request.data))
    iam_client = aws_stack.connect_to_service("iam")
    sid = data.get("StatementId")
    action = data.get("Action")
    principal = data.get("Principal")
    sourcearn = data.get("SourceArn")
    previous_policy = get_lambda_policy(resource_name)

    if resource_arn not in region.lambdas:
        return not_found_error(resource_arn)

    # validate the action against the pattern AWS enforces
    if not re.match(r"lambda:[*]|lambda:[a-zA-Z]+|[*]", action):
        return error_response(
            '1 validation error detected: Value "%s" at "action" failed to satisfy '
            "constraint: Member must satisfy regular expression pattern: "
            "(lambda:[*]|lambda:[a-zA-Z]+|[*])" % action,
            400,
            error_type="ValidationException",
        )

    new_policy = generate_policy(sid, action, resource_arn_qualified, sourcearn, principal)
    if previous_policy:
        # reject duplicate statement ids, then merge the old statements into the new policy
        statment_with_sid = next(
            (statement for statement in previous_policy["Statement"] if statement["Sid"] == sid),
            None,
        )
        if statment_with_sid:
            msg = (
                "The statement id (%s) provided already exists. Please provide a new statement id, "
                "or remove the existing statement."
            ) % sid
            return error_response(msg, 400, error_type="ResourceConflictException")

        new_policy["Statement"].extend(previous_policy["Statement"])
        iam_client.delete_policy(PolicyArn=previous_policy["PolicyArn"])

    policy_name = LAMBDA_POLICY_NAME_PATTERN % resource_name
    LOG.debug('Creating IAM policy "%s" for Lambda resource %s' % (policy_name, resource_arn))
    iam_client.create_policy(
        PolicyName=policy_name,
        PolicyDocument=json.dumps(new_policy),
        Description='Policy for Lambda function "%s"' % resource_name,
    )

    result = {"Statement": json.dumps(new_policy["Statement"][0])}
    return jsonify(result)
@app.route("%s/functions/<function>/policy/<statement>" % PATH_ROOT, methods=["DELETE"])
def remove_permission(function, statement):
    """Remove a permission statement from the policy of a Lambda function."""
    qualifier = request.args.get("Qualifier")
    iam_client = aws_stack.connect_to_service("iam")
    policy = get_lambda_policy(function)
    if not policy:
        return not_found_error('Unable to find policy for Lambda function "%s"' % function)
    # NOTE: the whole policy is deleted (statements are not removed individually)
    iam_client.delete_policy(PolicyArn=policy["PolicyArn"])
    return jsonify(
        {
            "FunctionName": function,
            "Qualifier": qualifier,
            "StatementId": policy["Statement"][0]["Sid"],
        }
    )
@app.route("%s/functions/<function>/policy" % PATH_ROOT, methods=["GET"])
def get_policy(function):
    """Return the resource-based policy of a Lambda function as JSON."""
    policy = get_lambda_policy(function, request.args.get("Qualifier"))
    if not policy:
        return not_found_error("The resource you requested does not exist.")
    return jsonify({"Policy": json.dumps(policy), "RevisionId": "test1234"})
@app.route("%s/functions/<function>/invocations" % PATH_ROOT, methods=["POST"])
def invoke_function(function):
    """Invoke an existing function
    ---
    operationId: 'invokeFunction'
    parameters:
      - name: 'request'
        in: body
    """
    # function here can either be an arn or a function name
    arn = func_arn(function)

    # ARN can also contain a qualifier, extract it from there if so
    m = re.match("(arn:aws:lambda:.*:.*:function:[a-zA-Z0-9-_]+)(:.*)?", arn)
    if m and m.group(2):
        qualifier = m.group(2)[1:]
        arn = m.group(1)
    else:
        qualifier = request.args.get("Qualifier")

    # parse the request payload (also handling chunked transfer encoding)
    data = request.get_data() or ""
    if data:
        try:
            data = to_str(data)
            data = json.loads(data)
        except Exception:
            try:
                # try to read chunked content
                data = json.loads(parse_chunked_data(data))
            except Exception:
                return error_response(
                    "The payload is not JSON: %s" % data,
                    415,
                    error_type="UnsupportedMediaTypeException",
                )

    # Default invocation type is RequestResponse
    invocation_type = request.headers.get("X-Amz-Invocation-Type", "RequestResponse")
    log_type = request.headers.get("X-Amz-Log-Type")

    def _create_response(invocation_result, status_code=200, headers=None):
        """Create the final response for the given invocation result.

        Fix: ``headers`` previously defaulted to a mutable ``{}``; since the dict
        is mutated below (X-Amz-* headers are added), the shared default would
        leak headers across subsequent invocations.
        """
        headers = {} if headers is None else headers
        if not isinstance(invocation_result, lambda_executors.InvocationResult):
            invocation_result = lambda_executors.InvocationResult(invocation_result)
        result = invocation_result.result
        log_output = invocation_result.log_output
        details = {"StatusCode": status_code, "Payload": result, "Headers": headers}
        if isinstance(result, Response):
            details["Payload"] = to_str(result.data)
            if result.status_code >= 400:
                details["FunctionError"] = "Unhandled"
        if isinstance(result, (str, bytes)):
            try:
                result = json.loads(to_str(result))
            except Exception:
                pass
        if isinstance(result, dict):
            # a dict result may override status code, payload, and error flag
            for key in ("StatusCode", "Payload", "FunctionError"):
                if result.get(key):
                    details[key] = result[key]
        # Try to parse payload as JSON
        was_json = False
        payload = details["Payload"]
        if payload and isinstance(payload, POSSIBLE_JSON_TYPES) and payload[0] in JSON_START_CHARS:
            try:
                details["Payload"] = json.loads(details["Payload"])
                was_json = True
            except Exception:
                pass
        # Set error headers
        if details.get("FunctionError"):
            details["Headers"]["X-Amz-Function-Error"] = str(details["FunctionError"])
        # LogResult contains the last 4KB (~4k characters) of log outputs
        logs = log_output[-4000:] if log_type == "Tail" else ""
        details["Headers"]["X-Amz-Log-Result"] = base64.b64encode(to_bytes(logs))
        details["Headers"]["X-Amz-Executed-Version"] = str(qualifier or VERSION_LATEST)
        # Construct response object
        response_obj = details["Payload"]
        if was_json or isinstance(response_obj, JSON_START_TYPES):
            response_obj = json_safe(response_obj)
            # Content-type header is not required since jsonify automatically adds it
            response_obj = jsonify(response_obj)
        else:
            response_obj = str(response_obj)
            details["Headers"]["Content-Type"] = "text/plain"
        return response_obj, details["StatusCode"], details["Headers"]

    # check if this lambda function exists
    not_found = None
    region = LambdaRegion.get()
    if arn not in region.lambdas:
        not_found = not_found_error(arn)
    elif qualifier and not region.lambdas.get(arn).qualifier_exists(qualifier):
        not_found = not_found_error("{0}:{1}".format(arn, qualifier))

    if not_found:
        # non-existing function: optionally forward to the configured fallback URL
        try:
            forward_result = forward_to_fallback_url(arn, json.dumps(data))
            if forward_result is not None:
                return _create_response(forward_result)
        except Exception as e:
            LOG.debug('Unable to forward Lambda invocation to fallback URL: "%s" - %s' % (data, e))
        return not_found

    if invocation_type == "RequestResponse":
        context = {"client_context": request.headers.get("X-Amz-Client-Context")}
        result = run_lambda(
            func_arn=arn,
            event=data,
            context=context,
            asynchronous=False,
            version=qualifier,
        )
        return _create_response(result)
    elif invocation_type == "Event":
        run_lambda(func_arn=arn, event=data, context={}, asynchronous=True, version=qualifier)
        return _create_response("", status_code=202)
    elif invocation_type == "DryRun":
        # Assume the dry run always passes.
        return _create_response("", status_code=204)
    return error_response(
        "Invocation type not one of: RequestResponse, Event or DryRun",
        code=400,
        error_type="InvalidParameterValueException",
    )
@app.route("%s/event-source-mappings" % PATH_ROOT, methods=["GET"], strict_slashes=False)
def get_event_source_mappings():
    """List event source mappings
    ---
    operationId: 'listEventSourceMappings'
    """
    region = LambdaRegion.get()
    source_arn_filter = request.args.get("EventSourceArn")
    function_name_filter = request.args.get("FunctionName")

    mappings = region.event_source_mappings
    if source_arn_filter:
        mappings = [m for m in mappings if m.get("EventSourceArn") == source_arn_filter]
    if function_name_filter:
        target_arn = func_arn(function_name_filter)
        mappings = [m for m in mappings if m.get("FunctionArn") == target_arn]
    return jsonify({"EventSourceMappings": mappings})
@app.route("%s/event-source-mappings/<mapping_uuid>" % PATH_ROOT, methods=["GET"])
def get_event_source_mapping(mapping_uuid):
    """Get an existing event source mapping
    ---
    operationId: 'getEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    # Linear scan is fine here: the per-region mapping list is small.
    matches = [m for m in region.event_source_mappings if m.get("UUID") == mapping_uuid]
    if not matches:
        return not_found_error()
    return jsonify(matches[0])
@app.route("%s/event-source-mappings" % PATH_ROOT, methods=["POST"], strict_slashes=False)
def create_event_source_mapping():
    """Create new event source mapping
    ---
    operationId: 'createEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    payload = json.loads(to_str(request.data))
    try:
        return jsonify(add_event_source(payload))
    except ValueError as error:
        # add_event_source signals validation problems via ValueError(args=(type, msg)).
        error_type, message = error.args
        return error_response(message, code=400, error_type=error_type)
@app.route("%s/event-source-mappings/<mapping_uuid>" % PATH_ROOT, methods=["PUT"])
def update_event_source_mapping(mapping_uuid):
    """Update an existing event source mapping
    ---
    operationId: 'updateEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    payload = json.loads(to_str(request.data))
    if not mapping_uuid:
        # No UUID supplied - nothing to update.
        return jsonify({})
    try:
        return jsonify(update_event_source(mapping_uuid, payload))
    except ValueError as error:
        # update_event_source signals validation problems via ValueError(args=(type, msg)).
        error_type, message = error.args
        return error_response(message, code=400, error_type=error_type)
@app.route("%s/event-source-mappings/<mapping_uuid>" % PATH_ROOT, methods=["DELETE"])
def delete_event_source_mapping(mapping_uuid):
    """Delete an event source mapping
    ---
    operationId: 'deleteEventSourceMapping'
    """
    if not mapping_uuid:
        # No UUID supplied - nothing to delete.
        return jsonify({})
    return jsonify(delete_event_source(mapping_uuid))
@app.route("%s/functions/<function>/versions" % PATH_ROOT, methods=["POST"])
def publish_version(function):
    """Publish a new version of the given Lambda function; 404 if unknown."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn in region.lambdas:
        return jsonify(publish_new_function_version(arn))
    return not_found_error(arn)
@app.route("%s/functions/<function>/versions" % PATH_ROOT, methods=["GET"])
def list_versions(function):
    """List all published versions of a function; 404 if unknown."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn in region.lambdas:
        return jsonify({"Versions": do_list_versions(arn)})
    return not_found_error(arn)
@app.route("%s/functions/<function>/aliases" % PATH_ROOT, methods=["POST"])
def create_alias(function):
    """Create an alias pointing at a version of the given function.

    Returns 404 if the function does not exist. If the alias name is already
    taken, responds with ResourceConflictException - the original code used
    HTTP 404 here, but AWS returns 409 (Conflict) for this error.
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn not in region.lambdas:
        return not_found_error(arn)
    data = json.loads(request.data)
    alias = data.get("Name")
    if alias in region.lambdas.get(arn).aliases:
        # Fixed: ResourceConflictException maps to HTTP 409, not 404.
        return error_response(
            "Alias already exists: %s" % arn + ":" + alias,
            409,
            error_type="ResourceConflictException",
        )
    version = data.get("FunctionVersion")
    description = data.get("Description")
    return jsonify(do_update_alias(arn, alias, version, description))
@app.route("%s/functions/<function>/aliases/<name>" % PATH_ROOT, methods=["PUT"])
def update_alias(function, name):
    """Update an alias; fields omitted from the request keep their current value."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    func_details = region.lambdas.get(arn)
    if func_details is None:
        return not_found_error(arn)
    existing = func_details.aliases.get(name)
    if existing is None:
        return not_found_error(msg="Alias not found: %s:%s" % (arn, name))
    data = json.loads(request.data)
    # Fall back to the stored values when the request omits a field.
    version = data.get("FunctionVersion") or existing.get("FunctionVersion")
    description = data.get("Description") or existing.get("Description")
    return jsonify(do_update_alias(arn, name, version, description))
@app.route("%s/functions/<function>/aliases/<name>" % PATH_ROOT, methods=["GET"])
def get_alias(function, name):
    """Return the details of a single alias of a function."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    func_details = region.lambdas.get(arn)
    if func_details is None:
        return not_found_error(arn)
    if name not in func_details.aliases:
        return not_found_error(msg="Alias not found: %s:%s" % (arn, name))
    return jsonify(func_details.aliases.get(name))
@app.route("%s/functions/<function>/aliases" % PATH_ROOT, methods=["GET"])
def list_aliases(function):
    """List all aliases of a function, sorted by alias name."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    func_details = region.lambdas.get(arn)
    if func_details is None:
        return not_found_error(arn)
    aliases = sorted(func_details.aliases.values(), key=lambda alias: alias["Name"])
    return jsonify({"Aliases": aliases})
@app.route("%s/functions/<function>/aliases/<name>" % PATH_ROOT, methods=["DELETE"])
def delete_alias(function, name):
    """Delete an alias; 404 if the function or the alias does not exist."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    func_details = region.lambdas.get(arn)
    if func_details is None:
        return not_found_error(arn)
    if name not in func_details.aliases:
        return not_found_error(msg="Alias not found: %s:%s" % (arn, name))
    del func_details.aliases[name]
    return jsonify({})
@app.route("/<version>/functions/<function>/concurrency", methods=["GET", "PUT", "DELETE"])
def function_concurrency(version, function):
    """Get, set or clear the reserved concurrency of a function.

    Note: the API version for put_concurrency differs from PATH_ROOT; at the
    time of writing it is /2017-10-31 for this endpoint, see:
    https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    lambda_details = region.lambdas.get(arn)
    if not lambda_details:
        return not_found_error(arn)
    if request.method == "DELETE":
        lambda_details.concurrency = None
        return Response("", status=204)
    elif request.method == "PUT":
        data = json.loads(request.data)
        lambda_details.concurrency = data
    else:  # GET
        data = lambda_details.concurrency
    return jsonify(data)
@app.route("/<version>/tags/<arn>", methods=["GET"])
def list_tags(version, arn):
    """Return the tags attached to the function identified by `arn`."""
    region = LambdaRegion.get()
    func_details = region.lambdas.get(arn)
    if func_details is None:
        return not_found_error(arn)
    return jsonify({"Tags": func_details.tags})
@app.route("/<version>/tags/<arn>", methods=["POST"])
def tag_resource(version, arn):
    """Attach tags to a function.

    A request without tags is a no-op; when tags are supplied for an unknown
    function ARN the endpoint returns 404.
    """
    region = LambdaRegion.get()
    data = json.loads(request.data)
    tags = data.get("Tags", {})
    if tags:
        func_details = region.lambdas.get(arn)
        if not func_details:
            return not_found_error(arn)
        # The guard above already returned for a missing function, so the
        # original's second "if func_details" check was redundant and dropped.
        func_details.tags.update(tags)
    return jsonify({})
@app.route("/<version>/tags/<arn>", methods=["DELETE"])
def untag_resource(version, arn):
    """Remove the tag keys listed in the "tagKeys" query param from a function."""
    region = LambdaRegion.get()
    keys_to_remove = request.args.getlist("tagKeys")
    func_details = region.lambdas.get(arn)
    if func_details is None:
        return not_found_error(arn)
    for key in keys_to_remove:
        # Missing keys are silently ignored.
        func_details.tags.pop(key, None)
    return jsonify({})
@app.route("/2019-09-25/functions/<function>/event-invoke-config", methods=["PUT", "POST"])
def put_function_event_invoke_config(function):
    # TODO: resource validation required to check if resource exists
    """Add/Updates the configuration for asynchronous invocation for a function
    ---
    operationId: PutFunctionEventInvokeConfig | UpdateFunctionEventInvokeConfig
    parameters:
        - name: 'function'
          in: path
        - name: 'qualifier'
          in: path
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    data = json.loads(to_str(request.data))
    function_arn = func_arn(function)
    lambda_obj = region.lambdas.get(function_arn)
    if not lambda_obj:
        return not_found_error("Unable to find Lambda ARN: %s" % function_arn)

    if request.method == "PUT":
        # PUT replaces the whole config, so drop any existing one first.
        # Fixed: the original assigned clear_...()'s return value to
        # `response` only to overwrite it on the very next line.
        lambda_obj.clear_function_event_invoke_config()
    response = lambda_obj.put_function_event_invoke_config(data)

    return jsonify(
        {
            "LastModified": response.last_modified.strftime(DATE_FORMAT),
            "FunctionArn": str(function_arn),
            "MaximumRetryAttempts": response.max_retry_attempts,
            "MaximumEventAgeInSeconds": response.max_event_age,
            "DestinationConfig": {
                "OnSuccess": {"Destination": str(response.on_successful_invocation)},
                "OnFailure": {"Destination": str(response.on_failed_invocation)},
            },
        }
    )
@app.route("/2019-09-25/functions/<function>/event-invoke-config", methods=["GET"])
def get_function_event_invoke_config(function):
    """Retrieves the configuration for asynchronous invocation for a function
    ---
    operationId: GetFunctionEventInvokeConfig
    parameters:
        - name: 'function'
          in: path
        - name: 'qualifier'
          in: path
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    # Fixed: resolve the ARN before the try-block. The original computed it
    # inside the try, so a failure in func_arn() made the except handler hit
    # a NameError on the unbound `function_arn` variable.
    function_arn = func_arn(function)
    try:
        lambda_obj = region.lambdas[function_arn]
    except Exception:
        return not_found_error("Unable to find Lambda function ARN %s" % function_arn)
    response = lambda_obj.get_function_event_invoke_config()
    if not response:
        msg = "The function %s doesn't have an EventInvokeConfig" % function_arn
        return not_found_error(msg)
    return jsonify(response)
@app.route("/2019-09-25/functions/<function>/event-invoke-config", methods=["DELETE"])
def delete_function_event_invoke_config(function):
    """Delete the asynchronous-invocation configuration of a function."""
    region = LambdaRegion.get()
    # Fixed: compute the ARN outside the try-block so the handler can never
    # reference it while unbound if func_arn() itself raises.
    function_arn = func_arn(function)
    try:
        lambda_obj = region.lambdas[function_arn]
    except Exception as e:
        return error_response(str(e), 400)
    lambda_obj.clear_function_event_invoke_config()
    return Response("", status=204)
@app.route("/2020-06-30/functions/<function>/code-signing-config", methods=["GET"])
def get_function_code_signing_config(function):
    """Return the code signing config ARN attached to a function, if any."""
    region = LambdaRegion.get()
    function_arn = func_arn(function)
    lambda_obj = region.lambdas.get(function_arn)
    if lambda_obj is None:
        return not_found_error("Function not found: %s" % function_arn)
    if lambda_obj.code_signing_config_arn:
        arn = lambda_obj.code_signing_config_arn
    else:
        # Nothing attached: blank out both fields in the response.
        arn = None
        function = None
    result = {"CodeSigningConfigArn": arn, "FunctionName": function}
    return Response(json.dumps(result), status=200)
@app.route("/2020-06-30/functions/<function>/code-signing-config", methods=["PUT"])
def put_function_code_signing_config(function):
    """Attach an existing code signing config to a function.

    Returns 404 if either the referenced code signing config or the target
    function does not exist.
    """
    region = LambdaRegion.get()
    data = json.loads(request.data)
    arn = data.get("CodeSigningConfigArn")
    if arn not in region.code_signing_configs:
        msg = """The code signing configuration cannot be found.
Check that the provided configuration is not deleted: %s.""" % (
            arn
        )
        return error_response(msg, 404, error_type="CodeSigningConfigNotFoundException")
    function_arn = func_arn(function)
    if function_arn not in region.lambdas:
        msg = "Function not found: %s" % (function_arn)
        return not_found_error(msg)
    lambda_obj = region.lambdas[function_arn]
    # NOTE(review): this check is redundant - `arn` was already validated
    # against region.code_signing_configs above, so it is always truthy here.
    if data.get("CodeSigningConfigArn"):
        lambda_obj.code_signing_config_arn = arn
    result = {"CodeSigningConfigArn": arn, "FunctionName": function}
    return Response(json.dumps(result), status=200)
@app.route("/2020-06-30/functions/<function>/code-signing-config", methods=["DELETE"])
def delete_function_code_signing_config(function):
    """Detach the code signing config from a function."""
    region = LambdaRegion.get()
    function_arn = func_arn(function)
    lambda_obj = region.lambdas.get(function_arn)
    if lambda_obj is None:
        return not_found_error("Function not found: %s" % function_arn)
    lambda_obj.code_signing_config_arn = None
    return Response("", status=204)
@app.route("/2020-04-22/code-signing-configs/", methods=["POST"])
def create_code_signing_config():
    """Create a new code signing config and return its description (HTTP 201)."""
    region = LambdaRegion.get()
    data = json.loads(request.data)
    # Fixed: tolerate a missing "AllowedPublishers" section. The original
    # called data.get("AllowedPublishers").get(...), which raised an
    # AttributeError (None.get) on such malformed payloads.
    signing_profile_version_arns = (data.get("AllowedPublishers") or {}).get(
        "SigningProfileVersionArns"
    )
    code_signing_id = "csc-%s" % long_uid().replace("-", "")[0:17]
    arn = aws_stack.code_signing_arn(code_signing_id)
    region.code_signing_configs[arn] = CodeSigningConfig(
        arn, code_signing_id, signing_profile_version_arns
    )
    code_signing_obj = region.code_signing_configs[arn]
    # Optional fields: only override the defaults when supplied.
    if data.get("Description"):
        code_signing_obj.description = data["Description"]
    if data.get("CodeSigningPolicies", {}).get("UntrustedArtifactOnDeployment"):
        code_signing_obj.untrusted_artifact_on_deployment = data["CodeSigningPolicies"][
            "UntrustedArtifactOnDeployment"
        ]
    code_signing_obj.last_modified = format_timestamp()
    result = {
        "CodeSigningConfig": {
            "AllowedPublishers": {
                "SigningProfileVersionArns": code_signing_obj.signing_profile_version_arns
            },
            "CodeSigningConfigArn": code_signing_obj.arn,
            "CodeSigningConfigId": code_signing_obj.id,
            "CodeSigningPolicies": {
                "UntrustedArtifactOnDeployment": code_signing_obj.untrusted_artifact_on_deployment
            },
            "Description": code_signing_obj.description,
            "LastModified": code_signing_obj.last_modified,
        }
    }
    return Response(json.dumps(result), status=201)
@app.route("/2020-04-22/code-signing-configs/<arn>", methods=["GET"])
def get_code_signing_config(arn):
    """Describe an existing code signing config; 404 if unknown."""
    region = LambdaRegion.get()
    code_signing_obj = region.code_signing_configs.get(arn)
    if code_signing_obj is None:
        msg = "The Lambda code signing configuration %s can not be found." % arn
        return not_found_error(msg)
    result = {
        "CodeSigningConfig": {
            "AllowedPublishers": {
                "SigningProfileVersionArns": code_signing_obj.signing_profile_version_arns
            },
            "CodeSigningConfigArn": code_signing_obj.arn,
            "CodeSigningConfigId": code_signing_obj.id,
            "CodeSigningPolicies": {
                "UntrustedArtifactOnDeployment": code_signing_obj.untrusted_artifact_on_deployment
            },
            "Description": code_signing_obj.description,
            "LastModified": code_signing_obj.last_modified,
        }
    }
    return Response(json.dumps(result), status=200)
@app.route("/2020-04-22/code-signing-configs/<arn>", methods=["DELETE"])
def delete_code_signing_config(arn):
    """Delete a code signing config; 404 if it does not exist."""
    region = LambdaRegion.get()
    if arn not in region.code_signing_configs:
        msg = "The Lambda code signing configuration %s can not be found." % (arn)
        return not_found_error(msg)
    del region.code_signing_configs[arn]
    return Response("", status=204)
@app.route("/2020-04-22/code-signing-configs/<arn>", methods=["PUT"])
def update_code_signing_config(arn):
    """Update selected fields of a code signing config.

    Only fields present in the request body are changed; LastModified is
    refreshed only when at least one field was actually updated.
    """
    region = LambdaRegion.get()
    code_signing_obj = region.code_signing_configs.get(arn)
    if code_signing_obj is None:
        msg = "The Lambda code signing configuration %s can not be found." % (arn)
        return not_found_error(msg)
    data = json.loads(request.data)
    is_updated = False
    description = data.get("Description")
    if description:
        code_signing_obj.description = description
        is_updated = True
    profile_arns = data.get("AllowedPublishers", {}).get("SigningProfileVersionArns")
    if profile_arns:
        code_signing_obj.signing_profile_version_arns = profile_arns
        is_updated = True
    deployment_policy = data.get("CodeSigningPolicies", {}).get("UntrustedArtifactOnDeployment")
    if deployment_policy:
        code_signing_obj.untrusted_artifact_on_deployment = deployment_policy
        is_updated = True
    if is_updated:
        code_signing_obj.last_modified = format_timestamp()
    result = {
        "CodeSigningConfig": {
            "AllowedPublishers": {
                "SigningProfileVersionArns": code_signing_obj.signing_profile_version_arns
            },
            "CodeSigningConfigArn": code_signing_obj.arn,
            "CodeSigningConfigId": code_signing_obj.id,
            "CodeSigningPolicies": {
                "UntrustedArtifactOnDeployment": code_signing_obj.untrusted_artifact_on_deployment
            },
            "Description": code_signing_obj.description,
            "LastModified": code_signing_obj.last_modified,
        }
    }
    return Response(json.dumps(result), status=200)
def serve(port):
    """Start the Lambda API as a Flask app on the given port (blocking)."""
    from localstack.services import generic_proxy  # moved here to fix circular import errors

    # initialize the Lambda executor
    LAMBDA_EXECUTOR.startup()
    # initialize/import plugins - TODO find better place to import plugins! (to be integrated into proper plugin model)
    import localstack.plugin.thundra  # noqa

    generic_proxy.serve_flask_app(app=app, port=port)
# Config listener
def on_config_change(config_key: str, config_newvalue: str) -> None:
    """Swap out the global Lambda executor when LAMBDA_EXECUTOR changes.

    All other config keys are ignored. The old executor is cleaned up before
    the new one (falling back to the default for unknown values) is started.
    """
    global LAMBDA_EXECUTOR
    if config_key != "LAMBDA_EXECUTOR":
        return
    LOG.debug(
        "Received config event for lambda executor - Key: '{}', Value: {}".format(
            config_key, config_newvalue
        )
    )
    LAMBDA_EXECUTOR.cleanup()
    LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(
        config_newvalue, lambda_executors.DEFAULT_EXECUTOR
    )
    LAMBDA_EXECUTOR.startup()
def register_config_listener():
    """Subscribe on_config_change to LocalStack's config-change notifications."""
    # Imported lazily to avoid import cycles at module load time - TODO confirm.
    from localstack.utils import config_listener

    config_listener.CONFIG_LISTENERS.append(on_config_change)


# Register the listener at import time so executor changes take effect immediately.
register_config_listener()
| 1 | 13,168 | just to clarify - on line 1074 we update `zip_file_content` for non-local lambdas, but never store it, which means lambda never picks it up | localstack-localstack | py |
@@ -170,10 +170,18 @@ func udpPkt(src, dst string) packet {
func icmpPkt(src, dst string) packet {
return packetWithPorts(1, src+":0", dst+":0")
}
+func icmpPkt_with_type_code(src, dst string, icmpType, icmpCode int) packet {
+ return packet{
+ protocol: 1,
+ srcAddr: src,
+ srcPort: 0,
+ dstAddr: dst,
+ dstPort: (icmpCode << 8) | (icmpType),
+ }
+}
var polProgramTests = []polProgramTest{
// Tests of actions and flow control.
-
{
PolicyName: "no tiers",
DroppedPackets: []packet{ | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ut_test
import (
"encoding/binary"
"fmt"
"net"
"strconv"
"strings"
"testing"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/asm"
"github.com/projectcalico/felix/bpf/ipsets"
"github.com/projectcalico/felix/bpf/polprog"
"github.com/projectcalico/felix/bpf/state"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/proto"
)
// TestLoadAllowAllProgram verifies that a minimal hand-assembled BPF program
// (mov r0, -1; exit) loads into the kernel and returns -1 when run.
func TestLoadAllowAllProgram(t *testing.T) {
	RegisterTestingT(t)

	b := asm.NewBlock()
	b.MovImm32(asm.R0, -1)
	b.Exit()
	insns, err := b.Assemble()
	Expect(err).NotTo(HaveOccurred())

	fd, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
	Expect(err).NotTo(HaveOccurred())
	Expect(fd).NotTo(BeZero())
	defer func() {
		// Close the program FD so we don't leak kernel resources between tests.
		Expect(fd.Close()).NotTo(HaveOccurred())
	}()

	rc, err := bpf.RunBPFProgram(fd, make([]byte, 500), 1)
	Expect(err).NotTo(HaveOccurred())
	Expect(rc.RC).To(BeNumerically("==", -1))
}
// TestLoadProgramWithMapAcccess checks that a hand-assembled program that
// performs a lookup in the IP-sets BPF map loads and runs, returning -1.
func TestLoadProgramWithMapAcccess(t *testing.T) {
	RegisterTestingT(t)

	ipsMap := ipsets.Map(&bpf.MapContext{})
	Expect(ipsMap.EnsureExists()).NotTo(HaveOccurred())
	Expect(ipsMap.MapFD()).NotTo(BeZero())

	b := asm.NewBlock()
	// Zero a 32-byte key area on the stack (4 x 8-byte stores).
	b.MovImm64(asm.R1, 0)
	b.StoreStack64(asm.R1, -8)
	b.StoreStack64(asm.R1, -16)
	b.StoreStack64(asm.R1, -24)
	b.StoreStack64(asm.R1, -32)
	// R2 = pointer to the key; R1 = the map; then call the lookup helper.
	b.Mov64(asm.R2, asm.R10)
	b.AddImm64(asm.R2, -32)
	b.LoadMapFD(asm.R1, uint32(ipsMap.MapFD()))
	b.Call(asm.HelperMapLookupElem)
	b.MovImm32(asm.R0, -1)
	b.Exit()
	insns, err := b.Assemble()
	Expect(err).NotTo(HaveOccurred())

	fd, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
	Expect(err).NotTo(HaveOccurred())
	Expect(fd).NotTo(BeZero())
	defer func() {
		Expect(fd.Close()).NotTo(HaveOccurred())
	}()

	rc, err := bpf.RunBPFProgram(fd, make([]byte, 500), 1)
	Expect(err).NotTo(HaveOccurred())
	Expect(rc.RC).To(BeNumerically("==", -1))
}
// TestLoadKitchenSinkPolicy builds a single rule that uses (almost) every
// available match criterion at once and verifies that the generated policy
// program loads into the kernel successfully.  It only checks loading, not
// packet verdicts.
func TestLoadKitchenSinkPolicy(t *testing.T) {
	RegisterTestingT(t)
	alloc := idalloc.New()
	// allocID interns the set ID with the allocator and returns it unchanged.
	allocID := func(id string) string {
		alloc.GetOrAlloc(id)
		return id
	}

	cleanIPSetMap()

	pg := polprog.NewBuilder(alloc, ipsMap.MapFD(), stateMap.MapFD(), jumpMap.MapFD())
	insns, err := pg.Instructions([][][]*proto.Rule{{{{
		Action:                  "Allow",
		IpVersion:               4,
		Protocol:                &proto.Protocol{NumberOrName: &proto.Protocol_Number{Number: 6}},
		SrcNet:                  []string{"10.0.0.0/8"},
		SrcPorts:                []*proto.PortRange{{First: 80, Last: 81}, {First: 8080, Last: 8081}},
		SrcNamedPortIpSetIds:    []string{allocID("n:abcdef1234567890")},
		DstNet:                  []string{"11.0.0.0/8"},
		DstPorts:                []*proto.PortRange{{First: 3000, Last: 3001}},
		DstNamedPortIpSetIds:    []string{allocID("n:foo1234567890")},
		Icmp:                    nil,
		SrcIpSetIds:             []string{allocID("s:sbcdef1234567890")},
		DstIpSetIds:             []string{allocID("s:dbcdef1234567890")},
		NotProtocol:             &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "UDP"}},
		NotSrcNet:               []string{"12.0.0.0/8"},
		NotSrcPorts:             []*proto.PortRange{{First: 5000, Last: 5000}},
		NotDstNet:               []string{"13.0.0.0/8"},
		NotDstPorts:             []*proto.PortRange{{First: 4000, Last: 4000}},
		NotIcmp:                 nil,
		NotSrcIpSetIds:          []string{allocID("s:abcdef1234567890")},
		NotDstIpSetIds:          []string{allocID("s:abcdef123456789l")},
		NotSrcNamedPortIpSetIds: []string{allocID("n:0bcdef1234567890")},
		NotDstNamedPortIpSetIds: []string{allocID("n:0bcdef1234567890")},
	}}}})
	Expect(err).NotTo(HaveOccurred())

	fd, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
	Expect(err).NotTo(HaveOccurred())
	Expect(fd).NotTo(BeZero())
	Expect(fd.Close()).NotTo(HaveOccurred())
}
const (
	// RCDrop is the return code expected for dropped packets.
	RCDrop = 2
	// RCEpilogueReached appears to mark that the test program fell through
	// to its epilogue - NOTE(review): confirm against the test harness.
	RCEpilogueReached = 123
)
// packetWithPorts builds a test packet for the given protocol from
// "addr:port"-formatted source and destination strings.  A malformed port
// causes a panic so that mistakes in test data fail loudly.
func packetWithPorts(proto int, src, dst string) packet {
	split := func(hostPort string) (string, int) {
		parts := strings.Split(hostPort, ":")
		port, err := strconv.Atoi(parts[1])
		if err != nil {
			panic(err)
		}
		return parts[0], port
	}
	srcAddr, srcPort := split(src)
	dstAddr, dstPort := split(dst)
	return packet{
		protocol: proto,
		srcAddr:  srcAddr,
		srcPort:  srcPort,
		dstAddr:  dstAddr,
		dstPort:  dstPort,
	}
}
// tcpPkt returns a TCP (protocol 6) packet from "addr:port" src/dst strings.
func tcpPkt(src, dst string) packet {
	return packetWithPorts(6, src, dst)
}

// udpPkt returns a UDP (protocol 17) packet from "addr:port" src/dst strings.
func udpPkt(src, dst string) packet {
	return packetWithPorts(17, src, dst)
}

// icmpPkt returns an ICMP (protocol 1) packet; ICMP has no ports, so both
// ports are set to 0.
func icmpPkt(src, dst string) packet {
	return packetWithPorts(1, src+":0", dst+":0")
}
var polProgramTests = []polProgramTest{
// Tests of actions and flow control.
{
PolicyName: "no tiers",
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "unreachable tier",
Policy: [][][]*proto.Rule{
{},
{{{
Action: "Allow",
}}},
},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "pass to nowhere",
Policy: [][][]*proto.Rule{
{{{
Action: "Pass",
}}},
},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "pass to allow",
Policy: [][][]*proto.Rule{
{
{
{Action: "Pass"},
{Action: "Deny"},
},
},
{
{
{Action: "Allow"},
},
},
},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "pass to deny",
Policy: [][][]*proto.Rule{
{
{
{Action: "Pass"},
{Action: "Allow"},
},
},
{
{
{Action: "Deny"},
},
},
},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "explicit allow",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "explicit deny",
Policy: [][][]*proto.Rule{{{{
Action: "Deny",
}}}},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
// Protocol match tests.
{
PolicyName: "allow tcp",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow !tcp",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotProtocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow udp",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "udp"}},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
// CIDR tests.
{
PolicyName: "allow 10.0.0.1/32",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNet: []string{"10.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from 10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow from CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from !CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotSrcNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
},
{
PolicyName: "allow to CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
},
{
PolicyName: "allow to !CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotDstNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from !10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotSrcNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow to 10.0.0.1/32",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNet: []string{"10.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
},
{
PolicyName: "allow to 10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("11.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
},
{
PolicyName: "allow to !10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotDstNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
// Port tests.
{
PolicyName: "allow from tcp:80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow from tcp:80-81",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcPorts: []*proto.PortRange{{
First: 80,
Last: 81,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
tcpPkt("10.0.0.2:81", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:79", "10.0.0.1:31245"),
tcpPkt("10.0.0.2:82", "10.0.0.1:31245"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from tcp:0-80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcPorts: []*proto.PortRange{{
First: 0,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:0", "10.0.0.1:31245"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:81", "10.0.0.1:31245")},
},
{
PolicyName: "allow to tcp:80-65535",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{{
First: 80,
Last: 65535,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:65535")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:79")},
},
{
PolicyName: "allow to tcp:ranges",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{
{First: 80, Last: 81},
{First: 90, Last: 90},
},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:81"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:90")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:79"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:82"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:89"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:91"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
},
{
PolicyName: "allow to tcp:!ranges",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotDstPorts: []*proto.PortRange{
{First: 80, Last: 81},
{First: 90, Last: 90},
},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:79"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:82"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:89"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:91")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:81"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:90"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
},
{
PolicyName: "allow from tcp:!80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotSrcPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow to tcp:80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
// BPF immediate values are signed, check that we don't get tripped up by a sign extension.
PolicyName: "allow to tcp:65535",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{{
First: 65535,
Last: 65535,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:65535")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow to tcp:!80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotDstPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
// IP set tests.
{
PolicyName: "allow from empty IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcIpSetIds: []string{"setA"},
}}}},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
IPSets: map[string][]string{
"setA": {},
},
},
{
PolicyName: "allow from !empty IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotSrcIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
IPSets: map[string][]string{
"setA": {},
},
},
{
PolicyName: "allow from IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080")},
IPSets: map[string][]string{
"setA": {"10.0.0.0/8"},
},
},
{
PolicyName: "allow to IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
IPSets: map[string][]string{
"setA": {"11.0.0.0/8", "123.0.0.1/32"},
},
},
{
PolicyName: "allow from !IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotSrcIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
IPSets: map[string][]string{
"setA": {"10.0.0.0/8"},
},
},
{
PolicyName: "allow to !IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotDstIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
IPSets: map[string][]string{
"setA": {"11.0.0.0/8", "123.0.0.1/32"},
},
},
{
PolicyName: "allow to named port",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNamedPortIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"), // Wrong port
udpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Wrong proto
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Src/dest confusion
tcpPkt("10.0.0.2:31245", "10.0.0.1:80"), // Wrong dest
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80", "123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow to named ports",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNamedPortIpSetIds: []string{"setA", "setB"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"), // Wrong port
udpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Wrong proto
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Src/dest confusion
tcpPkt("10.0.0.2:31245", "10.0.0.1:80"), // Wrong dest
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80"},
"setB": {"123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow to mixed ports",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
// Should match either port or named port
DstPorts: []*proto.PortRange{
{First: 81, Last: 82},
{First: 90, Last: 90},
},
DstNamedPortIpSetIds: []string{"setA", "setB"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:90"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:82")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"), // Wrong port
udpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Wrong proto
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Src/dest confusion
tcpPkt("10.0.0.2:31245", "10.0.0.1:80"), // Wrong dest
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80"},
"setB": {"123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow from named port",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNamedPortIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
udpPkt("123.0.0.1:1024", "10.0.0.2:12345"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:8080", "11.0.0.1:12345"), // Wrong port
udpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Wrong proto
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Src/dest confusion
tcpPkt("10.0.0.1:80", "10.0.0.2:31245"), // Wrong src
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80", "123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow from named ports",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNamedPortIpSetIds: []string{"setA", "setB"},
}}}},
AllowedPackets: []packet{
udpPkt("123.0.0.1:1024", "10.0.0.2:12345"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:8080", "11.0.0.1:12345"), // Wrong port
udpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Wrong proto
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Src/dest confusion
tcpPkt("10.0.0.1:80", "10.0.0.2:31245"), // Wrong src
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80"},
"setB": {"123.0.0.1/32,udp:1024"},
},
},
// TODO ICMP
}
// TestPolicyPrograms runs every entry in polProgramTests as its own subtest,
// named by index and policy name so failures are easy to locate.
func TestPolicyPrograms(t *testing.T) {
	for idx, testCase := range polProgramTests {
		name := fmt.Sprintf("%d:Policy=%s", idx, testCase.PolicyName)
		t.Run(name, testCase.Run)
	}
}
// polProgramTest describes one policy-program test case: the rules to
// compile into a BPF program, the packets that program must allow or drop,
// and any IP sets the rules refer to.
type polProgramTest struct {
	// PolicyName is a human-readable label used in the subtest name.
	PolicyName string
	// Policy is the rule hierarchy handed to polprog.Builder.Instructions
	// (outer-to-inner nesting: presumably tiers/policies/rules — confirm
	// against the builder's API).
	Policy [][][]*proto.Rule
	// AllowedPackets must produce an "allow" verdict from the program.
	AllowedPackets []packet
	// DroppedPackets must produce a "drop" verdict from the program.
	DroppedPackets []packet
	// IPSets maps IP set name to member strings (CIDRs, optionally with a
	// ",proto:port" suffix for named-port entries — see the test data above).
	IPSets map[string][]string
}
// packet is a simplified packet description used to drive the policy
// program via the state map (the program never sees real packet bytes).
type packet struct {
	protocol int    // IP protocol number (6=tcp, 17=udp, 1=icmp).
	srcAddr  string // Source IPv4 address, dotted-quad.
	srcPort  int    // Source port (ignored for ICMP).
	dstAddr  string // Destination IPv4 address, dotted-quad.
	dstPort  int    // Destination port (ignored for ICMP).
}
// String renders the packet as "proto-src:port->dst:port", using a protocol
// name for the common protocols and the raw number otherwise.
func (p packet) String() string {
	var protoName string
	switch p.protocol {
	case 1:
		protoName = "icmp"
	case 6:
		protoName = "tcp"
	case 17:
		protoName = "udp"
	default:
		// Unrecognised protocol: fall back to its number.
		protoName = fmt.Sprint(p.protocol)
	}
	return fmt.Sprintf("%s-%s:%d->%s:%d", protoName, p.srcAddr, p.srcPort, p.dstAddr, p.dstPort)
}
// ToState converts the packet into the state.State structure that the
// policy program reads from the state map.  The destination fields are the
// post-NAT ones since that's what the program matches on.
func (p packet) ToState() state.State {
	var s state.State
	s.IPProto = uint8(p.protocol)
	s.SrcAddr = ipUintFromString(p.srcAddr)
	s.PostNATDstAddr = ipUintFromString(p.dstAddr)
	s.SrcPort = uint16(p.srcPort)
	s.PostNATDstPort = uint16(p.dstPort)
	return s
}
// ipUintFromString parses a dotted-quad IPv4 address and returns it as a
// little-endian uint32 (the byte order used in the BPF state map; see
// TestIPUintFromString).  An empty string maps to 0, so tests can leave an
// address unset.  Invalid or non-IPv4 input also yields 0 rather than the
// nil-pointer panic the previous version would have hit in To4()/Uint32.
func ipUintFromString(addrStr string) uint32 {
	if addrStr == "" {
		return 0
	}
	addr := net.ParseIP(addrStr)
	if addr == nil {
		// Not a parseable IP at all; treat like "unset".
		return 0
	}
	v4 := addr.To4()
	if v4 == nil {
		// IPv6 address; these tests only deal with IPv4.
		return 0
	}
	return binary.LittleEndian.Uint32(v4)
}
// TestIPUintFromString pins down the little-endian encoding that
// ipUintFromString produces.
func TestIPUintFromString(t *testing.T) {
	RegisterTestingT(t)
	const expected = uint32(0x0100000a) // 10.0.0.1, little-endian.
	Expect(ipUintFromString("10.0.0.1")).To(Equal(expected))
}
// Run compiles the test's policy into a BPF program, loads it into the
// kernel, and replays each of the test's packets through it via the state
// map, asserting on the allow/drop verdict for each packet as a subtest.
func (p *polProgramTest) Run(t *testing.T) {
	RegisterTestingT(t)

	// The prog builder refuses to allocate IDs as a precaution, give it an allocator that forces allocations.
	realAlloc := idalloc.New()
	forceAlloc := &forceAllocator{alloc: realAlloc}

	// Make sure the maps are available.
	cleanIPSetMap()
	// FIXME should clean up the maps at the end of each test but recreating the maps seems to be racy
	p.setUpIPSets(realAlloc, ipsMap)

	// Build the program.
	pg := polprog.NewBuilder(forceAlloc, ipsMap.MapFD(), testStateMap.MapFD(), jumpMap.MapFD())
	insns, err := pg.Instructions(p.Policy)
	Expect(err).NotTo(HaveOccurred(), "failed to assemble program")

	// Load the program into the kernel.  We don't pin it so it'll be removed when the
	// test process exits (or by the defer).
	polProgFD, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
	Expect(err).NotTo(HaveOccurred(), "failed to load program into the kernel")
	Expect(polProgFD).NotTo(BeZero())
	defer func() {
		err := polProgFD.Close()
		Expect(err).NotTo(HaveOccurred())
	}()

	// Give the policy program somewhere to jump to.
	epiFD := p.installEpilogueProgram(jumpMap)
	defer func() {
		err := epiFD.Close()
		Expect(err).NotTo(HaveOccurred())
	}()

	log.Debug("Setting up state map")
	// Allowed packets should reach the epilogue program (proof that the
	// policy program jumped) with an explicit "allow" policy RC.
	for _, pkt := range p.AllowedPackets {
		pkt := pkt // Capture a fresh copy for the closure below.
		t.Run(fmt.Sprintf("should allow %v", pkt), func(t *testing.T) {
			RegisterTestingT(t)
			p.runProgram(pkt.ToState(), testStateMap, polProgFD, RCEpilogueReached, polprog.PolRCAllow)
		})
	}
	// Dropped packets should return the drop RC with no policy match.
	for _, pkt := range p.DroppedPackets {
		pkt := pkt // Capture a fresh copy for the closure below.
		t.Run(fmt.Sprintf("should drop %v", pkt), func(t *testing.T) {
			RegisterTestingT(t)
			p.runProgram(pkt.ToState(), testStateMap, polProgFD, RCDrop, polprog.PolRCNoMatch)
		})
	}
}
// installEpilogueProgram installs a trivial BPF program into the jump table that returns RCEpilogueReached.
func (p *polProgramTest) installEpilogueProgram(jumpMap bpf.Map) bpf.ProgFD {
b := asm.NewBlock()
// Load the RC into the return register.
b.MovImm64(asm.R0, RCEpilogueReached)
// Exit!
b.Exit()
epiInsns, err := b.Assemble()
Expect(err).NotTo(HaveOccurred())
epiFD, err := bpf.LoadBPFProgramFromInsns(epiInsns, "Apache-2.0")
Expect(err).NotTo(HaveOccurred(), "failed to load program into the kernel")
Expect(epiFD).NotTo(BeZero())
jumpValue := make([]byte, 4)
binary.LittleEndian.PutUint32(jumpValue, uint32(epiFD))
err = jumpMap.Update([]byte{1, 0, 0, 0}, jumpValue)
Expect(err).NotTo(HaveOccurred())
return epiFD
}
// runProgram writes stateIn into the state map, runs the BPF program once,
// then asserts on both the program's return code (expProgRC) and the policy
// RC that the program wrote back into the state map (expPolRC).  It also
// verifies that no other state fields were modified by the program.
func (p *polProgramTest) runProgram(stateIn state.State, stateMap bpf.Map, progFD bpf.ProgFD, expProgRC int, expPolRC int) {
	// The policy program takes its input from the state map (rather than looking at the
	// packet).  Set up the state map.
	stateMapKey := []byte{0, 0, 0, 0} // State map has a single key
	stateBytesIn := stateIn.AsBytes()
	log.WithField("stateBytes", stateBytesIn).Debug("State bytes in")
	log.Debugf("State in %#v", stateIn)
	err := stateMap.Update(stateMapKey, stateBytesIn)
	Expect(err).NotTo(HaveOccurred(), "failed to update state map")

	log.Debug("Running BPF program")
	// The dummy 1000-byte buffer stands in for a packet; the program reads
	// its real input from the state map, not the packet.
	result, err := bpf.RunBPFProgram(progFD, make([]byte, 1000), 1)
	Expect(err).NotTo(HaveOccurred())

	log.Debug("Checking result...")
	stateBytesOut, err := stateMap.Get(stateMapKey)
	Expect(err).NotTo(HaveOccurred())
	log.WithField("stateBytes", stateBytesOut).Debug("State bytes out")
	stateOut := state.StateFromBytes(stateBytesOut)
	log.Debugf("State out %#v", stateOut)
	Expect(stateOut.PolicyRC).To(BeNumerically("==", expPolRC), "policy RC was incorrect")
	Expect(result.RC).To(BeNumerically("==", expProgRC), "program RC was incorrect")

	// Check no other fields got clobbered.
	expectedStateOut := stateIn
	expectedStateOut.PolicyRC = int32(expPolRC)
	Expect(stateOut).To(Equal(expectedStateOut), "policy program modified unexpected parts of the state")
}
// setUpIPSets writes the test's IP set members into the IP sets map,
// allocating a numeric ID for each named set as it goes.
func (p *polProgramTest) setUpIPSets(alloc *idalloc.IDAllocator, ipsMap bpf.Map) {
	for setName, memberList := range p.IPSets {
		setID := alloc.GetOrAlloc(setName)
		for _, member := range memberList {
			bpfEntry := ipsets.ProtoIPSetMemberToBPFEntry(setID, member)
			Expect(ipsMap.Update(bpfEntry[:], ipsets.DummyValue)).NotTo(HaveOccurred())
		}
	}
}
// cleanIPSetMap removes any leftover IP set entries so each test starts
// from a known-empty map.  (The other maps have a fixed number of keys that
// we set as needed.)  Keys are collected first and deleted afterwards,
// rather than deleting during iteration.
func cleanIPSetMap() {
	var staleKeys [][]byte
	err := ipsMap.Iter(func(k, v []byte) {
		staleKeys = append(staleKeys, k)
	})
	Expect(err).NotTo(HaveOccurred(), "failed to clean out map before test")
	for _, key := range staleKeys {
		Expect(ipsMap.Delete(key)).NotTo(HaveOccurred(), "failed to clean out map before test")
	}
}
| 1 | 17,658 | Golang naming convention is to use camel case `icmpPktWithTypeCode` Often the linter will complain | projectcalico-felix | go |
@@ -127,11 +127,7 @@ namespace OpenTelemetry.Exporter.Zipkin
{
using (var response = await client.SendAsync(request).ConfigureAwait(false))
{
- if (response.StatusCode != HttpStatusCode.OK &&
- response.StatusCode != HttpStatusCode.Accepted)
- {
- var statusCode = (int)response.StatusCode;
- }
+ // disposing response
}
}
| 1 | // <copyright file="ZipkinTraceExporter.cs" company="OpenTelemetry Authors">
// Copyright 2018, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Net.Sockets;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
using OpenTelemetry.Exporter.Zipkin.Implementation;
using OpenTelemetry.Trace.Export;
namespace OpenTelemetry.Exporter.Zipkin
{
/// <summary>
/// Zipkin exporter.
/// </summary>
public class ZipkinTraceExporter : SpanExporter
{
private const long MillisPerSecond = 1000L;
private const long NanosPerMillisecond = 1000 * 1000;
private const long NanosPerSecond = NanosPerMillisecond * MillisPerSecond;
private static readonly JsonSerializerOptions Options = new JsonSerializerOptions
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
};
private readonly ZipkinTraceExporterOptions options;
private readonly ZipkinEndpoint localEndpoint;
private readonly HttpClient httpClient;
private readonly string serviceEndpoint;
/// <summary>
/// Initializes a new instance of the <see cref="ZipkinTraceExporter"/> class.
/// </summary>
/// <param name="options">Configuration options.</param>
/// <param name="client">Http client to use to upload telemetry.</param>
public ZipkinTraceExporter(ZipkinTraceExporterOptions options, HttpClient client = null)
{
this.options = options;
this.localEndpoint = this.GetLocalZipkinEndpoint();
this.httpClient = client ?? new HttpClient();
this.serviceEndpoint = options.Endpoint?.ToString();
}
/// <inheritdoc/>
public override async Task<ExportResult> ExportAsync(IEnumerable<SpanData> otelSpanList, CancellationToken cancellationToken)
{
var zipkinSpans = new List<ZipkinSpan>();
foreach (var data in otelSpanList)
{
bool shouldExport = true;
foreach (var label in data.Attributes)
{
if (label.Key == "http.url")
{
if (label.Value is string urlStr && urlStr == this.serviceEndpoint)
{
// do not track calls to Zipkin
shouldExport = false;
}
break;
}
}
if (shouldExport)
{
var zipkinSpan = data.ToZipkinSpan(this.localEndpoint, this.options.UseShortTraceIds);
zipkinSpans.Add(zipkinSpan);
}
}
if (zipkinSpans.Count == 0)
{
return ExportResult.Success;
}
try
{
await this.SendSpansAsync(zipkinSpans, cancellationToken);
return ExportResult.Success;
}
catch (Exception)
{
// TODO distinguish retryable exceptions
return ExportResult.FailedNotRetryable;
}
}
/// <inheritdoc/>
public override Task ShutdownAsync(CancellationToken cancellationToken)
{
return Task.CompletedTask;
}
private Task SendSpansAsync(IEnumerable<ZipkinSpan> spans, CancellationToken cancellationToken)
{
var requestUri = this.options.Endpoint;
var request = this.GetHttpRequestMessage(HttpMethod.Post, requestUri);
request.Content = this.GetRequestContent(spans);
// avoid cancelling here: this is no return point: if we reached this point
// and cancellation is requested, it's better if we try to finish sending spans rather than drop it
return this.DoPostAsync(this.httpClient, request);
}
private async Task DoPostAsync(HttpClient client, HttpRequestMessage request)
{
using (var response = await client.SendAsync(request).ConfigureAwait(false))
{
if (response.StatusCode != HttpStatusCode.OK &&
response.StatusCode != HttpStatusCode.Accepted)
{
var statusCode = (int)response.StatusCode;
}
}
}
private HttpRequestMessage GetHttpRequestMessage(HttpMethod method, Uri requestUri)
{
var request = new HttpRequestMessage(method, requestUri);
return request;
}
private HttpContent GetRequestContent(IEnumerable<ZipkinSpan> toSerialize)
{
return new JsonContent(toSerialize, Options);
}
private ZipkinEndpoint GetLocalZipkinEndpoint()
{
var result = new ZipkinEndpoint()
{
ServiceName = this.options.ServiceName,
};
var hostName = this.ResolveHostName();
if (!string.IsNullOrEmpty(hostName))
{
result.Ipv4 = this.ResolveHostAddress(hostName, AddressFamily.InterNetwork);
result.Ipv6 = this.ResolveHostAddress(hostName, AddressFamily.InterNetworkV6);
}
return result;
}
private string ResolveHostAddress(string hostName, AddressFamily family)
{
string result = null;
try
{
var results = Dns.GetHostAddresses(hostName);
if (results != null && results.Length > 0)
{
foreach (var addr in results)
{
if (addr.AddressFamily.Equals(family))
{
var sanitizedAddress = new IPAddress(addr.GetAddressBytes()); // Construct address sans ScopeID
result = sanitizedAddress.ToString();
break;
}
}
}
}
catch (Exception)
{
// Ignore
}
return result;
}
private string ResolveHostName()
{
string result = null;
try
{
result = Dns.GetHostName();
if (!string.IsNullOrEmpty(result))
{
var response = Dns.GetHostEntry(result);
if (response != null)
{
return response.HostName;
}
}
}
catch (Exception)
{
// Ignore
}
return result;
}
private class JsonContent : HttpContent
{
private static readonly MediaTypeHeaderValue JsonHeader = new MediaTypeHeaderValue("application/json")
{
CharSet = "utf-8",
};
private readonly IEnumerable<ZipkinSpan> spans;
private readonly JsonSerializerOptions options;
public JsonContent(IEnumerable<ZipkinSpan> spans, JsonSerializerOptions options)
{
this.spans = spans;
this.options = options;
this.Headers.ContentType = JsonHeader;
}
protected override async Task SerializeToStreamAsync(Stream stream, TransportContext context)
=> await JsonSerializer.SerializeAsync(stream, this.spans, this.options).ConfigureAwait(false);
protected override bool TryComputeLength(out long length)
{
// We can't know the length of the content being pushed to the output stream.
length = -1;
return false;
}
}
}
}
| 1 | 13,220 | We can do away with assigning the response here too. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -18,7 +18,7 @@ function ngGridFlexibleHeightPlugin (opts) {
}
}
- var newViewportHeight = naturalHeight + 2;
+ var newViewportHeight = naturalHeight + 3;
if (!self.scope.baseViewportHeight || self.scope.baseViewportHeight !== newViewportHeight) {
self.grid.$viewport.css('height', newViewportHeight + 'px');
self.grid.$root.css('height', (newViewportHeight + extraHeight) + 'px'); | 1 | function ngGridFlexibleHeightPlugin (opts) {
var self = this;
self.grid = null;
self.scope = null;
self.init = function (scope, grid, services) {
self.domUtilityService = services.DomUtilityService;
self.grid = grid;
self.scope = scope;
var recalcHeightForData = function () { setTimeout(innerRecalcForData, 1); };
var innerRecalcForData = function () {
var gridId = self.grid.gridId;
var footerPanelSel = '.' + gridId + ' .ngFooterPanel';
var extraHeight = self.grid.$topPanel.height() + $(footerPanelSel).height();
var naturalHeight = self.grid.$canvas.height() + 1;
if (opts != null) {
if (opts.minHeight != null && (naturalHeight + extraHeight) < opts.minHeight) {
naturalHeight = opts.minHeight - extraHeight - 2;
}
}
var newViewportHeight = naturalHeight + 2;
if (!self.scope.baseViewportHeight || self.scope.baseViewportHeight !== newViewportHeight) {
self.grid.$viewport.css('height', newViewportHeight + 'px');
self.grid.$root.css('height', (newViewportHeight + extraHeight) + 'px');
self.scope.baseViewportHeight = newViewportHeight;
self.domUtilityService.RebuildGrid(self.scope, self.grid);
}
};
self.scope.catHashKeys = function () {
var hash = '',
idx;
for (idx in self.scope.renderedRows) {
hash += self.scope.renderedRows[idx].$$hashKey;
}
return hash;
};
self.scope.$watch('catHashKeys()', innerRecalcForData);
self.scope.$watch(self.grid.config.data, recalcHeightForData);
};
}
| 1 | 9,862 | Bumping the newViewportHeight | angular-ui-ui-grid | js |
@@ -47,7 +47,7 @@ import org.apache.solr.util.LongIterator;
* <li><a href="https://github.com/aggregateknowledge/postgresql-hll">postgresql-hll</a>, and</li>
* <li><a href="https://github.com/aggregateknowledge/js-hll">js-hll</a></li>
* </ul>
- * when <a href="https://github.com/aggregateknowledge/postgresql-hll/blob/master/STORAGE.markdown">properly serialized</a>.
+ * when <a href="https://github.com/aggregateknowledge/postgresql-hll/blob/secondaryy/STORAGE.markdown">properly serialized</a>.
*/
public class HLL implements Cloneable {
// minimum and maximum values for the log-base-2 of the number of registers | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.util.hll;
import java.util.Arrays;
import com.carrotsearch.hppc.IntByteHashMap;
import com.carrotsearch.hppc.LongHashSet;
import com.carrotsearch.hppc.cursors.IntByteCursor;
import com.carrotsearch.hppc.cursors.LongCursor;
import org.apache.solr.util.LongIterator;
/**
* A probabilistic set of hashed <code>long</code> elements. Useful for computing
* the approximate cardinality of a stream of data in very small storage.
*
* A modified version of the <a href="http://algo.inria.fr/flajolet/Publications/FlFuGaMe07.pdf">
* 'HyperLogLog' data structure and algorithm</a> is used, which combines both
* probabilistic and non-probabilistic techniques to improve the accuracy and
* storage requirements of the original algorithm.
*
* More specifically, initializing and storing a new {@link HLL} will
* allocate a sentinel value symbolizing the empty set ({@link HLLType#EMPTY}).
* After adding the first few values, a sorted list of unique integers is
* stored in a {@link HLLType#EXPLICIT} hash set. When configured, accuracy can
* be sacrificed for memory footprint: the values in the sorted list are
* "promoted" to a "{@link HLLType#SPARSE}" map-based HyperLogLog structure.
* Finally, when enough registers are set, the map-based HLL will be converted
* to a bit-packed "{@link HLLType#FULL}" HyperLogLog structure.
*
* This data structure is interoperable with the implementations found at:
* <ul>
* <li><a href="https://github.com/aggregateknowledge/postgresql-hll">postgresql-hll</a>, and</li>
* <li><a href="https://github.com/aggregateknowledge/js-hll">js-hll</a></li>
* </ul>
* when <a href="https://github.com/aggregateknowledge/postgresql-hll/blob/master/STORAGE.markdown">properly serialized</a>.
*/
public class HLL implements Cloneable {
// minimum and maximum values for the log-base-2 of the number of registers
// in the HLL
public static final int MINIMUM_LOG2M_PARAM = 4;
public static final int MAXIMUM_LOG2M_PARAM = 30;
// minimum and maximum values for the register width of the HLL
public static final int MINIMUM_REGWIDTH_PARAM = 1;
public static final int MAXIMUM_REGWIDTH_PARAM = 8;
// minimum and maximum values for the 'expthresh' parameter of the
// constructor that is meant to match the PostgreSQL implementation's
// constructor and parameter names
public static final int MINIMUM_EXPTHRESH_PARAM = -1;
public static final int MAXIMUM_EXPTHRESH_PARAM = 18;
public static final int MAXIMUM_EXPLICIT_THRESHOLD = (1 << (MAXIMUM_EXPTHRESH_PARAM - 1)/*per storage spec*/);
// ************************************************************************
// Storage
// storage used when #type is EXPLICIT, null otherwise
LongHashSet explicitStorage;
// storage used when #type is SPARSE, null otherwise
IntByteHashMap sparseProbabilisticStorage;
// storage used when #type is FULL, null otherwise
BitVector probabilisticStorage;
// current type of this HLL instance, if this changes then so should the
// storage used (see above)
private HLLType type;
// ------------------------------------------------------------------------
// Characteristic parameters
// NOTE: These members are named to match the PostgreSQL implementation's
// parameters.
// log2(the number of probabilistic HLL registers)
private final int log2m;
// the size (width) each register in bits
private final int regwidth;
// ------------------------------------------------------------------------
// Computed constants
// ........................................................................
// EXPLICIT-specific constants
// flag indicating if the EXPLICIT representation should NOT be used
private final boolean explicitOff;
// flag indicating that the promotion threshold from EXPLICIT should be
// computed automatically
// NOTE: this only has meaning when 'explicitOff' is false
private final boolean explicitAuto;
// threshold (in element count) at which a EXPLICIT HLL is converted to a
// SPARSE or FULL HLL, always greater than or equal to zero and always a
// power of two OR simply zero
// NOTE: this only has meaning when 'explicitOff' is false
private final int explicitThreshold;
// ........................................................................
// SPARSE-specific constants
// the computed width of the short words
private final int shortWordLength;
// flag indicating if the SPARSE representation should not be used
private final boolean sparseOff;
// threshold (in register count) at which a SPARSE HLL is converted to a
// FULL HLL, always greater than zero
private final int sparseThreshold;
// ........................................................................
// Probabilistic algorithm constants
// the number of registers, will always be a power of 2
private final int m;
// a mask of the log2m bits set to one and the rest to zero
private final int mBitsMask;
// a mask as wide as a register (see #fromBytes())
private final int valueMask;
// mask used to ensure that p(w) does not overflow register (see #Constructor() and #addRaw())
private final long pwMaxMask;
// alpha * m^2 (the constant in the "'raw' HyperLogLog estimator")
private final double alphaMSquared;
// the cutoff value of the estimator for using the "small" range cardinality
// correction formula
private final double smallEstimatorCutoff;
// the cutoff value of the estimator for using the "large" range cardinality
// correction formula
private final double largeEstimatorCutoff;
// ========================================================================
/**
* NOTE: Arguments here are named and structured identically to those in the
* PostgreSQL implementation, which can be found
* <a href="https://github.com/aggregateknowledge/postgresql-hll/blob/master/README.markdown#explanation-of-parameters-and-tuning">here</a>.
*
* @param log2m log-base-2 of the number of registers used in the HyperLogLog
* algorithm. Must be at least 4 and at most 30.
* @param regwidth number of bits used per register in the HyperLogLog
* algorithm. Must be at least 1 and at most 8.
* @param expthresh tunes when the {@link HLLType#EXPLICIT} to
* {@link HLLType#SPARSE} promotion occurs,
* based on the set's cardinality. Must be at least -1 and at most 18.
* @param sparseon Flag indicating if the {@link HLLType#SPARSE}
* representation should be used.
* @param type the type in the promotion hierarchy which this instance should
* start at. This cannot be <code>null</code>.
*/
public HLL(final int log2m, final int regwidth, final int expthresh, final boolean sparseon, final HLLType type) {
this.log2m = log2m;
if((log2m < MINIMUM_LOG2M_PARAM) || (log2m > MAXIMUM_LOG2M_PARAM)) {
throw new IllegalArgumentException("'log2m' must be at least " + MINIMUM_LOG2M_PARAM + " and at most " + MAXIMUM_LOG2M_PARAM + " (was: " + log2m + ")");
}
this.regwidth = regwidth;
if((regwidth < MINIMUM_REGWIDTH_PARAM) || (regwidth > MAXIMUM_REGWIDTH_PARAM)) {
throw new IllegalArgumentException("'regwidth' must be at least " + MINIMUM_REGWIDTH_PARAM + " and at most " + MAXIMUM_REGWIDTH_PARAM + " (was: " + regwidth + ")");
}
this.m = (1 << log2m);
this.mBitsMask = m - 1;
this.valueMask = (1 << regwidth) - 1;
this.pwMaxMask = HLLUtil.pwMaxMask(regwidth);
this.alphaMSquared = HLLUtil.alphaMSquared(m);
this.smallEstimatorCutoff = HLLUtil.smallEstimatorCutoff(m);
this.largeEstimatorCutoff = HLLUtil.largeEstimatorCutoff(log2m, regwidth);
if(expthresh == -1) {
this.explicitAuto = true;
this.explicitOff = false;
// NOTE: This math matches the size calculation in the PostgreSQL impl.
final long fullRepresentationSize = (this.regwidth * (long)this.m + 7/*round up to next whole byte*/)/Byte.SIZE;
final int numLongs = (int)(fullRepresentationSize / 8/*integer division to round down*/);
if(numLongs > MAXIMUM_EXPLICIT_THRESHOLD) {
this.explicitThreshold = MAXIMUM_EXPLICIT_THRESHOLD;
} else {
this.explicitThreshold = numLongs;
}
} else if(expthresh == 0) {
this.explicitAuto = false;
this.explicitOff = true;
this.explicitThreshold = 0;
} else if((expthresh > 0) && (expthresh <= MAXIMUM_EXPTHRESH_PARAM)){
this.explicitAuto = false;
this.explicitOff = false;
this.explicitThreshold = (1 << (expthresh - 1));
} else {
throw new IllegalArgumentException("'expthresh' must be at least " + MINIMUM_EXPTHRESH_PARAM + " and at most " + MAXIMUM_EXPTHRESH_PARAM + " (was: " + expthresh + ")");
}
this.shortWordLength = (regwidth + log2m);
this.sparseOff = !sparseon;
if(this.sparseOff) {
this.sparseThreshold = 0;
} else {
// TODO improve this cutoff to include the cost overhead of Java
// members/objects
final int largestPow2LessThanCutoff =
(int)NumberUtil.log2((this.m * this.regwidth) / this.shortWordLength);
this.sparseThreshold = (1 << largestPow2LessThanCutoff);
}
initializeStorage(type);
}
/**
* Construct an empty HLL with the given {@code log2m} and {@code regwidth}.
*
* This is equivalent to calling <code>HLL(log2m, regwidth, -1, true, HLLType.EMPTY)</code>.
*
* @param log2m log-base-2 of the number of registers used in the HyperLogLog
* algorithm. Must be at least 4 and at most 30.
* @param regwidth number of bits used per register in the HyperLogLog
* algorithm. Must be at least 1 and at most 8.
*
* @see #HLL(int, int, int, boolean, HLLType)
*/
public HLL(final int log2m, final int regwidth) {
this(log2m, regwidth, -1, true, HLLType.EMPTY);
}
// -------------------------------------------------------------------------
/**
 * Convenience constructor for testing. Assumes that both {@link HLLType#EXPLICIT}
 * and {@link HLLType#SPARSE} representations should be enabled.
 *
 * Unlike the public constructor, the thresholds here are taken verbatim
 * rather than being derived from an <code>expthresh</code> parameter.
 *
 * @param log2m log-base-2 of the number of registers used in the HyperLogLog
 *        algorithm. Must be at least 4 and at most 30.
 * @param regwidth number of bits used per register in the HyperLogLog
 *        algorithm. Must be at least 1 and at most 8.
 * @param explicitThreshold cardinality threshold at which the {@link HLLType#EXPLICIT}
 *        representation should be promoted to {@link HLLType#SPARSE}.
 *        This must be greater than zero and less than or equal to {@value #MAXIMUM_EXPLICIT_THRESHOLD}.
 * @param sparseThreshold register count threshold at which the {@link HLLType#SPARSE}
 *        representation should be promoted to {@link HLLType#FULL}.
 *        This must be greater than zero.
 * @param type the type in the promotion hierarchy which this instance should
 *        start at. This cannot be <code>null</code>.
 */
/*package, for testing*/ HLL(final int log2m, final int regwidth, final int explicitThreshold, final int sparseThreshold, final HLLType type) {
    this.log2m = log2m;
    if((log2m < MINIMUM_LOG2M_PARAM) || (log2m > MAXIMUM_LOG2M_PARAM)) {
        throw new IllegalArgumentException("'log2m' must be at least " + MINIMUM_LOG2M_PARAM + " and at most " + MAXIMUM_LOG2M_PARAM + " (was: " + log2m + ")");
    }

    this.regwidth = regwidth;
    if((regwidth < MINIMUM_REGWIDTH_PARAM) || (regwidth > MAXIMUM_REGWIDTH_PARAM)) {
        throw new IllegalArgumentException("'regwidth' must be at least " + MINIMUM_REGWIDTH_PARAM + " and at most " + MAXIMUM_REGWIDTH_PARAM + " (was: " + regwidth + ")");
    }

    // derived masks and estimator constants (see HLLUtil)
    this.m = (1 << log2m); // register count
    this.mBitsMask = m - 1; // selects the register-index bits of a raw value
    this.valueMask = (1 << regwidth) - 1; // selects a register's value bits
    this.pwMaxMask = HLLUtil.pwMaxMask(regwidth);
    this.alphaMSquared = HLLUtil.alphaMSquared(m);
    this.smallEstimatorCutoff = HLLUtil.smallEstimatorCutoff(m);
    this.largeEstimatorCutoff = HLLUtil.largeEstimatorCutoff(log2m, regwidth);

    // EXPLICIT threshold is taken as-is (neither auto-derived nor disabled)
    this.explicitAuto = false;
    this.explicitOff = false;
    this.explicitThreshold = explicitThreshold;
    if((explicitThreshold < 1) || (explicitThreshold > MAXIMUM_EXPLICIT_THRESHOLD)) {
        throw new IllegalArgumentException("'explicitThreshold' must be at least 1 and at most " + MAXIMUM_EXPLICIT_THRESHOLD + " (was: " + explicitThreshold + ")");
    }

    this.shortWordLength = (regwidth + log2m); // bits per encoded SPARSE entry
    this.sparseOff = false;
    this.sparseThreshold = sparseThreshold;

    initializeStorage(type);
}
/**
 * @return the type in the promotion hierarchy of this instance. This will
 *         never be <code>null</code>. NOTE: the type may change as values
 *         are added (see {@link #addRaw(long)} promotion).
 */
public HLLType getType() { return type; }
// ========================================================================
// Add
/**
 * Adds <code>rawValue</code> directly to the HLL, promoting this instance
 * to the next representation in the hierarchy (EXPLICIT, SPARSE, FULL)
 * when the current one's capacity threshold is exceeded.
 *
 * @param rawValue the value to be added. It is very important that this
 *        value <em>already be hashed</em> with a strong (but not
 *        necessarily cryptographic) hash function. For instance, the
 *        Murmur3 implementation in
 *        <a href="http://guava-libraries.googlecode.com/git/guava/src/com/google/common/hash/Murmur3_128HashFunction.java">
 *        Google's Guava</a> library is an excellent hash function for this
 *        purpose and, for seeds greater than zero, matches the output
 *        of the hash provided in the PostgreSQL implementation.
 */
public void addRaw(final long rawValue) {
    switch(type) {
        case EMPTY: {
            // NOTE: EMPTY type is always promoted on #addRaw() -- to the
            //       smallest enabled representation in the hierarchy.
            if(explicitThreshold > 0) {
                initializeStorage(HLLType.EXPLICIT);
                explicitStorage.add(rawValue);
            } else if(!sparseOff) {
                initializeStorage(HLLType.SPARSE);
                addRawSparseProbabilistic(rawValue);
            } else {
                initializeStorage(HLLType.FULL);
                addRawProbabilistic(rawValue);
            }
            return;
        }
        case EXPLICIT: {
            explicitStorage.add(rawValue);

            // promotion, if necessary: replay all explicit values into the
            // next enabled representation
            if(explicitStorage.size() > explicitThreshold) {
                if(!sparseOff) {
                    initializeStorage(HLLType.SPARSE);
                    for (LongCursor c : explicitStorage) {
                        addRawSparseProbabilistic(c.value);
                    }
                } else {
                    initializeStorage(HLLType.FULL);
                    for (LongCursor c : explicitStorage) {
                        addRawProbabilistic(c.value);
                    }
                }
                explicitStorage = null; // release the superseded storage
            }
            return;
        }
        case SPARSE: {
            addRawSparseProbabilistic(rawValue);

            // promotion, if necessary: copy each set register into the
            // dense bit vector
            if(sparseProbabilisticStorage.size() > sparseThreshold) {
                initializeStorage(HLLType.FULL);
                for(IntByteCursor c : sparseProbabilisticStorage) {
                    final int registerIndex = c.key;
                    final byte registerValue = c.value;
                    probabilisticStorage.setMaxRegister(registerIndex, registerValue);
                }
                sparseProbabilisticStorage = null; // release the superseded storage
            }
            return;
        }
        case FULL:
            addRawProbabilistic(rawValue);
            return;
        default:
            throw new RuntimeException("Unsupported HLL type " + type);
    }
}
// ------------------------------------------------------------------------
// #addRaw(..) helpers
/**
 * Adds the raw value to the {@link #sparseProbabilisticStorage}.
 * {@link #type} must be {@link HLLType#SPARSE}.
 *
 * @param rawValue the raw value to add to the sparse storage.
 */
private void addRawSparseProbabilistic(final long rawValue) {
    // p(w): one-indexed position of the least significant set bit of the
    // substream value. By construction of pwMaxMask (see #Constructor()),
    // lsb(pwMaxMask) = 2^(registerValueInBits) - 2, so OR-ing with it caps
    // the computed value at the maximum register value,
    // 2^(registerValueInBits) - 1.
    final long substream = (rawValue >>> log2m);
    final byte newValue;
    if(substream == 0L) {
        // The paper does not define p(0x0); zero is used since it is the
        // registers' initial value and is therefore simply ignored by the
        // multiset. The probability of hitting this case is
        // 1/(2^(2^registerSizeInBits)).
        newValue = 0;
    } else {
        newValue = (byte)(1 + BitUtil.leastSignificantBit(substream | pwMaxMask));
    }

    // A zero register is algorithmically "unset", and unset registers are
    // never stored (the very reason this sparse implementation exists).
    // Storing a zero would also break the #algorithmCardinality code.
    if(newValue == 0) {
        return;
    }

    // register index; no +1 as in the paper since indexing is 0-based
    final int registerIndex = (int)(rawValue & mBitsMask);

    // keep the maximum of the existing and incoming register values;
    // a missing entry is equivalent to a zero-valued register
    final byte existingValue = sparseProbabilisticStorage.containsKey(registerIndex)
        ? sparseProbabilisticStorage.get(registerIndex)
        : (byte)0;
    if(newValue > existingValue) {
        sparseProbabilisticStorage.put(registerIndex, newValue);
    }
}
/**
 * Adds the raw value to the {@link #probabilisticStorage}.
 * {@link #type} must be {@link HLLType#FULL}.
 *
 * @param rawValue the raw value to add to the full probabilistic storage.
 */
private void addRawProbabilistic(final long rawValue) {
    // p(w): one-indexed position of the least significant set bit of the
    // substream value. By construction of pwMaxMask (see #Constructor()),
    // lsb(pwMaxMask) = 2^(registerValueInBits) - 2, so OR-ing with it caps
    // the computed value at the maximum register value,
    // 2^(registerValueInBits) - 1.
    final long substream = (rawValue >>> log2m);
    final byte newValue;
    if(substream == 0L) {
        // The paper does not define p(0x0); zero is used since it is the
        // registers' initial value and is therefore simply ignored by the
        // multiset. The probability of hitting this case is
        // 1/(2^(2^registerSizeInBits)).
        newValue = 0;
    } else {
        newValue = (byte)(1 + BitUtil.leastSignificantBit(substream | pwMaxMask));
    }

    // A zero register is algorithmically "unset"; writing it would be a
    // no-op at best and would break the #algorithmCardinality code.
    if(newValue == 0) {
        return;
    }

    // register index; no +1 as in the paper since indexing is 0-based
    final int registerIndex = (int)(rawValue & mBitsMask);
    probabilisticStorage.setMaxRegister(registerIndex, newValue);
}
// ------------------------------------------------------------------------
// Storage helper
/**
 * Initializes storage for the specified {@link HLLType} and changes the
 * instance's {@link #type}.
 *
 * @param type the {@link HLLType} to initialize storage for. This cannot be
 *        <code>null</code> and must be an instantiable type.
 */
private void initializeStorage(final HLLType type) {
    this.type = type;
    if(type == HLLType.EMPTY) {
        // EMPTY needs no backing storage
    } else if(type == HLLType.EXPLICIT) {
        this.explicitStorage = new LongHashSet();
    } else if(type == HLLType.SPARSE) {
        this.sparseProbabilisticStorage = new IntByteHashMap();
    } else if(type == HLLType.FULL) {
        this.probabilisticStorage = new BitVector(regwidth, m);
    } else {
        throw new RuntimeException("Unsupported HLL type " + type);
    }
}
// ========================================================================
// Cardinality
/**
 * Computes the cardinality of the HLL.
 *
 * @return the cardinality of HLL. This will never be negative.
 */
public long cardinality() {
    if(type == HLLType.EMPTY) {
        return 0/*by definition*/;
    }
    if(type == HLLType.EXPLICIT) {
        // EXPLICIT stores the values themselves, so the count is exact
        return explicitStorage.size();
    }
    if(type == HLLType.SPARSE) {
        return (long)Math.ceil(sparseProbabilisticAlgorithmCardinality());
    }
    if(type == HLLType.FULL) {
        return (long)Math.ceil(fullProbabilisticAlgorithmCardinality());
    }
    throw new RuntimeException("Unsupported HLL type " + type);
}
// ------------------------------------------------------------------------
// Cardinality helpers
/**
 * Computes the exact cardinality value returned by the HLL algorithm when
 * represented as a {@link HLLType#SPARSE} HLL. Kept
 * separate from {@link #cardinality()} for testing purposes. {@link #type}
 * must be {@link HLLType#SPARSE}.
 *
 * @return the exact, unrounded cardinality given by the HLL algorithm
 */
/*package, for testing*/ double sparseProbabilisticAlgorithmCardinality() {
    final int m = this.m/*for performance*/;

    // "indicator function": sum over all registers of 2^(-M[j]), counting
    // zero-valued ("unset") registers along the way ("V" in the paper).
    // NOTE: iteration is in ascending register order so the floating-point
    //       summation order is deterministic.
    double indicator = 0;
    int zeroCount = 0;
    for(int j=0; j<m; j++) {
        final long register = sparseProbabilisticStorage.containsKey(j)
            ? sparseProbabilisticStorage.get(j)
            : 0;

        indicator += 1.0 / (1L << register);
        if(register == 0L) zeroCount++;
    }

    // raw estimate with small-range and large-range corrections applied
    final double estimator = alphaMSquared / indicator;
    if((zeroCount != 0) && (estimator < smallEstimatorCutoff)) {
        return HLLUtil.smallEstimator(m, zeroCount);
    }
    if(estimator <= largeEstimatorCutoff) {
        return estimator;
    }
    return HLLUtil.largeEstimator(log2m, regwidth, estimator);
}
/**
 * Computes the exact cardinality value returned by the HLL algorithm when
 * represented as a {@link HLLType#FULL} HLL. Kept
 * separate from {@link #cardinality()} for testing purposes. {@link #type}
 * must be {@link HLLType#FULL}.
 *
 * @return the exact, unrounded cardinality given by the HLL algorithm
 */
/*package, for testing*/ double fullProbabilisticAlgorithmCardinality() {
    final int m = this.m/*for performance*/;

    // "indicator function": sum over all registers of 2^(-M[j]), counting
    // zero-valued registers along the way ("V" in the paper)
    double indicator = 0;
    int zeroCount = 0;
    final LongIterator registers = probabilisticStorage.registerIterator();
    while(registers.hasNext()) {
        final long register = registers.next();

        indicator += 1.0 / (1L << register);
        if(register == 0L) zeroCount++;
    }

    // raw estimate with small-range and large-range corrections applied
    final double estimator = alphaMSquared / indicator;
    if((zeroCount != 0) && (estimator < smallEstimatorCutoff)) {
        return HLLUtil.smallEstimator(m, zeroCount);
    }
    if(estimator <= largeEstimatorCutoff) {
        return estimator;
    }
    return HLLUtil.largeEstimator(log2m, regwidth, estimator);
}
// ========================================================================
// Clear
/**
 * Clears the HLL. The HLL will have cardinality zero and will act as if no
 * elements have been added.
 *
 * NOTE: Unlike {@link #addRaw(long)}, <code>clear</code> does NOT handle
 * transitions between {@link HLLType}s - a probabilistic type will remain
 * probabilistic after being cleared.
 */
public void clear() {
    if(type == HLLType.EMPTY) {
        return /*do nothing*/;
    }
    if(type == HLLType.EXPLICIT) {
        explicitStorage.clear();
    } else if(type == HLLType.SPARSE) {
        sparseProbabilisticStorage.clear();
    } else if(type == HLLType.FULL) {
        // zero all registers in place (the "unset" value)
        probabilisticStorage.fill(0);
    } else {
        throw new RuntimeException("Unsupported HLL type " + type);
    }
}
// ========================================================================
// Union
/**
 * Computes the union of HLLs and stores the result in this instance.
 *
 * @param other the other {@link HLL} instance to union into this one. This
 *        cannot be <code>null</code>.
 */
public void union(final HLL other) {
    // TODO: verify HLLs are compatible
    // Dispatch on whether the two instances share a representation type.
    final HLLType otherType = other.getType();
    if(!type.equals(otherType)) {
        heterogenousUnion(other);
    } else {
        homogeneousUnion(other);
    }
}
// ------------------------------------------------------------------------
// Union helpers
/**
 * Computes the union of two HLLs, of different types, and stores the
 * result in this instance.
 *
 * @param other the other {@link HLL} instance to union into this one. This
 *        cannot be <code>null</code>.
 */
/*package, for testing*/ void heterogenousUnion(final HLL other) {
    /*
     * The logic here is divided into two sections: unions with an EMPTY
     * HLL, and unions between EXPLICIT/SPARSE/FULL
     * HLL.
     *
     * Between those two sections, all possible heterogeneous unions are
     * covered. Should another type be added to HLLType whose unions
     * are not easily reduced (say, as EMPTY's are below) this may be more
     * easily implemented as Strategies. However, that is unnecessary as it
     * stands.
     */
    // ....................................................................
    // Union with an EMPTY
    if(HLLType.EMPTY.equals(type)) {
        // NOTE: The union of empty with non-empty HLL is just a
        // clone of the non-empty.

        switch(other.getType()) {
            case EXPLICIT: {
                // src: EXPLICIT
                // dest: EMPTY

                // if the source fits this instance's EXPLICIT threshold it
                // can be cloned directly; otherwise its values are replayed
                // into the next enabled representation
                if(other.explicitStorage.size() <= explicitThreshold) {
                    type = HLLType.EXPLICIT;
                    explicitStorage = other.explicitStorage.clone();
                } else {
                    if(!sparseOff) {
                        initializeStorage(HLLType.SPARSE);
                    } else {
                        initializeStorage(HLLType.FULL);
                    }
                    for(LongCursor c : other.explicitStorage) {
                        addRaw(c.value);
                    }
                }
                return;
            }
            case SPARSE: {
                // src: SPARSE
                // dest: EMPTY
                if(!sparseOff) {
                    type = HLLType.SPARSE;
                    sparseProbabilisticStorage = other.sparseProbabilisticStorage.clone();
                } else {
                    // SPARSE is disabled locally: materialize the source's
                    // registers directly into a FULL representation
                    initializeStorage(HLLType.FULL);
                    for(IntByteCursor c : other.sparseProbabilisticStorage) {
                        final int registerIndex = c.key;
                        final byte registerValue = c.value;
                        probabilisticStorage.setMaxRegister(registerIndex, registerValue);
                    }
                }
                return;
            }
            default/*case FULL*/: {
                // src: FULL
                // dest: EMPTY
                type = HLLType.FULL;
                probabilisticStorage = other.probabilisticStorage.clone();
                return;
            }
        }
    } else if (HLLType.EMPTY.equals(other.getType())) {
        // source is empty, so just return destination since it is unchanged
        return;
    } /* else -- both of the sets are not empty */

    // ....................................................................
    // NOTE: Since EMPTY is handled above, the HLLs are non-EMPTY below
    switch(type) {
        case EXPLICIT: {
            // src: FULL/SPARSE
            // dest: EXPLICIT
            // "Storing into destination" cannot be done (since destination
            // is by definition of smaller capacity than source), so a clone
            // of source is made and values from destination are inserted
            // into that.

            // Determine source and destination storage.
            // NOTE: destination storage may change through promotion if
            // source is SPARSE.
            if(HLLType.SPARSE.equals(other.getType())) {
                if(!sparseOff) {
                    type = HLLType.SPARSE;
                    sparseProbabilisticStorage = other.sparseProbabilisticStorage.clone();
                } else {
                    initializeStorage(HLLType.FULL);
                    for(IntByteCursor c : other.sparseProbabilisticStorage) {
                        final int registerIndex = c.key;
                        final byte registerValue = c.value;
                        probabilisticStorage.setMaxRegister(registerIndex, registerValue);
                    }
                }
            } else /*source is HLLType.FULL*/ {
                type = HLLType.FULL;
                probabilisticStorage = other.probabilisticStorage.clone();
            }

            // replay this instance's original explicit values into the
            // storage cloned from the source
            for(LongCursor c : explicitStorage) {
                addRaw(c.value);
            }
            explicitStorage = null; // release the superseded storage
            return;
        }
        case SPARSE: {
            if(HLLType.EXPLICIT.equals(other.getType())) {
                // src: EXPLICIT
                // dest: SPARSE
                // Add the raw values from the source to the destination.
                for(LongCursor c : other.explicitStorage) {
                    addRaw(c.value);
                }
                // NOTE: addRaw will handle promotion cleanup
            } else /*source is HLLType.FULL*/ {
                // src: FULL
                // dest: SPARSE
                // "Storing into destination" cannot be done (since destination
                // is by definition of smaller capacity than source), so a
                // clone of source is made and registers from the destination
                // are merged into the clone.
                type = HLLType.FULL;
                probabilisticStorage = other.probabilisticStorage.clone();
                for(IntByteCursor c : sparseProbabilisticStorage) {
                    final int registerIndex = c.key;
                    final byte registerValue = c.value;
                    probabilisticStorage.setMaxRegister(registerIndex, registerValue);
                }
                sparseProbabilisticStorage = null; // release the superseded storage
            }
            return;
        }
        default/*destination is HLLType.FULL*/: {
            if(HLLType.EXPLICIT.equals(other.getType())) {
                // src: EXPLICIT
                // dest: FULL
                // Add the raw values from the source to the destination.
                // Promotion is not possible, so don't bother checking.
                for(LongCursor c : other.explicitStorage) {
                    addRaw(c.value);
                }
            } else /*source is HLLType.SPARSE*/ {
                // src: SPARSE
                // dest: FULL
                // Merge the registers from the source into the destination.
                // Promotion is not possible, so don't bother checking.
                for(IntByteCursor c : other.sparseProbabilisticStorage) {
                    final int registerIndex = c.key;
                    final byte registerValue = c.value;
                    probabilisticStorage.setMaxRegister(registerIndex, registerValue);
                }
            }
        }
    }
}
/**
 * Computes the union of two HLLs of the same type, and stores the
 * result in this instance.
 *
 * @param other the other {@link HLL} instance to union into this one. This
 *        cannot be <code>null</code>.
 */
private void homogeneousUnion(final HLL other) {
    switch(type) {
        case EMPTY:
            // union of empty and empty is empty
            return;
        case EXPLICIT:
            for(LongCursor c : other.explicitStorage) {
                addRaw(c.value);
            }
            // NOTE: #addRaw() will handle promotion, if necessary
            return;
        case SPARSE:
            // register-wise maximum of the two sparse register maps
            for(IntByteCursor c : other.sparseProbabilisticStorage) {
                final int registerIndex = c.key;
                final byte registerValue = c.value;
                // NOTE(review): relies on the map returning zero for a
                //               missing key (absent registers are treated as
                //               zero-valued) -- confirm against the
                //               IntByteHashMap implementation.
                final byte currentRegisterValue = sparseProbabilisticStorage.get(registerIndex);
                if(registerValue > currentRegisterValue) {
                    sparseProbabilisticStorage.put(registerIndex, registerValue);
                }
            }

            // promotion, if necessary
            if(sparseProbabilisticStorage.size() > sparseThreshold) {
                initializeStorage(HLLType.FULL);
                for(IntByteCursor c : sparseProbabilisticStorage) {
                    final int registerIndex = c.key;
                    final byte registerValue = c.value;
                    probabilisticStorage.setMaxRegister(registerIndex, registerValue);
                }
                sparseProbabilisticStorage = null; // release the superseded storage
            }
            return;
        case FULL:
            // register-wise maximum of the two dense register vectors
            for(int i=0; i<m; i++) {
                final long registerValue = other.probabilisticStorage.getRegister(i);
                probabilisticStorage.setMaxRegister(i, registerValue);
            }
            return;
        default:
            throw new RuntimeException("Unsupported HLL type " + type);
    }
}
// ========================================================================
// Serialization
/**
 * Serializes the HLL to an array of bytes in correspondence with the format
 * of the default schema version, {@link SerializationUtil#DEFAULT_SCHEMA_VERSION}.
 *
 * @return the array of bytes representing the HLL. This will never be
 *         <code>null</code> or empty.
 *
 * @see #toBytes(ISchemaVersion)
 */
public byte[] toBytes() {
    return toBytes(SerializationUtil.DEFAULT_SCHEMA_VERSION);
}
/**
 * Serializes the HLL to an array of bytes in correspondence with the format
 * of the specified schema version.
 *
 * @param schemaVersion the schema version dictating the serialization format
 * @return the array of bytes representing the HLL. This will never be
 *         <code>null</code> or empty.
 */
public byte[] toBytes(final ISchemaVersion schemaVersion) {
    final byte[] bytes;
    switch(type) {
        case EMPTY:
            // an EMPTY HLL serializes to metadata/padding only -- no payload
            bytes = new byte[schemaVersion.paddingBytes(type)];
            break;
        case EXPLICIT: {
            final IWordSerializer serializer =
                schemaVersion.getSerializer(type, Long.SIZE, explicitStorage.size());

            // sort values for a canonical (deterministic) serialized form
            final long[] values = explicitStorage.toArray();
            Arrays.sort(values);
            for(final long value : values) {
                serializer.writeWord(value);
            }

            bytes = serializer.getBytes();
            break;
        }
        case SPARSE: {
            final IWordSerializer serializer =
                schemaVersion.getSerializer(type, shortWordLength, sparseProbabilisticStorage.size());

            // sort register indices for a canonical serialized form
            final int[] indices = sparseProbabilisticStorage.keys().toArray();
            Arrays.sort(indices);
            for(final int registerIndex : indices) {
                assert sparseProbabilisticStorage.containsKey(registerIndex);
                final long registerValue = sparseProbabilisticStorage.get(registerIndex);
                // pack index and value into "short word"
                // NOTE: the index must be widened to long *before* shifting:
                //       shortWordLength (= log2m + regwidth) can exceed 32
                //       bits (log2m may be up to 30 and regwidth up to 8),
                //       so shifting in int arithmetic could silently
                //       overflow and corrupt the encoded register index.
                final long shortWord = ((((long)registerIndex) << regwidth) | registerValue);
                serializer.writeWord(shortWord);
            }

            bytes = serializer.getBytes();
            break;
        }
        case FULL: {
            final IWordSerializer serializer = schemaVersion.getSerializer(type, regwidth, m);
            probabilisticStorage.getRegisterContents(serializer);

            bytes = serializer.getBytes();
            break;
        }
        default:
            throw new RuntimeException("Unsupported HLL type " + type);
    }

    // stamp the parameters/type metadata into the serialized form
    final IHLLMetadata metadata = new HLLMetadata(schemaVersion.schemaVersionNumber(),
                                                  type,
                                                  log2m,
                                                  regwidth,
                                                  (int)NumberUtil.log2(explicitThreshold),
                                                  explicitOff,
                                                  explicitAuto,
                                                  !sparseOff);
    schemaVersion.writeMetadata(bytes, metadata);

    return bytes;
}
/**
 * Deserializes the HLL (in {@link #toBytes(ISchemaVersion)} format) serialized
 * into <code>bytes</code>.
 *
 * @param bytes the serialized bytes of new HLL
 * @return the deserialized HLL. This will never be <code>null</code>.
 *
 * @see #toBytes(ISchemaVersion)
 */
public static HLL fromBytes(final byte[] bytes) {
    final ISchemaVersion schemaVersion = SerializationUtil.getSchemaVersion(bytes);
    final IHLLMetadata metadata = schemaVersion.readMetadata(bytes);

    // reconstruct the HLL's parameters from the serialized metadata
    final HLLType type = metadata.HLLType();
    final int regwidth = metadata.registerWidth();
    final int log2m = metadata.registerCountLog2();
    final boolean sparseon = metadata.sparseEnabled();

    final int expthresh;
    if(metadata.explicitAuto()) {
        expthresh = -1;
    } else if(metadata.explicitOff()) {
        expthresh = 0;
    } else {
        // NOTE: take into account that the postgres-compatible constructor
        // subtracts one before taking a power of two.
        expthresh = metadata.log2ExplicitCutoff() + 1;
    }

    final HLL hll = new HLL(log2m, regwidth, expthresh, sparseon, type);

    // Short-circuit on empty, which needs no other deserialization.
    if(HLLType.EMPTY.equals(type)) {
        return hll;
    }

    // word length (in bits) of each encoded payload word, by type
    final int wordLength;
    switch(type) {
        case EXPLICIT:
            wordLength = Long.SIZE;
            break;
        case SPARSE:
            wordLength = hll.shortWordLength;
            break;
        case FULL:
            wordLength = hll.regwidth;
            break;
        default:
            throw new RuntimeException("Unsupported HLL type " + type);
    }

    final IWordDeserializer deserializer =
        schemaVersion.getDeserializer(type, wordLength, bytes);
    switch(type) {
        case EXPLICIT:
            // NOTE: This should not exceed expthresh and this will always
            //       be exactly the number of words that were encoded,
            //       because the word length is at least a byte wide.
            // SEE:  IWordDeserializer#totalWordCount()
            for(int i=0; i<deserializer.totalWordCount(); i++) {
                hll.explicitStorage.add(deserializer.readWord());
            }
            break;
        case SPARSE:
            // NOTE: If the shortWordLength were smaller than 8 bits
            //       (1 byte) there would be a possibility (because of
            //       padding arithmetic) of having one or more extra
            //       registers read. However, this is not relevant as the
            //       extra registers will be all zeroes, which are ignored
            //       in the sparse representation.
            for(int i=0; i<deserializer.totalWordCount(); i++) {
                final long shortWord = deserializer.readWord();
                // low regwidth bits hold the register value; the remaining
                // bits hold the register index
                final byte registerValue = (byte)(shortWord & hll.valueMask);
                // Only set non-zero registers.
                if (registerValue != 0) {
                    hll.sparseProbabilisticStorage.put((int)(shortWord >>> hll.regwidth), registerValue);
                }
            }
            break;
        case FULL:
            // NOTE: Iteration is done using m (register count) and NOT
            //       deserializer#totalWordCount() because regwidth may be
            //       less than 8 and as such the padding on the 'last' byte
            //       may be larger than regwidth, causing an extra register
            //       to be read.
            // SEE: IWordDeserializer#totalWordCount()
            for(long i=0; i<hll.m; i++) {
                hll.probabilisticStorage.setRegister(i, deserializer.readWord());
            }
            break;
        default:
            throw new RuntimeException("Unsupported HLL type " + type);
    }

    return hll;
}
/**
 * Create a deep copy of this HLL: the returned instance has the same
 * parameters, type and (cloned) backing storage as this one.
 *
 * @see java.lang.Object#clone()
 */
@Override
public HLL clone() throws CloneNotSupportedException {
    // NOTE: Since the package-only constructor assumes both explicit and
    //       sparse are enabled, the easiest thing to do here is to re-derive
    //       the expthresh parameter and create a new HLL with the public
    //       constructor.
    // TODO: add a more sensible constructor to make this less obfuscated
    final int copyExpthresh;
    if(explicitAuto) {
        copyExpthresh = -1;
    } else if(explicitOff) {
        copyExpthresh = 0;
    } else {
        // explicitThreshold is defined as:
        //
        //      this.explicitThreshold = (1 << (expthresh - 1));
        //
        // Since explicitThreshold is a power of two and only has a single
        // bit set, finding the LSB is the same as finding the inverse
        copyExpthresh = BitUtil.leastSignificantBit(explicitThreshold) + 1;
    }
    final HLL copy = new HLL(log2m, regwidth, copyExpthresh, !sparseOff/*sparseOn*/, type);
    // deep-copy the backing storage of the current representation
    switch(type) {
        case EMPTY:
            // nothing to be done
            break;
        case EXPLICIT:
            copy.explicitStorage = this.explicitStorage.clone();
            break;
        case SPARSE:
            copy.sparseProbabilisticStorage = this.sparseProbabilisticStorage.clone();
            break;
        case FULL:
            copy.probabilisticStorage = this.probabilisticStorage.clone();
            break;
        default:
            throw new RuntimeException("Unsupported HLL type " + type);
    }
    return copy;
}
}
| 1 | 35,955 | Is this change correct? Looks like a typo and not sure this should be changed? | apache-lucene-solr | java |
@@ -43,6 +43,14 @@ def _read_stdin():
return sys.stdin.read()
+def _load_reporter_by_class(reporter_class: str) -> type:
+ qname = reporter_class
+ module_part = astroid.modutils.get_module_part(qname)
+ module = astroid.modutils.load_module_from_name(module_part)
+ class_name = qname.split(".")[-1]
+ return getattr(module, class_name)
+
+
# Python Linter class #########################################################
MSGS = { | 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
import collections
import contextlib
import functools
import operator
import os
import sys
import tokenize
import traceback
import warnings
from io import TextIOWrapper
import astroid
from pylint import checkers, config, exceptions, interfaces, reporters
from pylint.constants import MAIN_CHECKER_NAME, MSG_TYPES
from pylint.lint.expand_modules import expand_modules
from pylint.lint.parallel import check_parallel
from pylint.lint.report_functions import (
report_messages_by_module_stats,
report_messages_stats,
report_total_messages_stats,
)
from pylint.lint.utils import fix_import_path
from pylint.message import MessageDefinitionStore, MessagesHandlerMixIn
from pylint.reporters.ureports import nodes as report_nodes
from pylint.utils import ASTWalker, FileState, utils
from pylint.utils.pragma_parser import (
OPTION_PO,
InvalidPragmaError,
UnRecognizedOptionError,
parse_pragma,
)
MANAGER = astroid.MANAGER
def _read_stdin():
    """Read all of standard input, decoded as UTF-8.

    See https://mail.python.org/pipermail/python-list/2012-November/634424.html
    for why ``sys.stdin`` is re-wrapped rather than read directly.
    """
    utf8_stdin = TextIOWrapper(sys.stdin.detach(), encoding="utf-8")
    sys.stdin = utf8_stdin
    return utf8_stdin.read()
# Python Linter class #########################################################

# Messages owned by the PyLinter itself rather than by any specific checker:
# fatal analysis failures (F), informational notices about pragmas and
# suppression (I), and errors for malformed inline options (E).
MSGS = {
    "F0001": (
        "%s",
        "fatal",
        "Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).",
    ),
    "F0002": (
        "%s: %s",
        "astroid-error",
        "Used when an unexpected error occurred while building the "
        "Astroid representation. This is usually accompanied by a "
        "traceback. Please report such errors !",
    ),
    "F0010": (
        "error while code parsing: %s",
        "parse-error",
        "Used when an exception occurred while building the Astroid "
        "representation which could be handled by astroid.",
    ),
    "I0001": (
        "Unable to run raw checkers on built-in module %s",
        "raw-checker-failed",
        "Used to inform that a built-in module has not been checked "
        "using the raw checkers.",
    ),
    "I0010": (
        "Unable to consider inline option %r",
        "bad-inline-option",
        "Used when an inline option is either badly formatted or can't "
        "be used inside modules.",
    ),
    "I0011": (
        "Locally disabling %s (%s)",
        "locally-disabled",
        "Used when an inline option disables a message or a messages category.",
    ),
    "I0013": (
        "Ignoring entire file",
        "file-ignored",
        "Used to inform that the file will not be checked",
    ),
    "I0020": (
        "Suppressed %s (from line %d)",
        "suppressed-message",
        "A message was triggered on a line, but suppressed explicitly "
        "by a disable= comment in the file. This message is not "
        "generated for messages that are ignored due to configuration "
        "settings.",
    ),
    "I0021": (
        "Useless suppression of %s",
        "useless-suppression",
        "Reported when a message is explicitly disabled for a line or "
        "a block of code, but never triggered.",
    ),
    "I0022": (
        'Pragma "%s" is deprecated, use "%s" instead',
        "deprecated-pragma",
        "Some inline pylint options have been renamed or reworked, "
        "only the most recent form should be used. "
        "NOTE:skip-all is only available with pylint >= 0.26",
        {"old_names": [("I0014", "deprecated-disable-all")]},
    ),
    "E0001": ("%s", "syntax-error", "Used when a syntax error is raised for a module."),
    "E0011": (
        "Unrecognized file option %r",
        "unrecognized-inline-option",
        "Used when an unknown inline option is encountered.",
    ),
    "E0012": (
        "Bad option value %r",
        "bad-option-value",
        "Used when a bad value for an inline option is encountered.",
    ),
}
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class PyLinter(
config.OptionsManagerMixIn,
MessagesHandlerMixIn,
reporters.ReportsHandlerMixIn,
checkers.BaseTokenChecker,
):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astroid checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats'data (number of classes, methods...)
IDE plugin developers: you may have to call
`astroid.builder.MANAGER.astroid_cache.clear()` across runs if you want
to ensure the latest code version is actually checked.
This class needs to support pickling for parallel linting to work. The exception
is reporter member; see check_parallel function for more details.
"""
__implements__ = (interfaces.ITokenChecker,)
name = MAIN_CHECKER_NAME
priority = 0
level = 0
msgs = MSGS
    @staticmethod
    def make_options():
        """Return the option-definition tuples registered by the main checker.

        Each entry is ``(option_name, optdict)`` in the format expected by
        config.OptionsManagerMixIn; groups ("Reports", "Messages control")
        correspond to the ``option_groups`` class attribute.
        """
        return (
            # --- input selection -------------------------------------------
            (
                "ignore",
                {
                    "type": "csv",
                    "metavar": "<file>[,<file>...]",
                    "dest": "black_list",
                    "default": ("CVS",),
                    "help": "Files or directories to be skipped. "
                    "They should be base names, not paths.",
                },
            ),
            (
                "ignore-patterns",
                {
                    "type": "regexp_csv",
                    "metavar": "<pattern>[,<pattern>...]",
                    "dest": "black_list_re",
                    "default": (),
                    "help": "Files or directories matching the regex patterns are"
                    " skipped. The regex matches against base names, not paths.",
                },
            ),
            (
                "persistent",
                {
                    "default": True,
                    "type": "yn",
                    "metavar": "<y_or_n>",
                    "level": 1,
                    "help": "Pickle collected data for later comparisons.",
                },
            ),
            (
                "load-plugins",
                {
                    "type": "csv",
                    "metavar": "<modules>",
                    "default": (),
                    "level": 1,
                    "help": "List of plugins (as comma separated values of "
                    "python module names) to load, usually to register "
                    "additional checkers.",
                },
            ),
            # --- output / reporting ----------------------------------------
            (
                "output-format",
                {
                    "default": "text",
                    "type": "string",
                    "metavar": "<format>",
                    "short": "f",
                    "group": "Reports",
                    "help": "Set the output format. Available formats are text,"
                    " parseable, colorized, json and msvs (visual studio)."
                    " You can also give a reporter class, e.g. mypackage.mymodule."
                    "MyReporterClass.",
                },
            ),
            (
                "reports",
                {
                    "default": False,
                    "type": "yn",
                    "metavar": "<y_or_n>",
                    "short": "r",
                    "group": "Reports",
                    "help": "Tells whether to display a full report or only the "
                    "messages.",
                },
            ),
            (
                "evaluation",
                {
                    "type": "string",
                    "metavar": "<python_expression>",
                    "group": "Reports",
                    "level": 1,
                    "default": "10.0 - ((float(5 * error + warning + refactor + "
                    "convention) / statement) * 10)",
                    "help": "Python expression which should return a score less "
                    "than or equal to 10. You have access to the variables "
                    "'error', 'warning', 'refactor', and 'convention' which "
                    "contain the number of messages in each category, as well as "
                    "'statement' which is the total number of statements "
                    "analyzed. This score is used by the global "
                    "evaluation report (RP0004).",
                },
            ),
            (
                "score",
                {
                    "default": True,
                    "type": "yn",
                    "metavar": "<y_or_n>",
                    "short": "s",
                    "group": "Reports",
                    "help": "Activate the evaluation score.",
                },
            ),
            # --- exit-status policy ----------------------------------------
            (
                "fail-under",
                {
                    "default": 10,
                    "type": "float",
                    "metavar": "<score>",
                    "help": "Specify a score threshold to be exceeded before program exits with error.",
                },
            ),
            (
                "fail-on",
                {
                    "default": "",
                    "type": "csv",
                    "metavar": "<msg ids>",
                    "help": "Return non-zero exit code if any of these messages/categories are detected,"
                    " even if score is above --fail-under value. Syntax same as enable."
                    " Messages specified are enabled, while categories only check already-enabled messages.",
                },
            ),
            # --- message control -------------------------------------------
            (
                "confidence",
                {
                    "type": "multiple_choice",
                    "metavar": "<levels>",
                    "default": "",
                    "choices": [c.name for c in interfaces.CONFIDENCE_LEVELS],
                    "group": "Messages control",
                    "help": "Only show warnings with the listed confidence levels."
                    " Leave empty to show all. Valid levels: %s."
                    % (", ".join(c.name for c in interfaces.CONFIDENCE_LEVELS),),
                },
            ),
            (
                "enable",
                {
                    "type": "csv",
                    "metavar": "<msg ids>",
                    "short": "e",
                    "group": "Messages control",
                    "help": "Enable the message, report, category or checker with the "
                    "given id(s). You can either give multiple identifier "
                    "separated by comma (,) or put this option multiple time "
                    "(only on the command line, not in the configuration file "
                    "where it should appear only once). "
                    'See also the "--disable" option for examples.',
                },
            ),
            (
                "disable",
                {
                    "type": "csv",
                    "metavar": "<msg ids>",
                    "short": "d",
                    "group": "Messages control",
                    "help": "Disable the message, report, category or checker "
                    "with the given id(s). You can either give multiple identifiers "
                    "separated by comma (,) or put this option multiple times "
                    "(only on the command line, not in the configuration file "
                    "where it should appear only once). "
                    'You can also use "--disable=all" to disable everything first '
                    "and then reenable specific checks. For example, if you want "
                    "to run only the similarities checker, you can use "
                    '"--disable=all --enable=similarities". '
                    "If you want to run only the classes checker, but have no "
                    "Warning level messages displayed, use "
                    '"--disable=all --enable=classes --disable=W".',
                },
            ),
            (
                "msg-template",
                {
                    "type": "string",
                    "metavar": "<template>",
                    "group": "Reports",
                    "help": (
                        "Template used to display messages. "
                        "This is a python new-style format string "
                        "used to format the message information. "
                        "See doc for all details."
                    ),
                },
            ),
            # --- execution / environment -----------------------------------
            (
                "jobs",
                {
                    "type": "int",
                    "metavar": "<n-processes>",
                    "short": "j",
                    "default": 1,
                    "help": "Use multiple processes to speed up Pylint. Specifying 0 will "
                    "auto-detect the number of processors available to use.",
                },
            ),
            (
                "unsafe-load-any-extension",
                {
                    "type": "yn",
                    "metavar": "<yn>",
                    "default": False,
                    "hide": True,
                    "help": (
                        "Allow loading of arbitrary C extensions. Extensions"
                        " are imported into the active Python interpreter and"
                        " may run arbitrary code."
                    ),
                },
            ),
            (
                "limit-inference-results",
                {
                    "type": "int",
                    "metavar": "<number-of-results>",
                    "default": 100,
                    "help": (
                        "Control the amount of potential inferred values when inferring "
                        "a single object. This can help the performance when dealing with "
                        "large functions or complex, nested conditions. "
                    ),
                },
            ),
            (
                "extension-pkg-allow-list",
                {
                    "type": "csv",
                    "metavar": "<pkg[,pkg]>",
                    "default": [],
                    "help": (
                        "A comma-separated list of package or module names"
                        " from where C extensions may be loaded. Extensions are"
                        " loading into the active Python interpreter and may run"
                        " arbitrary code."
                    ),
                },
            ),
            (
                "extension-pkg-whitelist",
                {
                    "type": "csv",
                    "metavar": "<pkg[,pkg]>",
                    "default": [],
                    "help": (
                        "A comma-separated list of package or module names"
                        " from where C extensions may be loaded. Extensions are"
                        " loading into the active Python interpreter and may run"
                        " arbitrary code. (This is an alternative name to"
                        " extension-pkg-allow-list for backward compatibility.)"
                    ),
                },
            ),
            (
                "suggestion-mode",
                {
                    "type": "yn",
                    "metavar": "<yn>",
                    "default": True,
                    "help": (
                        "When enabled, pylint would attempt to guess common "
                        "misconfiguration and emit user-friendly hints instead "
                        "of false-positive error messages."
                    ),
                },
            ),
            (
                "exit-zero",
                {
                    "action": "store_true",
                    "help": (
                        "Always return a 0 (non-error) status code, even if "
                        "lint errors are found. This is primarily useful in "
                        "continuous integration scripts."
                    ),
                },
            ),
            (
                "from-stdin",
                {
                    "action": "store_true",
                    "help": (
                        "Interpret the stdin as a python script, whose filename "
                        "needs to be passed as the module_or_package argument."
                    ),
                },
            ),
        )
option_groups = (
("Messages control", "Options controlling analysis messages"),
("Reports", "Options related to output formatting and reporting"),
)
    def __init__(self, options=(), reporter=None, option_groups=(), pylintrc=None):
        """Initialize the linter.

        Several attributes must exist before the ancestor mix-ins are
        initialized (message store, checkers registry, reporter slot,
        option tables), so they are set up first and the ``__init__`` of
        each base class is then invoked explicitly, in order.

        :param options: extra option definitions appended to make_options()
        :param reporter: optional reporter instance to install immediately
        :param option_groups: extra option groups appended to the class ones
        :param pylintrc: path to a config file; falls back to the first
            default config file found
        """
        self.msgs_store = MessageDefinitionStore()
        self.reporter = None
        self._reporter_name = None
        self._reporters = {}
        # checker name -> list of checker instances registered under it
        self._checkers = collections.defaultdict(list)
        # msgid -> line of the inline pragma that toggled it
        self._pragma_lineno = {}
        self._ignore_file = False
        # visit variables
        self.file_state = FileState()
        self.current_name = None
        self.current_file = None
        self.stats = None
        self.fail_on_symbols = []
        # init options
        self._external_opts = options
        self.options = options + PyLinter.make_options()
        self.option_groups = option_groups + PyLinter.option_groups
        # dispatch tables for inline pragmas; the *-msg forms are deprecated
        self._options_methods = {"enable": self.enable, "disable": self.disable}
        self._bw_options_methods = {
            "disable-msg": self._options_methods["disable"],
            "enable-msg": self._options_methods["enable"],
        }
        # Explicit base-class initialization: the MRO alone would not pass
        # the right arguments to each mix-in.
        MessagesHandlerMixIn.__init__(self)
        reporters.ReportsHandlerMixIn.__init__(self)
        super().__init__(
            usage=__doc__,
            config_file=pylintrc or next(config.find_default_config_files(), None),
        )
        checkers.BaseTokenChecker.__init__(self)
        # provided reports
        self.reports = (
            ("RP0001", "Messages by category", report_total_messages_stats),
            (
                "RP0002",
                "% errors / warnings by module",
                report_messages_by_module_stats,
            ),
            ("RP0003", "Messages", report_messages_stats),
        )
        # The main checker registers itself like any other checker.
        self.register_checker(self)
        self._dynamic_plugins = set()
        self._python3_porting_mode = False
        self._error_mode = False
        self.load_provider_defaults()
        if reporter:
            self.set_reporter(reporter)
def load_default_plugins(self):
checkers.initialize(self)
reporters.initialize(self)
# Make sure to load the default reporter, because
# the option has been set before the plugins had been loaded.
if not self.reporter:
self._load_reporter()
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = astroid.modutils.load_module_from_name(modname)
module.register(self)
def load_plugin_configuration(self):
"""Call the configuration hook for plugins
This walks through the list of plugins, grabs the "load_configuration"
hook, if exposed, and calls it to allow plugins to configure specific
settings.
"""
for modname in self._dynamic_plugins:
module = astroid.modutils.load_module_from_name(modname)
if hasattr(module, "load_configuration"):
module.load_configuration(self)
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
try:
reporter_class = self._load_reporter_class()
except (ImportError, AttributeError) as e:
raise exceptions.InvalidReporterError(name) from e
else:
self.set_reporter(reporter_class())
def _load_reporter_class(self):
qname = self._reporter_name
module_part = astroid.modutils.get_module_part(qname)
module = astroid.modutils.load_module_from_name(module_part)
class_name = qname.split(".")[-1]
reporter_class = getattr(module, class_name)
return reporter_class
def set_reporter(self, reporter):
"""set the reporter used to display messages and reports"""
self.reporter = reporter
reporter.linter = self
    def set_option(self, optname, value, action=None, optdict=None):
        """Set an option, intercepting the few that need special handling.

        Overridden from config.OptionsProviderMixin: ``enable``/``disable``
        (and their deprecated ``*-msg`` forms) are dispatched to the
        enable/disable methods instead of being stored, and
        ``output-format`` triggers reporter (re)loading.  Everything else is
        delegated to the base implementation.
        """
        if optname in self._options_methods or optname in self._bw_options_methods:
            if value:
                try:
                    meth = self._options_methods[optname]
                except KeyError:
                    # Deprecated "(dis|en)able-msg" spelling: warn and map to
                    # the modern method.
                    meth = self._bw_options_methods[optname]
                    warnings.warn(
                        "%s is deprecated, replace it by %s"
                        % (optname, optname.split("-")[0]),
                        DeprecationWarning,
                    )
                value = utils._check_csv(value)
                if isinstance(value, (list, tuple)):
                    for _id in value:
                        meth(_id, ignore_unknown=True)
                else:
                    meth(value)
            return  # no need to call set_option, disable/enable methods do it
        elif optname == "output-format":
            self._reporter_name = value
            # If the reporters are already available, load
            # the reporter class.
            if self._reporters:
                self._load_reporter()
        try:
            checkers.BaseTokenChecker.set_option(self, optname, value, action, optdict)
        except config.UnsupportedAction:
            print("option %s can't be read from config file" % optname, file=sys.stderr)
    def register_reporter(self, reporter_class):
        """Make *reporter_class* selectable by its ``name`` attribute."""
        self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, "name", ""))
try:
# Remove the current reporter and add it
# at the end of the list.
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
# checkers manipulation methods ############################################
def register_checker(self, checker):
"""register a new checker
checker is an object implementing IRawChecker or / and IAstroidChecker
"""
assert checker.priority <= 0, "checker priority can't be >= 0"
self._checkers[checker.name].append(checker)
for r_id, r_title, r_cb in checker.reports:
self.register_report(r_id, r_title, r_cb, checker)
self.register_options_provider(checker)
if hasattr(checker, "msgs"):
self.msgs_store.register_messages_from_checker(checker)
checker.load_defaults()
# Register the checker, but disable all of its messages.
if not getattr(checker, "enabled", True):
self.disable(checker.name)
    def enable_fail_on_messages(self):
        """Enable 'fail on' msgs.

        Convert values in config.fail_on (which might be msg category, msg id,
        or symbol) to specific msgs, then enable and flag them for later.
        Flagged symbols are collected in ``self.fail_on_symbols`` and checked
        by any_fail_on_issues() after the run.
        """
        fail_on_vals = self.config.fail_on
        if not fail_on_vals:
            return
        fail_on_cats = set()
        fail_on_msgs = set()
        for val in fail_on_vals:
            # If value is a category, add category, else add message
            if val in MSG_TYPES:
                fail_on_cats.add(val)
            else:
                fail_on_msgs.add(val)
        # For every message in every checker, if cat or msg flagged, enable check
        for all_checkers in self._checkers.values():
            for checker in all_checkers:
                for msg in checker.messages:
                    if msg.msgid in fail_on_msgs or msg.symbol in fail_on_msgs:
                        # message id/symbol matched, enable and flag it
                        self.enable(msg.msgid)
                        self.fail_on_symbols.append(msg.symbol)
                    elif msg.msgid[0] in fail_on_cats:
                        # message starts with a category value, flag (but do not enable) it
                        self.fail_on_symbols.append(msg.symbol)
def any_fail_on_issues(self):
return any(x in self.fail_on_symbols for x in self.stats["by_msg"])
def disable_noerror_messages(self):
for msgcat, msgids in self.msgs_store._msgs_by_category.items():
# enable only messages with 'error' severity and above ('fatal')
if msgcat in ["E", "F"]:
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for _reporters in self._reports.values():
for report_id, _, _ in _reporters:
self.disable_report(report_id)
    def error_mode(self):
        """error mode: enable only errors; no reports, no persistent

        When the python3 porting mode is also active, only the porting
        checker's errors are kept and any config-file disables are
        re-applied on top; otherwise the porting checker is disabled
        entirely.
        """
        self._error_mode = True
        self.disable_noerror_messages()
        self.disable("miscellaneous")
        if self._python3_porting_mode:
            self.disable("all")
            for msg_id in self._checker_messages("python3"):
                if msg_id.startswith("E"):
                    self.enable(msg_id)
            # Re-apply user disables from the config file, which "disable all"
            # above would otherwise have wiped out.
            config_parser = self.cfgfile_parser
            if config_parser.has_option("MESSAGES CONTROL", "disable"):
                value = config_parser.get("MESSAGES CONTROL", "disable")
                self.global_set_option("disable", value)
        else:
            self.disable("python3")
        self.set_option("reports", False)
        self.set_option("persistent", False)
        self.set_option("score", False)
    def python3_porting_mode(self):
        """Disable all other checkers and enable Python 3 warnings."""
        self.disable("all")
        # re-enable some errors, or 'print', 'raise', 'async', 'await' will mistakenly lint fine
        self.enable("fatal")  # F0001
        self.enable("astroid-error")  # F0002
        self.enable("parse-error")  # F0010
        self.enable("syntax-error")  # E0001
        self.enable("python3")
        if self._error_mode:
            # The error mode was activated, using the -E flag.
            # So we'll need to enable only the errors from the
            # Python 3 porting checker.
            for msg_id in self._checker_messages("python3"):
                if msg_id.startswith("E"):
                    self.enable(msg_id)
                else:
                    self.disable(msg_id)
        # Re-apply user disables from the config file, which "disable all"
        # above would otherwise have wiped out.
        config_parser = self.cfgfile_parser
        if config_parser.has_option("MESSAGES CONTROL", "disable"):
            value = config_parser.get("MESSAGES CONTROL", "disable")
            self.global_set_option("disable", value)
        self._python3_porting_mode = True
def list_messages_enabled(self):
enabled = [
f" {message.symbol} ({message.msgid})"
for message in self.msgs_store.messages
if self.is_message_enabled(message.msgid)
]
disabled = [
f" {message.symbol} ({message.msgid})"
for message in self.msgs_store.messages
if not self.is_message_enabled(message.msgid)
]
print("Enabled messages:")
for msg in sorted(enabled):
print(msg)
print("\nDisabled messages:")
for msg in sorted(disabled):
print(msg)
print("")
# block level option handling #############################################
# see func_block_disable_msg.py test case for expected behaviour
    def process_tokens(self, tokens):
        """Process tokens from the current module to search for module/block level
        options.

        Scans comment tokens for pylint pragmas (``# pylint: disable=...``
        etc.), dispatches them to the enable/disable methods, and emits
        deprecation / error messages for bad or obsolete pragma forms.
        Setting ``self._ignore_file`` and returning early implements
        skip-file / disable-all.
        """
        control_pragmas = {"disable", "enable"}
        prev_line = None
        # saw_newline: was there a real newline between the previous logical
        # line and the current one (False means a backslash continuation).
        # seen_newline: newline tokens seen so far on the current line.
        saw_newline = True
        seen_newline = True
        for (tok_type, content, start, _, _) in tokens:
            if prev_line and prev_line != start[0]:
                saw_newline = seen_newline
                seen_newline = False
            prev_line = start[0]
            if tok_type in (tokenize.NL, tokenize.NEWLINE):
                seen_newline = True
            if tok_type != tokenize.COMMENT:
                continue
            match = OPTION_PO.search(content)
            if match is None:
                continue
            try:
                for pragma_repr in parse_pragma(match.group(2)):
                    if pragma_repr.action in ("disable-all", "skip-file"):
                        if pragma_repr.action == "disable-all":
                            self.add_message(
                                "deprecated-pragma",
                                line=start[0],
                                args=("disable-all", "skip-file"),
                            )
                        self.add_message("file-ignored", line=start[0])
                        self._ignore_file = True
                        return
                    try:
                        meth = self._options_methods[pragma_repr.action]
                    except KeyError:
                        meth = self._bw_options_methods[pragma_repr.action]
                        # found a "(dis|en)able-msg" pragma deprecated suppression
                        self.add_message(
                            "deprecated-pragma",
                            line=start[0],
                            args=(
                                pragma_repr.action,
                                pragma_repr.action.replace("-msg", ""),
                            ),
                        )
                    for msgid in pragma_repr.messages:
                        # Add the line where a control pragma was encountered.
                        if pragma_repr.action in control_pragmas:
                            self._pragma_lineno[msgid] = start[0]
                        if (pragma_repr.action, msgid) == ("disable", "all"):
                            self.add_message(
                                "deprecated-pragma",
                                line=start[0],
                                args=("disable=all", "skip-file"),
                            )
                            self.add_message("file-ignored", line=start[0])
                            self._ignore_file = True
                            return
                        # If we did not see a newline between the previous line and now,
                        # we saw a backslash so treat the two lines as one.
                        l_start = start[0]
                        if not saw_newline:
                            l_start -= 1
                        try:
                            meth(msgid, "module", l_start)
                        except exceptions.UnknownMessageError:
                            self.add_message(
                                "bad-option-value", args=msgid, line=start[0]
                            )
            except UnRecognizedOptionError as err:
                self.add_message(
                    "unrecognized-inline-option", args=err.token, line=start[0]
                )
                continue
            except InvalidPragmaError as err:
                self.add_message("bad-inline-option", args=err.token, line=start[0])
                continue
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [
c
for _checkers in self._checkers.values()
for c in _checkers
if c is not self
]
def get_checker_names(self):
"""Get all the checker names that this linter knows about."""
current_checkers = self.get_checkers()
return sorted(
{
checker.name
for checker in current_checkers
if checker.name != MAIN_CHECKER_NAME
}
)
def prepare_checkers(self):
"""return checkers needed for activated messages and reports"""
if not self.config.reports:
self.disable_reporters()
# get needed checkers
needed_checkers = [self]
for checker in self.get_checkers()[1:]:
messages = {msg for msg in checker.msgs if self.is_message_enabled(msg)}
if messages or any(self.report_is_enabled(r[0]) for r in checker.reports):
needed_checkers.append(checker)
# Sort checkers by priority
needed_checkers = sorted(
needed_checkers, key=operator.attrgetter("priority"), reverse=True
)
return needed_checkers
# pylint: disable=unused-argument
@staticmethod
def should_analyze_file(modname, path, is_argument=False):
"""Returns whether or not a module should be checked.
This implementation returns True for all python source file, indicating
that all files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:param bool is_argument: Whether the file is an argument to pylint or not.
Files which respect this property are always
checked, since the user requested it explicitly.
:returns: True if the module should be checked.
:rtype: bool
"""
if is_argument:
return True
return path.endswith(".py")
# pylint: enable=unused-argument
def initialize(self):
"""Initialize linter for linting
This method is called before any linting is done.
"""
# initialize msgs_state now that all messages have been registered into
# the store
for msg in self.msgs_store.messages:
if not msg.may_be_emitted():
self._msgs_state[msg.msgid] = False
    def check(self, files_or_modules):
        """main checking entry: check a list of files or modules from their name.

        files_or_modules is either a string or list of strings presenting
        modules to check.  Dispatches to one of three modes: stdin input
        (--from-stdin), single-process, or parallel (--jobs > 1).
        """
        self.initialize()
        if not isinstance(files_or_modules, (list, tuple)):
            files_or_modules = (files_or_modules,)
        if self.config.from_stdin:
            # stdin mode requires exactly one filename argument, used only
            # to name the module being read from standard input.
            if len(files_or_modules) != 1:
                raise exceptions.InvalidArgsError(
                    "Missing filename required for --from-stdin"
                )
            filepath = files_or_modules[0]
            with fix_import_path(files_or_modules):
                self._check_files(
                    functools.partial(self.get_ast, data=_read_stdin()),
                    [self._get_file_descr_from_stdin(filepath)],
                )
        elif self.config.jobs == 1:
            with fix_import_path(files_or_modules):
                self._check_files(
                    self.get_ast, self._iterate_file_descrs(files_or_modules)
                )
        else:
            check_parallel(
                self,
                self.config.jobs,
                self._iterate_file_descrs(files_or_modules),
                files_or_modules,
            )
def check_single_file(self, name, filepath, modname):
"""Check single file
The arguments are the same that are documented in _check_files
The initialize() method should be called before calling this method
"""
with self._astroid_module_checker() as check_astroid_module:
self._check_file(
self.get_ast, check_astroid_module, name, filepath, modname
)
def _check_files(self, get_ast, file_descrs):
"""Check all files from file_descrs
The file_descrs should be iterable of tuple (name, filepath, modname)
where
- name: full name of the module
- filepath: path of the file
- modname: module name
"""
with self._astroid_module_checker() as check_astroid_module:
for name, filepath, modname in file_descrs:
self._check_file(get_ast, check_astroid_module, name, filepath, modname)
    def _check_file(self, get_ast, check_astroid_module, name, filepath, modname):
        """Check a file using the passed utility functions (get_ast and check_astroid_module)

        :param callable get_ast: callable returning AST from defined file taking the following arguments
        - filepath: path to the file to check
        - name: Python module name
        :param callable check_astroid_module: callable checking an AST taking the following arguments
        - ast: AST of the module
        :param str name: full name of the module
        :param str filepath: path to checked file
        :param str modname: name of the checked Python module
        """
        self.set_current_module(name, filepath)
        # get the module representation
        ast_node = get_ast(filepath, name)
        if ast_node is None:
            # parse failure already reported by get_ast
            return
        self._ignore_file = False
        # fresh per-file state for line/block-level message suppression
        self.file_state = FileState(modname)
        # fix the current file (if the source file was not available or
        # if it's actually a c extension)
        self.current_file = ast_node.file  # pylint: disable=maybe-no-member
        check_astroid_module(ast_node)
        # warn about spurious inline messages handling
        spurious_messages = self.file_state.iter_spurious_suppression_messages(
            self.msgs_store
        )
        for msgid, line, args in spurious_messages:
            self.add_message(msgid, line, None, args)
    @staticmethod
    def _get_file_descr_from_stdin(filepath):
        """Return file description (tuple of module name, file path, base name) from given file path

        This method is used for creating suitable file description for _check_files when the
        source is standard input.
        """
        try:
            # Note that this function does not really perform an
            # __import__ but may raise an ImportError exception, which
            # we want to catch here.
            modname = ".".join(astroid.modutils.modpath_from_file(filepath))
        except ImportError:
            # Not resolvable as a module path: fall back to the bare
            # file name without its extension.
            modname = os.path.splitext(os.path.basename(filepath))[0]
        return (modname, filepath, filepath)
def _iterate_file_descrs(self, files_or_modules):
"""Return generator yielding file descriptions (tuples of module name, file path, base name)
The returned generator yield one item for each Python module that should be linted.
"""
for descr in self._expand_files(files_or_modules):
name, filepath, is_arg = descr["name"], descr["path"], descr["isarg"]
if self.should_analyze_file(name, filepath, is_argument=is_arg):
yield (name, filepath, descr["basename"])
    def _expand_files(self, modules):
        """get modules and errors from a list of modules and handle errors

        Expansion honors the ignore / ignore-patterns options; every
        expansion error is converted into a message on the offending
        module before the successful results are returned.
        """
        result, errors = expand_modules(
            modules, self.config.black_list, self.config.black_list_re
        )
        for error in errors:
            message = modname = error["mod"]
            key = error["key"]
            self.set_current_module(modname)
            if key == "fatal":
                # Strip the working directory from the exception text to
                # keep the message stable across machines.
                message = str(error["ex"]).replace(os.getcwd() + os.sep, "")
            self.add_message(key, args=message)
        return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats["by_module"][modname] = {}
self.stats["by_module"][modname]["statement"] = 0
for msg_cat in MSG_TYPES.values():
self.stats["by_module"][modname][msg_cat] = 0
    @contextlib.contextmanager
    def _astroid_module_checker(self):
        """Context manager for checking ASTs

        The value in the context is callable accepting AST as its only argument.
        On entry every needed checker is opened (and astroid checkers are
        attached to the walker); on exit the statement count is recorded
        and checkers are closed in reverse order.
        """
        walker = ASTWalker(self)
        _checkers = self.prepare_checkers()
        # Token checkers run via process_tokens; self is excluded because the
        # linter invokes its own process_tokens directly.
        tokencheckers = [
            c
            for c in _checkers
            if interfaces.implements(c, interfaces.ITokenChecker) and c is not self
        ]
        rawcheckers = [
            c for c in _checkers if interfaces.implements(c, interfaces.IRawChecker)
        ]
        # notify global begin
        for checker in _checkers:
            checker.open()
            if interfaces.implements(checker, interfaces.IAstroidChecker):
                walker.add_checker(checker)
        yield functools.partial(
            self.check_astroid_module,
            walker=walker,
            tokencheckers=tokencheckers,
            rawcheckers=rawcheckers,
        )
        # notify global end
        self.stats["statement"] = walker.nbstatements
        # close in reverse registration order so dependent checkers shut
        # down before the ones they rely on
        for checker in reversed(_checkers):
            checker.close()
    def get_ast(self, filepath, modname, data=None):
        """Return an ast(roid) representation of a module or a string.

        :param str filepath: path to checked file.
        :param str modname: The name of the module to be checked.
        :param str data: optional contents of the checked file.
        :returns: the AST, or None when parsing failed (an appropriate
            message is emitted in that case)
        :rtype: astroid.nodes.Module
        """
        try:
            if data is None:
                return MANAGER.ast_from_file(filepath, modname, source=True)
            return astroid.builder.AstroidBuilder(MANAGER).string_build(
                data, modname, filepath
            )
        except astroid.AstroidSyntaxError as ex:
            # pylint: disable=no-member
            self.add_message(
                "syntax-error",
                line=getattr(ex.error, "lineno", 0),
                col_offset=getattr(ex.error, "offset", None),
                args=str(ex.error),
            )
        except astroid.AstroidBuildingException as ex:
            self.add_message("parse-error", args=ex)
        except Exception as ex:  # pylint: disable=broad-except
            # Unexpected astroid failure: surface it as astroid-error
            # instead of crashing the whole run.
            traceback.print_exc()
            self.add_message("astroid-error", args=(ex.__class__, ex))
        return None
def check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers):
"""Check a module from its astroid representation.
For return value see _check_astroid_module
"""
before_check_statements = walker.nbstatements
retval = self._check_astroid_module(
ast_node, walker, rawcheckers, tokencheckers
)
self.stats["by_module"][self.current_name]["statement"] = (
walker.nbstatements - before_check_statements
)
return retval
    def _check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers):
        """Check given AST node with given walker and checkers

        :param astroid.nodes.Module ast_node: AST node of the module to check
        :param pylint.utils.ast_walker.ASTWalker walker: AST walker
        :param list rawcheckers: List of raw checkers to use
        :param list tokencheckers: List of token checkers to use
        :returns: True if the module was checked, False if ignored,
            None if the module contents could not be parsed
        :rtype: bool
        """
        try:
            tokens = utils.tokenize_module(ast_node)
        except tokenize.TokenError as ex:
            self.add_message("syntax-error", line=ex.args[1][0], args=ex.args[0])
            return None
        if not ast_node.pure_python:
            # C extension: raw/token checking is impossible
            self.add_message("raw-checker-failed", args=ast_node.name)
        else:
            # assert astroid.file.endswith('.py')
            # invoke ITokenChecker interface on self to fetch module/block
            # level options
            self.process_tokens(tokens)
            if self._ignore_file:
                # a skip-file / disable-all pragma was found
                return False
            # walk ast to collect line numbers
            self.file_state.collect_block_lines(self.msgs_store, ast_node)
            # run raw and tokens checkers
            for checker in rawcheckers:
                checker.process_module(ast_node)
            for checker in tokencheckers:
                checker.process_tokens(tokens)
        # generate events to astroid checkers
        walker.walk(ast_node)
        return True
# IAstroidChecker interface #################################################
def open(self):
"""initialize counters"""
self.stats = {"by_module": {}, "by_msg": {}}
MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
MANAGER.max_inferable_values = self.config.limit_inference_results
MANAGER.extension_package_whitelist.update(self.config.extension_pkg_allow_list)
if self.config.extension_pkg_whitelist:
MANAGER.extension_package_whitelist.update(
self.config.extension_pkg_whitelist
)
for msg_cat in MSG_TYPES.values():
self.stats[msg_cat] = 0
    def generate_reports(self):
        """close the whole package /module, it's time to make reports !

        if persistent run, pickle results for later comparison

        :returns: the evaluation score, or None when no base name is set
            (nothing was actually checked)
        """
        # Display whatever messages are left on the reporter.
        self.reporter.display_messages(report_nodes.Section())
        if self.file_state.base_name is not None:
            # load previous results if any
            previous_stats = config.load_results(self.file_state.base_name)
            self.reporter.on_close(self.stats, previous_stats)
            if self.config.reports:
                sect = self.make_reports(self.stats, previous_stats)
            else:
                sect = report_nodes.Section()
            if self.config.reports:
                self.reporter.display_reports(sect)
            score_value = self._report_evaluation()
            # save results if persistent run
            if self.config.persistent:
                config.save_results(self.stats, self.file_state.base_name)
        else:
            self.reporter.on_close(self.stats, {})
            score_value = None
        return score_value
    def _report_evaluation(self):
        """make the global evaluation report

        Evaluates the configurable ``evaluation`` expression against the
        run's statistics and, when --score is active, displays the result
        (with a delta against the previous persisted run, if any).
        Returns the numeric note, or None when no statements were analyzed.
        """
        # check with at least 1 statement (usually 0 when there is a
        # syntax error preventing pylint from further processing)
        note = None
        previous_stats = config.load_results(self.file_state.base_name)
        if self.stats["statement"] == 0:
            return note
        # get a global note for the code
        evaluation = self.config.evaluation
        try:
            # The evaluation expression is user-configurable, hence eval();
            # it only sees the stats dict as its namespace.
            note = eval(evaluation, {}, self.stats)  # pylint: disable=eval-used
        except Exception as ex:  # pylint: disable=broad-except
            msg = "An exception occurred while rating: %s" % ex
        else:
            self.stats["global_note"] = note
            msg = "Your code has been rated at %.2f/10" % note
            pnote = previous_stats.get("global_note")
            if pnote is not None:
                msg += f" (previous run: {pnote:.2f}/10, {note - pnote:+.2f})"
        if self.config.score:
            sect = report_nodes.EvaluationSection(msg)
            self.reporter.display_reports(sect)
        return note
| 1 | 14,034 | I checked the tests coverage and strangely it look like those three lines are not covered (?!). Do you have the same result on your side ? | PyCQA-pylint | py |
@@ -35,4 +35,4 @@ namespace System.MathBenchmarks
}
}
}
-}
+} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using BenchmarkDotNet.Attributes;
namespace System.MathBenchmarks
{
public partial class Double
{
// Tests Math.Acosh(double) over 5000 iterations for the domain +1, +3
private const double acoshDelta = 0.0004;
private const double acoshExpectedResult = 6148.648751739127;
[Benchmark]
public void Acosh() => AcoshTest();
public static void AcoshTest()
{
var result = 0.0; var value = 1.0;
for (var iteration = 0; iteration < MathTests.Iterations; iteration++)
{
result += Math.Acosh(value);
value += acoshDelta;
}
var diff = Math.Abs(acoshExpectedResult - result);
if (double.IsNaN(result) || (diff > MathTests.DoubleEpsilon))
{
throw new Exception($"Expected Result {acoshExpectedResult,20:g17}; Actual Result {result,20:g17}");
}
}
}
} | 1 | 10,955 | nit: please try to avoid changing end of the file (it adds a noise to the code review, but does not provide too much value) | dotnet-performance | .cs |
@@ -57,9 +57,16 @@ namespace AutoRest.Swagger.Model.Utilities
.Where(modelName => !(IsBaseResourceModelName(modelName))
&& serviceDefinition.Definitions.ContainsKey(modelName)
&& IsAllOfOnModelNames(modelName, serviceDefinition.Definitions, xmsAzureResourceModels));
+
+ var unfilteredResourceCandidates = resourceModels.Union(modelsAllOfOnXmsAzureResources);
+
+ // Now filter all the resource models that are returned from a POST operation only
+ var postOpResourceModels = serviceDefinition.Paths.Values.SelectMany(pathObj => pathObj.Where(opObj => opObj.Key.EqualsIgnoreCase("post"))
+ .SelectMany(opObj => opObj.Value.Responses?.Select(resp => resp.Value?.Schema?.Reference?.StripDefinitionPath())))
+ .Where(model => !string.IsNullOrWhiteSpace(model));
- // return the union
- return resourceModels.Union(modelsAllOfOnXmsAzureResources);
+ // if any model is returned only by a POST operation, disregard it
+ return unfilteredResourceCandidates.Except(postOpResourceModels);
}
| 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using AutoRest.Swagger;
using AutoRest.Core.Utilities;
using System.Collections.Generic;
using System.Linq;
using System.Text.RegularExpressions;
using AutoRest.Swagger.Validation;
using System.Text;
using System;
namespace AutoRest.Swagger.Model.Utilities
{
public static class ValidationUtilities
{
public static readonly string XmsPageable = "x-ms-pageable";
private static readonly IEnumerable<string> BaseResourceModelNames =
new List<string>() { "trackedresource", "proxyresource", "resource" };
private static readonly Regex ResourceProviderPathPattern = new Regex(@"/providers/(?<resPath>[^{/]+)/", RegexOptions.IgnoreCase);
private static readonly Regex PropNameRegEx = new Regex(@"^[a-z0-9\$-]+([A-Z]{1,3}[a-z0-9\$-]+)+$|^[a-z0-9\$-]+$|^[a-z0-9\$-]+([A-Z]{1,3}[a-z0-9\$-]+)*[A-Z]{1,3}$");
public static readonly Regex listBySidRegEx = new Regex(@".+_(List|ListBySubscriptionId|ListBySubscription|ListBySubscriptions)$", RegexOptions.IgnoreCase);
public static readonly Regex listByRgRegEx = new Regex(@".+_ListByResourceGroup$", RegexOptions.IgnoreCase);
/// <summary>
/// Populates a list of 'Resource' models found in the service definition
/// </summary>
/// <param name="serviceDefinition">serviceDefinition for which to populate the resources</param>
/// <returns>List of resource models</returns>
        public static IEnumerable<string> GetResourceModels(ServiceDefinition serviceDefinition)
        {
            // No definitions at all means there is nothing that can be a resource.
            if (serviceDefinition.Definitions?.Any() != true)
            {
                return new List<string>();
            }

            // Models explicitly marked with the x-ms-azure-resource extension.
            var xmsAzureResourceModels = GetXmsAzureResourceModels(serviceDefinition.Definitions);

            // Get all models that are returned by PUT operations (200/201 response)
            var putOperationsResponseModels = GetOperationResponseModels("put", serviceDefinition);
            putOperationsResponseModels = putOperationsResponseModels.Union(GetOperationResponseModels("put", serviceDefinition, "201"));

            // Get all models that 'allOf' on models that are named 'Resource' and are returned by any GET operation
            var getOperationsResponseModels = GetOperationResponseModels("get", serviceDefinition);
            getOperationsResponseModels =
                getOperationsResponseModels.Where(modelName => serviceDefinition.Definitions.ContainsKey(modelName))
                                            .Where(modelName => IsAllOfOnModelNames(modelName, serviceDefinition.Definitions, new List<string>() { "Resource" }));

            // Union de-duplicates, so a model returned by both PUT and GET appears once.
            var resourceModels = putOperationsResponseModels.Union(getOperationsResponseModels);

            // Pick all models other than the ones that have already been determined to be resources
            // then pick all models that allOf on xmsAzureResourceModels at any level of hierarchy
            // (base model names such as 'resource'/'trackedresource'/'proxyresource' are excluded).
            var modelsAllOfOnXmsAzureResources = serviceDefinition.Definitions.Keys.Except(resourceModels)
                                                    .Where(modelName => !(IsBaseResourceModelName(modelName))
                                                            && serviceDefinition.Definitions.ContainsKey(modelName)
                                                            && IsAllOfOnModelNames(modelName, serviceDefinition.Definitions, xmsAzureResourceModels));

            // return the union
            return resourceModels.Union(modelsAllOfOnXmsAzureResources);
        }
public static bool IsODataProperty(string propName) => propName.ToLower().StartsWith("@");
/// <summary>
/// checks if a model is a base resource type (resource, trackedresource or proxyresource)
/// </summary>
/// <param name="modelName">model name to check</param>
/// <returns> true if model is a base resource type </returns>
public static bool IsBaseResourceModelName(string modelName) => BaseResourceModelNames.Contains(modelName.ToLower());
/// <summary>
/// Returns the cumulative list of all 'allOfed' references for a model
/// </summary>
/// <param name="modelName">model for which to determine the model hierarchy</param>
/// <param name="definitions">dictionary of model definitions</param>
/// <param name="propertyList">List of 'allOfed' models</param>
        public static IEnumerable<string> EnumerateModelHierarchy(string modelName, Dictionary<string, Schema> definitions)
        {
            // Unknown model: nothing to enumerate.
            if (!definitions.ContainsKey(modelName)) return new List<string>();

            // The model itself is always part of its own hierarchy.
            IEnumerable<string> modelHierarchy = new List<string>() { modelName };

            // If schema has no allOfs, return
            var modelSchema = definitions[modelName];
            if (modelSchema.AllOf?.Any() != true) return modelHierarchy;

            // for each allOf in the schema, recursively pick the models;
            // Union de-duplicates models reachable via multiple allOf branches.
            // NOTE(review): a cyclic allOf chain would recurse without bound —
            // presumably inputs are acyclic; confirm upstream validation.
            var allOfs = modelSchema.AllOf.Select(allOfSchema => allOfSchema.Reference?.StripDefinitionPath()).Where(modelRef => !string.IsNullOrEmpty(modelRef));
            return modelHierarchy.Union(allOfs.SelectMany(allOf => EnumerateModelHierarchy(allOf, definitions))).ToList();
        }
/// <summary>
/// Returns the cumulative list of all properties found in the entire model hierarchy
/// </summary>
/// <param name="modelName">model for which to check the properties</param>
/// <param name="definitions">dictionary of model definitions</param>
/// <returns>List of properties found in model hierarchy</returns>
private static IEnumerable<KeyValuePair<string, Schema>> EnumerateProperties(string modelName, Dictionary<string, Schema> definitions)
{
var modelsToCheck = EnumerateModelHierarchy(modelName, definitions);
var propertiesList = new List<KeyValuePair<string, Schema>>();
foreach (var modelRef in modelsToCheck)
{
if (!definitions.ContainsKey(modelRef) || definitions[modelRef].Properties?.Any() != true) continue;
propertiesList = propertiesList.Union(definitions[modelRef].Properties).ToList();
}
return propertiesList;
}
/// <summary>
/// Returns the cumulative list of all required properties found in the entire model hierarchy
/// </summary>
/// <param name="modelName">model for which to check the required properties</param>
/// <param name="definitions">dictionary of model definitions</param>
/// <param name="propertyList">List of required properties found in model hierarchy</param>
public static IEnumerable<string> EnumerateRequiredProperties(string modelName, Dictionary<string, Schema> definitions)
{
var modelsToCheck = EnumerateModelHierarchy(modelName, definitions);
var propertiesList = new List<string>();
foreach (var modelRef in modelsToCheck)
{
if (!definitions.ContainsKey(modelRef) || definitions[modelRef].Required?.Any() != true) continue;
propertiesList = propertiesList.Union(definitions[modelRef].Required.Where(reqProp => !string.IsNullOrEmpty(reqProp))).ToList();
}
return propertiesList;
}
/// <summary>
/// Returns the cumulative list of all read only properties found in the entire model hierarchy
/// </summary>
/// <param name="modelName">model for which to find the read only properties</param>
/// <param name="definitions">dictionary of model definitions</param>
/// <param name="propertyList">List of read only properties found in model hierarchy</param>
public static IEnumerable<string> EnumerateReadOnlyProperties(string modelName, Dictionary<string, Schema> definitions)
=> EnumerateProperties(modelName, definitions).Where(prop => prop.Value.ReadOnly).Select(prop => prop.Key);
public static IEnumerable<KeyValuePair<string, Schema>> EnumerateDefaultValuedProperties(string modelName, Dictionary<string, Schema> definitions)
=> EnumerateProperties(modelName, definitions).Where(prop => prop.Value.Default != null);
/// <summary>
/// Checks if model hierarchy consists of given set of properties
/// </summary>
/// <param name="modelName">model for which to check the resource properties</param>
/// <param name="definitions">dictionary of model definitions</param>
/// <param name="propertyList">List of properties to be checked for in a model hierarchy</param>
/// <returns>true if the model hierarchy contains all of the resource model properties</returns>
public static bool ContainsProperties(string modelName, Dictionary<string, Schema> definitions, IEnumerable<string> propertiesToCheck)
{
var propertyList = EnumerateProperties(modelName, definitions);
return !propertiesToCheck.Except(propertyList.Select(prop=>prop.Key)).Any();
}
/// <summary>
/// Checks if model hierarchy consists of given set of required properties
/// </summary>
/// <param name="modelName">model for which to check the resource properties</param>
/// <param name="definitions">dictionary of model definitions</param>
/// <param name="propertyList">List of required properties to be checked for in a model hierarchy</param>
/// <returns>true if the model hierarchy contains all of the required properties</returns>
public static bool ContainsRequiredProperties(string modelName, Dictionary<string, Schema> definitions, IEnumerable<string> requiredPropertiesToCheck)
{
var propertyList = EnumerateRequiredProperties(modelName, definitions);
return !requiredPropertiesToCheck.Except(propertyList).Any();
}
/// <summary>
/// Checks if model hierarchy consists of given set of read only properties
/// </summary>
/// <param name="modelName">model for which to check the resource properties</param>
/// <param name="definitions">dictionary of model definitions</param>
/// <param name="propertyList">List of read only properties to be checked for in a model hierarchy</param>
/// <returns>true if the model hierarchy contains all of the read only properties</returns>
public static bool ContainsReadOnlyProperties(string modelName, Dictionary<string, Schema> definitions, IEnumerable<string> readOnlyPropertiesToCheck)
{
var propertyList = EnumerateReadOnlyProperties(modelName, definitions);
return !readOnlyPropertiesToCheck.Except(propertyList).Any();
}
/// <summary>
/// Gets response models returned by operations with given httpVerb
/// by default looks at the '200' response
/// </summary>
/// <param name="httpVerb">operation verb for which to determine the response model</param>
/// <param name="serviceDefinition">service definition containing the operations</param>
/// <param name="respCode">The response code to look at when fetching models, by default '200'</param>
/// <returns>list of model names that are returned by all operations matching the httpVerb</returns>
public static IEnumerable<string> GetOperationResponseModels(string httpVerb, ServiceDefinition serviceDefinition, string respCode = "200")
{
var operations = GetOperationsByRequestMethod(httpVerb, serviceDefinition)
.Where(op => op.Responses?.ContainsKey(respCode) == true);
return operations.Select(op => op.Responses[respCode]?.Schema?.Reference?.StripDefinitionPath())
.Where(modelName => !string.IsNullOrEmpty(modelName));
}
/// <summary>
/// Gets all models that have the x-ms-azure-resource extension set on them
/// </summary>
/// <param name="definitions">model definitions in which to find the x-ms-azure-resource extension</param>
/// <returns>list of model names that have the x-ms-azure-resource extension set on them</returns>
public static IEnumerable<string> GetXmsAzureResourceModels(Dictionary<string, Schema> definitions)
=> definitions.Where(defPair =>
defPair.Value.Extensions?.ContainsKey("x-ms-azure-resource") == true &&
defPair.Value.Extensions["x-ms-azure-resource"].Equals(true))
.Select(defPair => defPair.Key);
/// <summary>
/// For a given model, recursively traverses its allOfs and checks if any of them refer to
/// the base resourceModels
/// </summary>
/// <param name="modelName">model for which to determine if it is allOfs on given model names</param>
/// <param name="definitions">dictionary that contains model definitions</param>
/// <param name="allOfedModels">list of allOfed models</param>
/// <returns>true if given model allOfs on given allOf list at any level of hierarchy</returns>
public static bool IsAllOfOnModelNames(string modelName, Dictionary<string, Schema> definitions, IEnumerable<string> allOfedModels)
{
// if the model being tested belongs to the allOfed list, return false
// if model can't be found in definitions we can't verify
// if model does not have any allOfs, return early
if (allOfedModels.Contains(modelName) || !definitions.ContainsKey(modelName) || definitions[modelName]?.AllOf?.Any() != true)
{
return false;
}
var modelHierarchy = EnumerateModelHierarchy(modelName, definitions);
// if the given model is allOfing on any of the given models, return true
return allOfedModels.Intersect(modelHierarchy).Any();
}
/// <summary>
/// For a given set of resource models evaluates which models are tracked and returns those
/// </summary>
/// <param name="resourceModels">list of resourceModels from which to evaluate the tracked resources</param>
/// <param name="definitions">the dictionary of model definitions</param>
/// <returns>list of tracked resources</returns>
public static IEnumerable<string> GetTrackedResources(IEnumerable<string> resourceModels, Dictionary<string, Schema> definitions)
=> resourceModels.Where(resModel => ContainsRequiredProperties(resModel, definitions, new List<string>() { "location" }));
/// <summary>
/// Determines if an operation is xms pageable operation
/// </summary>
/// <param name="op">Operation for which to check the x-ms-pageable extension</param>
/// <returns>true if operation is x-ms-pageable</returns>
public static bool IsXmsPageableResponseOperation(Operation op) => (op.Extensions?.GetValue<object>(XmsPageable) != null);
/// <summary>
/// Determines if an operation returns an object of array type
/// </summary>
/// <param name="op">Operation for which to check the x-ms-pageable extension</param>
/// <param name="serviceDefinition">Service definition that contains the operation</param>
/// <returns>true if operation returns an array type</returns>
        public static bool IsArrayTypeResponseOperation(Operation op, ServiceDefinition entity)
        {
            // if a success response is not defined, we have nothing to check, return false
            if (op.Responses?.ContainsKey("200") != true) return false;

            // if we have a non-null response schema, and the schema is of type array, return true
            if (op.Responses["200"]?.Schema?.Reference?.Equals(string.Empty) == false)
            {
                var modelLink = op.Responses["200"].Schema.Reference;

                // Resolve the referenced model definition (null if the ref is unknown).
                var def = entity.Definitions.GetValueOrNull(modelLink.StripDefinitionPath());

                // Heuristic: a "collection" wrapper model is assumed to have at most
                // two properties (typically the array plus a nextLink).
                // if the object has more than 2 properties, we can assume its a composite object
                // that does not represent a collection of some type
                var propertyCount = def?.Properties?.Values?.Count;
                if (propertyCount == null || propertyCount > 2)
                {
                    return false;
                }

                // if the object is an allof on some other object, let's consider it to be a composite object
                if (def.AllOf != null)
                {
                    return false;
                }

                // A small, non-composite model with an array-typed property is
                // treated as an array response.
                if (def.Properties.Values.Any(type => type.Type == DataType.Array))
                {
                    return true;
                }
            }
            return false;
        }
/// <summary>
/// Returns all operations that match the httpverb (from all paths in service definitions)
/// </summary>
/// <param name="id">httpverb to check for</param>
/// <param name="serviceDefinition">service definition in which to find the operations</param>
/// <param name="includeCustomPaths">whether to include the x-ms-paths</param>
/// <returns>list if operations that match the httpverb</returns>
public static IEnumerable<Operation> GetOperationsByRequestMethod(string id, ServiceDefinition serviceDefinition, bool includeCustomPaths = true)
{
var pathOperations = SelectOperationsFromPaths(id, serviceDefinition.Paths);
if (includeCustomPaths)
{
pathOperations.Concat(SelectOperationsFromPaths(id, serviceDefinition.CustomPaths));
}
return pathOperations;
}
public static bool IsXmsPageableOrArrayTypeResponseOperation(Operation op, ServiceDefinition entity) =>
(IsXmsPageableResponseOperation(op) || IsArrayTypeResponseOperation(op, entity));
/// <summary>
/// Returns all operations that match the httpverb
/// </summary>
/// <param name="id">httpverb to check for</param>
/// <param name="paths">paths in which to find the operations with given verb</param>
/// <returns>list if operations that match the httpverb</returns>
private static IEnumerable<Operation> SelectOperationsFromPaths(string id, Dictionary<string, Dictionary<string, Operation>> paths)
=> paths.Values.SelectMany(pathObjs=>pathObjs.Where(pair => pair.Key.ToLower().Equals(id.ToLower())).Select(pair => pair.Value));
/// <summary>
/// Returns a suggestion of camel case styled string based on the string passed as parameter.
/// </summary>
/// <param name="name">String to convert to camel case style</param>
/// <returns>A string that conforms with camel case style based on the string passed as parameter.</returns>
        public static string GetCamelCasedSuggestion(string name)
        {
            StringBuilder sb = new StringBuilder(name);
            // Always lowercase the first character.
            if (sb.Length > 0)
            {
                sb[0] = sb[0].ToString().ToLower()[0];
            }

            // From the second character on: an uppercase character is preserved
            // unless it immediately follows an uppercase character that was itself
            // preserved, in which case it is lowercased. Within a run of uppercase
            // characters this yields an alternating pattern (e.g. every other
            // uppercase of the run is kept).
            bool firstUpper = true;
            for (int i = 1; i<name.Length; i++)
            {
                if (char.IsUpper(sb[i]) && firstUpper)
                {
                    // Keep this uppercase; block the next char from also being kept.
                    firstUpper = false;
                }
                else
                {
                    // Reset the gate and force this character to lowercase if upper.
                    firstUpper = true;
                    if (char.IsUpper(sb[i]))
                    {
                        sb[i] = sb[i].ToString().ToLower()[0];
                    }
                }
            }
            return sb.ToString();
        }
/// <summary>
/// Returns whether a string follows camel case style, allowing for 2 consecutive upper case characters for acronyms.
/// </summary>
/// <param name="name">String to check for style</param>
/// <returns>true if "name" follows camel case style (allows for 2 consecutive upper case characters), false otherwise.</returns>
public static bool IsNameCamelCase(string name) => PropNameRegEx.IsMatch(name);
/// <summary>
/// Evaluates if the reference is of the provided data type.
/// </summary>
/// <param name="reference">reference to evaluate</param>
/// <param name="definitions">definition list</param>
/// <param name="dataType">Datatype value to evaluate</param>
/// <returns>true if the reference is of the provided data type. False otherwise.</returns>
public static bool IsReferenceOfType(string reference, Dictionary<string, Schema> definitions, Model.DataType dataType)
{
if (reference == null)
{
return false;
}
string definitionName = Extensions.StripDefinitionPath(reference);
Schema schema = definitions.GetValueOrNull(definitionName);
if (schema == null)
{
return false;
}
if (schema.Type == dataType || (schema.Type == null && schema.Reference != null && IsReferenceOfType(schema.Reference, definitions, dataType)))
{
return true;
}
return false;
}
/// <summary>
/// Checks if the reference to match is an array response of the reference
/// </summary>
/// <param name="reference"></param>
/// <param name="referenceToMatch"></param>
/// <param name="definitions"></param>
/// <returns></returns>
private static bool IsArrayOf(string reference, string referenceToMatch, Dictionary<string, Schema> definitions)
{
if (reference == null)
return false;
Schema schema = Schema.FindReferencedSchema(reference, definitions);
return schema.Properties.Any(property => property.Value.Type == DataType.Array && property.Value.Items?.Reference?.EndsWith("/" + referenceToMatch) == true);
}
/// <summary>
/// Returns array of resource providers
/// </summary>
/// <param name="paths">Dictionary of paths to look for</param>
/// <returns>Array of resource providers</returns>
public static IEnumerable<string> GetResourceProviders(Dictionary<string, Dictionary<string, Operation>> paths)
{
IEnumerable<string> resourceProviders = paths?.Keys.SelectMany(path => ResourceProviderPathPattern.Matches(path)
.OfType<Match>()
.Select(match => match.Groups["resPath"].Value.ToString()))
.Distinct()
.ToList();
return resourceProviders;
}
/// <summary>
/// Given an operation Id, returns the path where it is found
/// </summary>
/// <param name="operationId">operationId to look for</param>
/// <param name="paths">Dictionary of paths to look for</param>
/// <returns>path object which contains the operationId</returns>
public static KeyValuePair<string, Dictionary<string, Operation>> GetOperationIdPath(string operationId, Dictionary<string, Dictionary<string, Operation>> paths)
=> paths.Where(pathObj => pathObj.Value.Values.Where(op => op.OperationId == operationId).Any()).First();
/// <summary>
/// Given an operation Id, returns the corresponding verb for it
/// </summary>
/// <param name="operationId">operationId to look for</param>
/// <param name="paths">Dictionary of paths</param>
/// <returns>HTTP verb corresponding to the operationId</returns>
public static string GetOperationIdVerb(string operationId, KeyValuePair<string, Dictionary<string, Operation>> pathObj)
=> pathObj.Value.First(opObj => opObj.Value.OperationId == operationId).Key;
/// <summary>
/// Get the list of all ChildTrackedResources along with their immediate parents.
/// </summary>
/// <param name="serviceDefinition">Service Definition</param>
/// <returns>list of child tracked resources</returns>
public static IEnumerable<KeyValuePair<string, string>> GetChildTrackedResourcesWithImmediateParent(ServiceDefinition serviceDefinition)
{
LinkedList<KeyValuePair<string, string>> result = new LinkedList<KeyValuePair<string, string>>();
foreach (KeyValuePair<string, Dictionary<string, Operation>> pathDefinition in serviceDefinition.Paths)
{
KeyValuePair<string, string> childResourceMapping = GetChildAndImmediateParentResource(pathDefinition.Key, serviceDefinition.Paths, serviceDefinition.Definitions);
if (childResourceMapping.Key != null && childResourceMapping.Value != null)
{
result.AddLast(new LinkedListNode<KeyValuePair<string, string>>(new KeyValuePair<string, string>(childResourceMapping.Key, childResourceMapping.Value)));
}
}
return result;
}
private static KeyValuePair<string, string> GetChildAndImmediateParentResource(string path, Dictionary<string, Dictionary<string, Operation>> paths, Dictionary<string, Schema> definitions)
{
Match match = resourcePathPattern.Match(path);
KeyValuePair<string, string> result = new KeyValuePair<string, string>();
if (match.Success)
{
string childResourceName = match.Groups["childresource"].Value;
string immediateParentResourceNameInPath = GetImmediateParentResourceName(path);
string immediateParentResourceNameActual = GetActualParentResourceName(immediateParentResourceNameInPath, paths, definitions);
result = new KeyValuePair<string, string>(childResourceName, immediateParentResourceNameActual);
}
return result;
}
/// <summary>
/// Gets the immediate parent resource name
/// </summary>
        /// <param name="pathToEvaluate">path (already matched against the child-resource pattern) from which the immediate parent resource segment is derived</param>
/// <returns>name of the immediate parent resource name</returns>
        private static string GetImmediateParentResourceName(string pathToEvaluate)
        {
            // Strip the trailing "/{childName}" parameter segment...
            pathToEvaluate = pathToEvaluate.Substring(0, pathToEvaluate.LastIndexOf("/{"));
            // ...then strip the child resource-type segment's own "/{...}" boundary,
            // leaving the path ending at the parent resource-type segment.
            // NOTE(review): assumes the path contains at least two "/{" occurrences
            // (guaranteed when the caller matched resourcePathPattern first);
            // otherwise LastIndexOf returns -1 and Substring throws.
            pathToEvaluate = pathToEvaluate.Substring(0, pathToEvaluate.LastIndexOf("/{"));
            // The last remaining segment is the immediate parent resource name.
            return pathToEvaluate.Substring(pathToEvaluate.LastIndexOf("/") + 1);
        }
/// <summary>
/// Gets the actual parent resource name. For example, the name in Path could be 'servers'. The actual parent name is 'server'.
/// </summary>
/// <param name="nameInPath"></param>
/// <param name="paths"></param>
/// <param name="definitions"></param>
/// <returns></returns>
private static string GetActualParentResourceName(string nameInPath, Dictionary<string, Dictionary<string, Operation>> paths, Dictionary<string, Schema> definitions)
{
Regex pathRegEx = new Regex("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/.*/" + nameInPath + "/{[^/]+}$", RegexOptions.IgnoreCase);
IEnumerable<KeyValuePair<string, Dictionary<string, Operation>>> matchingPaths = paths.Where((KeyValuePair<string, Dictionary<string, Operation>> pth) => pathRegEx.IsMatch(pth.Key));
if (!matchingPaths.Any()) return null;
KeyValuePair<string, Dictionary<string, Operation>> path = matchingPaths.First();
IEnumerable<KeyValuePair<string, Operation>> operations = path.Value.Where(op => op.Key.Equals("get", StringComparison.CurrentCultureIgnoreCase));
if (!operations.Any()) return null;
KeyValuePair<string, Operation> operation = operations.First();
IEnumerable<KeyValuePair<string, OperationResponse>> responses = operation.Value.Responses.Where(resp => resp.Key.Equals("200"));
if (!responses.Any()) return null;
KeyValuePair<string, OperationResponse> response = responses.First();
return GetReferencedModel(response.Value.Schema.Reference, definitions);
}
private static string GetReferencedModel(String schema, Dictionary<string, Schema> definitions)
{
Schema referencedSchema = Schema.FindReferencedSchema(schema, definitions);
if (referencedSchema == null) return null;
if (referencedSchema.Reference == null)
{
IEnumerable<KeyValuePair<string, Schema>> definition = definitions.Where(def => def.Value == referencedSchema);
if (!definition.Any()) return null;
return definition.First().Key;
}
return GetReferencedModel(referencedSchema.Reference, definitions);
}
/*
* This regular expression tries to filter the paths which has child resources. Now we could take the following paths:
*
* Case 1: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}
* Case 2: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/poweroff
* Case 3: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases
* Case 4: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases/{database1}
* Case 5: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases/{database1}/restart
*
* Case 1 does not have a child resource. So, this can be rejected.
*
* Case 2 & Case 3 are special cases. Here, Case 3 does have a child resource - 'databases'. But, Case 2 does not have a child resource. The 'poweroff' is
* an operation - not a child resource. But, it is difficult to determine, using regular expressions, whether the 'poweroff' is an operation or child resource.
* We could filter both and determine based on the response whether it is a child resource. While this is valid, it seems to be complex. So, a decision has been
* made to reject this pattern altogether and not look for child resource in this path pattern. Note: Case 5 is also rejected for the same reason.
*
* Case 4 is a valid scenario which has a child resource. Also, in this pattern, there is no ambiguity about any operation. So, in order to find the path, with
* child resources, we use only this pattern. i.e. the path must have atleast one parent resource similar to '/servers/{server1}' followed by any number of child
* resources and end with the child resource pattern - similar to '/databases/{database1}'.
*
* Note: If in a swagger file, there are the following paths:
*
* 1: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}
* 2: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases
*
* and do not have the following path:
*
* 3: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases/{database1}
*
* then we will miss the child resource 'databases'. But, the possibility of such an occurence is extremely rare, if not impossible. It is quite possible that
* 1 & 3 are present without 2 and 1-2-3 are present. So, it is fine to use this logic.
*
* Immediate Parent Resource Logic
* ===============================
* The path with the child resource has been determined and the name of the child resource has been identified. Now, in order to find the immediate parent resource,
* consider the following cases:
*
* 1. /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases/{databases1} - Child: databases-Immediate Parent: servers
* 2. /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases/{databases1}/disks/{disk1} - Child: disks-Immediate Parent: databases
*
* So, we do string manipulation to determine the immediate parent. We find the last index of "/{" twice and remove after that. Then we find the last index of "/" and find the string after that.
* To visualize it:
*
* Start: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases/{databases1}/disks/{disk1}
* Step 1: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases/{databases1}/disks
* Step 2: /subscriptions/{subscriptionId}/resourceGroup/{resourceGroupName}/providers/Microsoft.Sql/servers/{server1}/databases
* Step 3: databases
*/
private static readonly Regex resourcePathPattern = new Regex("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/[^/]+/[^/]+/{[^/]+}.*/(?<childresource>\\w+)/{[^/]+}$", RegexOptions.IgnoreCase);
public static IEnumerable<string> GetParentTrackedResources(IEnumerable<string> trackedResourceModels, IEnumerable<KeyValuePair<string, string>> childTrackedResourceModels)
=> trackedResourceModels.Where(resourceModel => !childTrackedResourceModels.Any(childModel => childModel.Key.Equals(resourceModel)));
/// <summary>
/// For the provided resource model, it gets the operation which ends with ListByResourceGroup and returns the resource model.
/// </summary>
/// <param name="resourceModel"></param>
/// <param name="definitions"></param>
/// <param name="serviceDefinition"></param>
/// <returns>Gets the operation which ends with ListByResourceGroup and returns the resource model.</returns>
public static Operation GetListByResourceGroupOperation(string resourceModel, Dictionary<string, Schema> definitions, ServiceDefinition serviceDefinition)
{
return GetListByXOperation(resourceModel, definitions, serviceDefinition, listByRgRegEx);
}
/// <summary>
/// For the provided resource model, it gets the operation which matches with ListBySubscription and returns the resource model.
/// </summary>
/// <param name="resourceModel"></param>
/// <param name="definitions"></param>
/// <param name="serviceDefinition"></param>
/// <returns>Gets the operation which matches with ListBySubscription and returns the resource model.</returns>
public static Operation GetListBySubscriptionOperation(string resourceModel, Dictionary<string, Schema> definitions, ServiceDefinition serviceDefinition)
{
return GetListByXOperation(resourceModel, definitions, serviceDefinition, listBySidRegEx);
}
/// <summary>
/// For the provided resource model, it gets the operation which matches with specified regex and returns the resource model.
/// </summary>
/// <param name="resourceModel"></param>
/// <param name="definitions"></param>
/// <param name="serviceDefinition"></param>
/// <param name="regEx"></param>
/// <returns>Gets the operation which matches with specified regex and returns the resource model.</returns>
private static Operation GetListByXOperation(string resourceModel, Dictionary<string, Schema> definitions, ServiceDefinition serviceDefinition, Regex regEx)
{
return GetListByOperation(regEx, resourceModel, definitions, serviceDefinition);
}
/// <summary>
/// For the provided resource model, it gets the operation which matches with specified regex and returns the resource model.
/// </summary>
/// <param name="regEx"></param>
/// <param name="resourceModel"></param>
/// <param name="definitions"></param>
/// <param name="serviceDefinition"></param>
/// <returns>Gets the operation which matches with specified regex and returns the resource model.</returns>
private static Operation GetListByOperation(Regex regEx, string resourceModel, Dictionary<string, Schema> definitions, ServiceDefinition serviceDefinition)
{
IEnumerable<Operation> getOperations = ValidationUtilities.GetOperationsByRequestMethod("get", serviceDefinition);
IEnumerable<Operation> operations = getOperations.Where(operation => regEx.IsMatch(operation.OperationId) &&
IsXmsPageableResponseOperation(operation) &&
operation.Responses.Any(
response => response.Key.Equals("200") &&
IsArrayOf(response.Value.Schema?.Reference, resourceModel, definitions)));
if (operations != null && operations.Count() != 0)
{
return operations.First();
}
return null;
}
}
}
| 1 | 25,168 | I don't see the "only" part reflected in the code but maybe I'm missing it. Also, the indentation is misleading: the `.SelectMany` calls are perfectly aligned but are *not* operating on the same "level". I'd expect the second `SelectMany` to be on the same height as the inner `Where`, just break `pathObj => pathObj<HERE>.Where` and lines won't be that long. | Azure-autorest | java |
@@ -20,7 +20,8 @@ Puppet::DataTypes.create_type('Target') do
vars => { type => Optional[Hash[String[1], Data]], kind => given_or_derived },
facts => { type => Optional[Hash[String[1], Data]], kind => given_or_derived },
features => { type => Optional[Array[String[1]]], kind => given_or_derived },
- plugin_hooks => { type => Optional[Hash[String[1], Data]], kind => given_or_derived }
+ plugin_hooks => { type => Optional[Hash[String[1], Data]], kind => given_or_derived },
+ resources => { type => Optional[Hash[String[1], ResourceInstance]], kind => given_or_derived }
},
functions => {
host => Callable[[], Optional[String]], | 1 | # frozen_string_literal: true
Puppet::DataTypes.create_type('Target') do
begin
inventory = Puppet.lookup(:bolt_inventory)
target_implementation_class = inventory.target_implementation_class
rescue Puppet::Context::UndefinedBindingError
target_implementation_class = Bolt::Target
end
require 'bolt/target'
interface <<-PUPPET
attributes => {
uri => { type => Optional[String[1]], kind => given_or_derived },
name => { type => Optional[String[1]] , kind => given_or_derived },
safe_name => { type => Optional[String[1]], kind => given_or_derived },
target_alias => { type => Optional[Variant[String[1], Array[String[1]]]], kind => given_or_derived },
config => { type => Optional[Hash[String[1], Data]], kind => given_or_derived },
vars => { type => Optional[Hash[String[1], Data]], kind => given_or_derived },
facts => { type => Optional[Hash[String[1], Data]], kind => given_or_derived },
features => { type => Optional[Array[String[1]]], kind => given_or_derived },
plugin_hooks => { type => Optional[Hash[String[1], Data]], kind => given_or_derived }
},
functions => {
host => Callable[[], Optional[String]],
password => Callable[[], Optional[String[1]]],
port => Callable[[], Optional[Integer]],
protocol => Callable[[], Optional[String[1]]],
transport => Callable[[], String[1]],
transport_config => Callable[[], Hash[String[1], Data]],
user => Callable[[], Optional[String[1]]]
}
PUPPET
implementation_class target_implementation_class
end
| 1 | 14,599 | It seems like most of these will never be nil, are they optional just in case? | puppetlabs-bolt | rb |
@@ -88,7 +88,7 @@ public class TableCodecTest {
try {
byte[] bytes = TableCodec.encodeRow(tblInfo.getColumns(), values, tblInfo.isPkHandle());
// testing the correctness via decodeRow
- Row row = TableCodec.decodeRow(bytes, tblInfo.getColumns());
+ Row row = TableCodec.decodeRow(bytes, -1, tblInfo);
for (int j = 0; j < tblInfo.getColumns().size(); j++) {
assertEquals(row.get(j, null), values[j]);
} | 1 | package com.pingcap.tikv.codec;
import static org.junit.Assert.*;
import com.google.common.collect.ImmutableList;
import com.pingcap.tikv.meta.MetaUtils;
import com.pingcap.tikv.meta.TiColumnInfo;
import com.pingcap.tikv.meta.TiTableInfo;
import com.pingcap.tikv.row.Row;
import com.pingcap.tikv.types.IntegerType;
import com.pingcap.tikv.types.MySQLType;
import com.pingcap.tikv.types.StringType;
import java.util.ArrayList;
import java.util.List;
import org.joda.time.DateTime;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class TableCodecTest {
private static TiTableInfo createTable() {
StringType VARCHAR255 =
new StringType(
new TiColumnInfo.InternalTypeHolder(
MySQLType.TypeVarchar.getTypeCode(), 0, 255, 0, "", "", ImmutableList.of()));
return new MetaUtils.TableBuilder()
.name("testTable")
.addColumn("c1", IntegerType.INT, true)
.addColumn("c2", IntegerType.BIGINT)
// TODO: enable when support Timestamp
// .addColumn("c3", DateTimeType.DATETIME)
// .addColumn("c4", TimestampType.TIMESTAMP)
.addColumn("c5", VARCHAR255)
.addColumn("c6", VARCHAR255)
// .appendIndex("testIndex", ImmutableList.of("c1", "c2"), false)
.build();
}
private Object[] values;
private TiTableInfo tblInfo = createTable();
private void makeValues() {
List<Object> values = new ArrayList<>();
values.add(1L);
values.add(1L);
DateTime dateTime = DateTime.parse("1995-10-10");
// values.add(new Timestamp(dateTime.getMillis()));
// values.add(new Timestamp(dateTime.getMillis()));
values.add("abc");
values.add("中");
this.values = values.toArray();
}
@Before
public void setUp() {
makeValues();
}
@Rule public ExpectedException expectedEx = ExpectedException.none();
@Test
public void testRowCodecThrowException() {
try {
TableCodec.encodeRow(
tblInfo.getColumns(), new Object[] {values[0], values[1]}, tblInfo.isPkHandle());
expectedEx.expect(IllegalAccessException.class);
expectedEx.expectMessage("encodeRow error: data and columnID count not match 6 vs 2");
} catch (IllegalAccessException ignored) {
}
}
@Test
public void testEmptyValues() {
try {
byte[] bytes = TableCodec.encodeRow(new ArrayList<>(), new Object[] {}, false);
assertEquals(1, bytes.length);
assertEquals(Codec.NULL_FLAG, bytes[0]);
} catch (IllegalAccessException ignored) {
}
}
@Test
public void testRowCodec() {
// multiple test was added since encodeRow refuse its cdo
for (int i = 0; i < 4; i++) {
try {
byte[] bytes = TableCodec.encodeRow(tblInfo.getColumns(), values, tblInfo.isPkHandle());
// testing the correctness via decodeRow
Row row = TableCodec.decodeRow(bytes, tblInfo.getColumns());
for (int j = 0; j < tblInfo.getColumns().size(); j++) {
assertEquals(row.get(j, null), values[j]);
}
} catch (IllegalAccessException ignored) {
}
}
}
}
| 1 | 10,240 | `.addColumn("c1", IntegerType.INT, true)` means `PkHandle=true`, maybe should add `.setPkHandle(true)` in line 31 | pingcap-tispark | java |
@@ -56,8 +56,10 @@ public class RowKey extends Key implements Serializable {
Object obj = handle.getValue();
if (obj instanceof Long) {
return new RowKey(tableId, (long) obj);
+ } else if (obj instanceof Integer) {
+ return new RowKey(tableId, ((Integer) obj).longValue());
}
- throw new TiExpressionException("Cannot encode row key with non-long type");
+ throw new TiExpressionException("Cannot encode row key with non-long or non-integer type");
}
public static RowKey createMin(long tableId) { | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.key;
import static com.pingcap.tikv.codec.Codec.IntegerCodec.writeLong;
import com.pingcap.tikv.codec.CodecDataOutput;
import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.exception.TiExpressionException;
import java.io.Serializable;
public class RowKey extends Key implements Serializable {
private static final byte[] REC_PREFIX_SEP = new byte[] {'_', 'r'};
private final long tableId;
private final long handle;
private final boolean maxHandleFlag;
private RowKey(long tableId, long handle) {
super(encode(tableId, handle));
this.tableId = tableId;
this.handle = handle;
this.maxHandleFlag = false;
}
/**
* The RowKey indicating maximum handle (its value exceeds Long.Max_Value)
*
* <p>Initializes an imaginary globally MAXIMUM rowKey with tableId.
*/
private RowKey(long tableId) {
super(encodeBeyondMaxHandle(tableId));
this.tableId = tableId;
this.handle = Long.MAX_VALUE;
this.maxHandleFlag = true;
}
public static RowKey toRowKey(long tableId, long handle) {
return new RowKey(tableId, handle);
}
public static RowKey toRowKey(long tableId, TypedKey handle) {
Object obj = handle.getValue();
if (obj instanceof Long) {
return new RowKey(tableId, (long) obj);
}
throw new TiExpressionException("Cannot encode row key with non-long type");
}
public static RowKey createMin(long tableId) {
return toRowKey(tableId, Long.MIN_VALUE);
}
public static RowKey createBeyondMax(long tableId) {
return new RowKey(tableId);
}
private static byte[] encode(long tableId, long handle) {
CodecDataOutput cdo = new CodecDataOutput();
encodePrefix(cdo, tableId);
writeLong(cdo, handle);
return cdo.toBytes();
}
private static byte[] encodeBeyondMaxHandle(long tableId) {
return prefixNext(encode(tableId, Long.MAX_VALUE));
}
@Override
public RowKey next() {
long handle = getHandle();
boolean maxHandleFlag = getMaxHandleFlag();
if (maxHandleFlag) {
throw new TiClientInternalException("Handle overflow for Long MAX");
}
if (handle == Long.MAX_VALUE) {
return createBeyondMax(tableId);
}
return new RowKey(tableId, handle + 1);
}
public long getTableId() {
return tableId;
}
public long getHandle() {
return handle;
}
private boolean getMaxHandleFlag() {
return maxHandleFlag;
}
@Override
public String toString() {
return Long.toString(handle);
}
private static void encodePrefix(CodecDataOutput cdo, long tableId) {
cdo.write(TBL_PREFIX);
writeLong(cdo, tableId);
cdo.write(REC_PREFIX_SEP);
}
public static class DecodeResult {
public long handle;
public enum Status {
MIN,
MAX,
EQUAL,
LESS,
GREATER,
UNKNOWN_INF
}
public Status status;
}
}
| 1 | 11,353 | how about `Cannot encode row key with non-integer type` directly? | pingcap-tispark | java |
@@ -42,6 +42,9 @@ public interface ExecutorLoader {
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows()
throws ExecutorManagerException;
+ Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedExecutions()
+ throws ExecutorManagerException;
+
Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(int execId)
throws ExecutorManagerException;
| 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import azkaban.executor.ExecutorLogEvent.EventType;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import java.io.File;
import java.time.Duration;
import java.util.List;
import java.util.Map;
public interface ExecutorLoader {
void uploadExecutableFlow(ExecutableFlow flow)
throws ExecutorManagerException;
ExecutableFlow fetchExecutableFlow(int execId)
throws ExecutorManagerException;
List<ExecutableFlow> fetchRecentlyFinishedFlows(Duration maxAge)
throws ExecutorManagerException;
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows()
throws ExecutorManagerException;
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows()
throws ExecutorManagerException;
Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(int execId)
throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(int skip, int num)
throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
int skip, int num) throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
int skip, int num, Status status) throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(String projContain,
String flowContains, String userNameContains, int status, long startData,
long endData, int skip, int num) throws ExecutorManagerException;
List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId,
final long startTime) throws ExecutorManagerException;
/**
* <pre>
* Fetch all executors from executors table
* Note:-
* 1 throws an Exception in case of a SQL issue
* 2 returns an empty list in case of no executor
* </pre>
*
* @return List<Executor>
*/
List<Executor> fetchAllExecutors() throws ExecutorManagerException;
/**
* <pre>
* Fetch all executors from executors table with active = true
* Note:-
* 1 throws an Exception in case of a SQL issue
* 2 returns an empty list in case of no active executor
* </pre>
*
* @return List<Executor>
*/
List<Executor> fetchActiveExecutors() throws ExecutorManagerException;
/**
* <pre>
* Fetch executor from executors with a given (host, port)
* Note:
* 1. throws an Exception in case of a SQL issue
* 2. return null when no executor is found
* with the given (host,port)
* </pre>
*
* @return Executor
*/
Executor fetchExecutor(String host, int port)
throws ExecutorManagerException;
/**
* <pre>
* Fetch executor from executors with a given executorId
* Note:
* 1. throws an Exception in case of a SQL issue
* 2. return null when no executor is found with the given executorId
* </pre>
*
* @return Executor
*/
Executor fetchExecutor(int executorId) throws ExecutorManagerException;
/**
* <pre>
* create an executor and insert in executors table.
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception if a executor with (host, port) already exist
* 3. return null when no executor is found with the given executorId
* </pre>
*
* @return Executor
*/
Executor addExecutor(String host, int port)
throws ExecutorManagerException;
/**
* <pre>
* create an executor and insert in executors table.
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception if there is no executor with the given id
* 3. return null when no executor is found with the given executorId
* </pre>
*/
void updateExecutor(Executor executor) throws ExecutorManagerException;
/**
* <pre>
* Remove the executor from executors table.
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception if there is no executor in the table* </pre>
* </pre>
*/
void removeExecutor(String host, int port) throws ExecutorManagerException;
/**
* <pre>
* Log an event in executor_event audit table Note:- throws an Exception in
* case of a SQL issue
* Note: throws an Exception in case of a SQL issue
* </pre>
*
* @return isSuccess
*/
void postExecutorEvent(Executor executor, EventType type, String user,
String message) throws ExecutorManagerException;
/**
* <pre>
* This method is to fetch events recorded in executor audit table, inserted
* by postExecutorEvents with a given executor, starting from skip
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. Returns an empty list in case of no events
* </pre>
*
* @return List<ExecutorLogEvent>
*/
List<ExecutorLogEvent> getExecutorEvents(Executor executor, int num,
int offset) throws ExecutorManagerException;
void addActiveExecutableReference(ExecutionReference ref)
throws ExecutorManagerException;
void removeActiveExecutableReference(int execId)
throws ExecutorManagerException;
/**
* <pre>
* Unset executor Id for an execution
* Note:-
* throws an Exception in case of a SQL issue
* </pre>
*/
void unassignExecutor(int executionId) throws ExecutorManagerException;
/**
* <pre>
* Set an executor Id to an execution
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. throws an Exception in case executionId or executorId do not exist
* </pre>
*/
void assignExecutor(int executorId, int execId)
throws ExecutorManagerException;
/**
* <pre>
* Fetches an executor corresponding to a given execution
* Note:-
* 1. throws an Exception in case of a SQL issue
* 2. return null when no executor is found with the given executionId
* </pre>
*
* @return fetched Executor
*/
Executor fetchExecutorByExecutionId(int executionId)
throws ExecutorManagerException;
/**
* <pre>
* Fetch queued flows which have not yet dispatched
* Note:
* 1. throws an Exception in case of a SQL issue
* 2. return empty list when no queued execution is found
* </pre>
*
* @return List of queued flows and corresponding execution reference
*/
List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows()
throws ExecutorManagerException;
boolean updateExecutableReference(int execId, long updateTime)
throws ExecutorManagerException;
LogData fetchLogs(int execId, String name, int attempt, int startByte,
int endByte) throws ExecutorManagerException;
List<Object> fetchAttachments(int execId, String name, int attempt)
throws ExecutorManagerException;
void uploadLogFile(int execId, String name, int attempt, File... files)
throws ExecutorManagerException;
void uploadAttachmentFile(ExecutableNode node, File file)
throws ExecutorManagerException;
void updateExecutableFlow(ExecutableFlow flow)
throws ExecutorManagerException;
void uploadExecutableNode(ExecutableNode node, Props inputParams)
throws ExecutorManagerException;
List<ExecutableJobInfo> fetchJobInfoAttempts(int execId, String jobId)
throws ExecutorManagerException;
ExecutableJobInfo fetchJobInfo(int execId, String jobId, int attempt)
throws ExecutorManagerException;
List<ExecutableJobInfo> fetchJobHistory(int projectId, String jobId,
int skip, int size) throws ExecutorManagerException;
void updateExecutableNode(ExecutableNode node)
throws ExecutorManagerException;
int fetchNumExecutableFlows(int projectId, String flowId)
throws ExecutorManagerException;
int fetchNumExecutableFlows() throws ExecutorManagerException;
int fetchNumExecutableNodes(int projectId, String jobId)
throws ExecutorManagerException;
Props fetchExecutionJobInputProps(int execId, String jobId)
throws ExecutorManagerException;
Props fetchExecutionJobOutputProps(int execId, String jobId)
throws ExecutorManagerException;
Pair<Props, Props> fetchExecutionJobProps(int execId, String jobId)
throws ExecutorManagerException;
int removeExecutionLogsByTime(long millis)
throws ExecutorManagerException;
int selectAndUpdateExecution(final int executorId) throws ExecutorManagerException;
}
| 1 | 17,108 | The method names `fetchUnfinishedExecutions` and `fetchUnfinishedFlows` are too similar to each other. Would it be better to use the name `fetchUnfinishedFlowsMetadata` since you are only fetching metadata info about the flow? | azkaban-azkaban | java |
@@ -26,6 +26,17 @@ module Travis
sh.cmd "wget -q -O tmate.tar.xz #{static_build_linux_url}", echo: false, retry: true
sh.cmd "tar --strip-components=1 -xf tmate.tar.xz", echo: false
end
+ sh.elif "$(uname) = 'FreeBSD'" do
+ sh.cmd 'sudo pkg install -y libevent msgpack libssh', echo: true
+ sh.cmd 'git clone https://github.com/tmate-io/tmate.git', echo: true
+ sh.cd 'tmate', echo: false, stack: true
+ sh.cmd 'git checkout 2.4.0', echo: true
+ sh.cmd './autogen.sh', echo: true
+ sh.cmd './configure', echo: true
+ sh.cmd 'make', echo: true
+ sh.cmd 'sudo make install', echo: true
+ sh.cd :back, echo: false, stack: true
+ end
sh.else do
sh.echo "We are setting up the debug environment. This may take a while..."
sh.cmd "brew update &> /dev/null", echo: false, retry: true | 1 | require 'shellwords'
require 'travis/build/appliances/base'
require 'travis/build/helpers/template'
module Travis
module Build
module Appliances
class DebugTools < Base
include Template
TEMPLATES_PATH = File.expand_path('templates', __FILE__.sub('.rb', ''))
def_delegators :script, :debug_enabled?, :debug_build_via_api?
def apply
(debug_enabled? || debug_build_via_api?) ? apply_enabled : apply_disabled
end
def apply_enabled
sh.raw 'function travis_debug_install() {'
sh.echo "Setting up debug tools.", ansi: :yellow
sh.mkdir install_dir, echo: false, recursive: true
sh.cd install_dir, echo: false, stack: true
sh.if "-z $(command -v tmate)" do
sh.if "$(uname) = 'Linux'" do
sh.cmd "wget -q -O tmate.tar.xz #{static_build_linux_url}", echo: false, retry: true
sh.cmd "tar --strip-components=1 -xf tmate.tar.xz", echo: false
end
sh.else do
sh.echo "We are setting up the debug environment. This may take a while..."
sh.cmd "brew update &> /dev/null", echo: false, retry: true
sh.cmd "brew install tmate &> /dev/null", echo: false, retry: true
end
end
sh.file "travis_debug.sh", bash('travis_debug', encode: true), decode: true
sh.chmod '+x', "travis_debug.sh", echo: false
sh.mkdir "${TRAVIS_HOME}/.ssh", echo: false, recursive: true
sh.cmd "cat /dev/zero | ssh-keygen -q -f ${TRAVIS_HOME}/.ssh/tmate -N '' &> /dev/null", echo: false
sh.file "${TRAVIS_HOME}/.tmate.conf", template("tmate.conf.erb", identity: "${TRAVIS_HOME}/.ssh/tmate")
sh.export 'PATH', "${PATH}:#{install_dir}", echo: false
sh.cd :back, echo: false, stack: true
sh.raw '}'
sh.raw 'function travis_debug() {'
sh.cmd 'rm ${TRAVIS_HOME}/.netrc'
sh.raw 'travis_debug_install'
sh.echo "Preparing debug sessions."
sh.raw 'TRAVIS_CMD=travis_debug'
sh.raw 'export TRAVIS_DEBUG_MODE=true'
sh.raw 'travis_debug.sh "$@"'
sh.raw '}'
end
def apply_disabled
sh.raw 'function travis_debug() {'
sh.echo "The debug environment is not available. Please contact support.", ansi: :red
sh.raw "false"
sh.raw '}'
end
private
def install_dir
"${TRAVIS_HOME}/.debug"
end
# XXX the following does not apply to OSX
def static_build_linux_url
if config[:arch] == 'arm64'
"https://#{app_host}/files/tmate-static-linux-arm64v8.tar.xz"
else
"https://#{app_host}/files/tmate-static-linux-amd64.tar.xz"
end
end
end
end
end
end
| 1 | 17,504 | Note that `sudo` is not available by default on BSDs; there are a few places in the codebase here where that's explicitly worked around by using `su`. | travis-ci-travis-build | rb |
@@ -0,0 +1,5 @@
+// Package importpath is used to implement a test on Go import paths.
+package importpath
+
+// Answer is the answer to Life, the Universe and Everything.
+const Answer = 42 | 1 | 1 | 8,037 | ultra nit: missing Oxford comma :P | thought-machine-please | go |
|
@@ -729,6 +729,16 @@ class SparkFrameMethods(object):
== Physical Plan ==
...
+ >>> df.spark.explain("extended") # doctest: +SKIP
+ == Parsed Logical Plan ==
+ ...
+ == Analyzed Logical Plan ==
+ ...
+ == Optimized Logical Plan ==
+ ...
+ == Physical Plan ==
+ ...
+
>>> df.spark.explain(mode="extended") # doctest: +ELLIPSIS
== Parsed Logical Plan ==
... | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Spark related features. Usually, the features here are missing in pandas
but Spark has them.
"""
from distutils.version import LooseVersion
from typing import TYPE_CHECKING, Optional, Union, List
import pyspark
from pyspark import StorageLevel
from pyspark.sql import Column
from pyspark.sql import DataFrame as SparkDataFrame
if TYPE_CHECKING:
import databricks.koalas as ks
from databricks.koalas.base import IndexOpsMixin
from databricks.koalas.frame import CachedDataFrame
class SparkIndexOpsMethods(object):
    """Spark related features. Usually, the features here are missing in pandas
    but Spark has them."""

    def __init__(self, data: "IndexOpsMixin"):
        # The Koalas Series or Index this accessor exposes Spark features for.
        self._data = data

    @property
    def data_type(self):
        """ Returns the data type as defined by Spark, as a Spark DataType object."""
        return self._data._internal.spark_type_for(self._data._internal.column_labels[0])

    @property
    def nullable(self):
        """ Returns the nullability as defined by Spark. """
        return self._data._internal.spark_column_nullable_for(self._data._internal.column_labels[0])

    @property
    def column(self):
        """
        Spark Column object representing the Series/Index.

        .. note:: This Spark Column object is strictly bound to its base DataFrame the Series/Index
            was derived from.
        """
        return self._data._internal.spark_column

    def transform(self, func):
        """
        Applies a function that takes and returns a Spark column. It allows to natively
        apply a Spark function and column APIs with the Spark column internally used
        in Series or Index. The output length of the Spark column should be same as input's.

        .. note:: It requires to have the same input and output length; therefore,
            the aggregate Spark functions such as count do not work.

        Parameters
        ----------
        func : function
            Function to use for transforming the data by using Spark columns.

        Returns
        -------
        Series or Index

        Raises
        ------
        ValueError : If the output from the function is not a Spark column.

        Examples
        --------
        >>> from pyspark.sql.functions import log
        >>> df = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
        >>> df
           a  b
        0  1  4
        1  2  5
        2  3  6

        >>> df.a.spark.transform(lambda c: log(c))
        0    0.000000
        1    0.693147
        2    1.098612
        Name: a, dtype: float64

        >>> df.index.spark.transform(lambda c: c + 10)
        Int64Index([10, 11, 12], dtype='int64')

        >>> df.a.spark.transform(lambda c: c + df.b.spark.column)
        0    5
        1    7
        2    9
        Name: a, dtype: int64
        """
        from databricks.koalas import MultiIndex

        if isinstance(self._data, MultiIndex):
            raise NotImplementedError("MultiIndex does not support spark.transform yet.")
        output = func(self._data.spark.column)
        if not isinstance(output, Column):
            raise ValueError(
                "The output of the function [%s] should be of a "
                "pyspark.sql.Column; however, got [%s]." % (func, type(output))
            )
        new_ser = self._data._with_new_scol(scol=output).rename(self._data.name)
        # Trigger the resolution so it throws an exception if anything goes wrong
        # within the function, for example,
        # `df1.a.spark.transform(lambda _: F.col("non-existent"))`.
        new_ser._internal.to_internal_spark_frame
        return new_ser

    def apply(self, func):
        """
        Applies a function that takes and returns a Spark column. It allows to natively
        apply a Spark function and column APIs with the Spark column internally used
        in Series or Index.

        .. note:: It forces to lose the index and end up with using default index. It is
            preferred to use :meth:`Series.spark.transform` or :meth:`DataFrame.spark.apply`
            with specifying the `index_col`.

        .. note:: It does not require to have the same length of the input and output.
            However, it requires to create a new DataFrame internally which will require
            to set `compute.ops_on_diff_frames` to compute even with the same origin
            DataFrame that is expensive, whereas :meth:`Series.spark.transform` does not
            require it.

        Parameters
        ----------
        func : function
            Function to apply the function against the data by using Spark columns.

        Returns
        -------
        Series

        Raises
        ------
        ValueError : If the output from the function is not a Spark column.

        Examples
        --------
        >>> from databricks import koalas as ks
        >>> from pyspark.sql.functions import count, lit
        >>> df = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
        >>> df
           a  b
        0  1  4
        1  2  5
        2  3  6

        >>> df.a.spark.apply(lambda c: count(c))
        0    3
        Name: a, dtype: int64

        >>> df.a.spark.apply(lambda c: c + df.b.spark.column)
        0    5
        1    7
        2    9
        Name: a, dtype: int64
        """
        from databricks.koalas import Index, DataFrame, Series
        from databricks.koalas.series import first_series
        from databricks.koalas.internal import HIDDEN_COLUMNS

        if isinstance(self._data, Index):
            raise NotImplementedError("Index does not support spark.apply yet.")
        output = func(self._data.spark.column)
        if not isinstance(output, Column):
            raise ValueError(
                "The output of the function [%s] should be of a "
                "pyspark.sql.Column; however, got [%s]." % (func, type(output))
            )
        # Index was rejected above, so only Series can remain here.
        assert isinstance(self._data, Series)

        sdf = self._data._internal.spark_frame.drop(*HIDDEN_COLUMNS).select(output)
        # Lose the index: wrapping the raw Spark DataFrame attaches a default index
        # (as documented in the note above).
        kdf = DataFrame(sdf)
        kdf.columns = [self._data.name]
        return first_series(kdf)
class SparkFrameMethods(object):
"""Spark related features. Usually, the features here are missing in pandas
    but Spark has them."""
    def __init__(self, frame: "ks.DataFrame"):
        # Keep a reference to the Koalas DataFrame these Spark helpers operate on.
        self._kdf = frame
def schema(self, index_col=None):
"""
Returns the underlying Spark schema.
Returns
-------
pyspark.sql.types.StructType
The underlying Spark schema.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.spark.schema().simpleString()
'struct<a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>'
>>> df.spark.schema(index_col='index').simpleString()
'struct<index:bigint,a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>'
"""
return self.frame(index_col).schema
def print_schema(self, index_col: Optional[Union[str, List[str]]] = None):
"""
Prints out the underlying Spark schema in the tree format.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.spark.print_schema() # doctest: +NORMALIZE_WHITESPACE
root
|-- a: string (nullable = false)
|-- b: long (nullable = false)
|-- c: byte (nullable = false)
|-- d: double (nullable = false)
|-- e: boolean (nullable = false)
|-- f: timestamp (nullable = false)
>>> df.spark.print_schema(index_col='index') # doctest: +NORMALIZE_WHITESPACE
root
|-- index: long (nullable = false)
|-- a: string (nullable = false)
|-- b: long (nullable = false)
|-- c: byte (nullable = false)
|-- d: double (nullable = false)
|-- e: boolean (nullable = false)
|-- f: timestamp (nullable = false)
"""
self.frame(index_col).printSchema()
def frame(self, index_col=None):
"""
Return the current DataFrame as a Spark DataFrame. :meth:`DataFrame.spark.frame` is an
alias of :meth:`DataFrame.to_spark`.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
See Also
--------
DataFrame.to_spark
DataFrame.to_koalas
DataFrame.spark.frame
Examples
--------
By default, this method loses the index as below.
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.to_spark().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.spark.frame().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
If `index_col` is set, it keeps the index column as specified.
>>> df.to_spark(index_col="index").show() # doctest: +NORMALIZE_WHITESPACE
+-----+---+---+---+
|index| a| b| c|
+-----+---+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-----+---+---+---+
Keeping index column is useful when you want to call some Spark APIs and
convert it back to Koalas DataFrame without creating a default index, which
can affect performance.
>>> spark_df = df.to_spark(index_col="index")
>>> spark_df = spark_df.filter("a == 2")
>>> spark_df.to_koalas(index_col="index") # doctest: +NORMALIZE_WHITESPACE
a b c
index
1 2 5 8
In case of multi-index, specify a list to `index_col`.
>>> new_df = df.set_index("a", append=True)
>>> new_spark_df = new_df.to_spark(index_col=["index_1", "index_2"])
>>> new_spark_df.show() # doctest: +NORMALIZE_WHITESPACE
+-------+-------+---+---+
|index_1|index_2| b| c|
+-------+-------+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-------+-------+---+---+
Likewise, can be converted to back to Koalas DataFrame.
>>> new_spark_df.to_koalas(
... index_col=["index_1", "index_2"]) # doctest: +NORMALIZE_WHITESPACE
b c
index_1 index_2
0 1 4 7
1 2 5 8
2 3 6 9
"""
from databricks.koalas.utils import name_like_string
kdf = self._kdf
data_column_names = []
data_columns = []
for i, (label, spark_column, column_name) in enumerate(
zip(
kdf._internal.column_labels,
kdf._internal.data_spark_columns,
kdf._internal.data_spark_column_names,
)
):
name = str(i) if label is None else name_like_string(label)
data_column_names.append(name)
if column_name != name:
spark_column = spark_column.alias(name)
data_columns.append(spark_column)
if index_col is None:
return kdf._internal.spark_frame.select(data_columns)
else:
if isinstance(index_col, str):
index_col = [index_col]
old_index_scols = kdf._internal.index_spark_columns
if len(index_col) != len(old_index_scols):
raise ValueError(
"length of index columns is %s; however, the length of the given "
"'index_col' is %s." % (len(old_index_scols), len(index_col))
)
if any(col in data_column_names for col in index_col):
raise ValueError("'index_col' cannot be overlapped with other columns.")
new_index_scols = [
index_scol.alias(col) for index_scol, col in zip(old_index_scols, index_col)
]
return kdf._internal.spark_frame.select(new_index_scols + data_columns)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached which gets uncached after execution goes of the context.
If you want to specify the StorageLevel manually, use :meth:`DataFrame.spark.persist`
See Also
--------
DataFrame.spark.persist
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.spark.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
Name: 0, dtype: int64
>>> df = df.spark.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.spark.unpersist()
"""
from databricks.koalas.frame import CachedDataFrame
self._kdf._internal = self._kdf._internal.resolved_copy
return CachedDataFrame(self._kdf._internal)
def persist(self, storage_level=StorageLevel.MEMORY_AND_DISK):
"""
Yields and caches the current DataFrame with a specific StorageLevel.
If a StogeLevel is not given, the `MEMORY_AND_DISK` level is used by default like PySpark.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached which gets uncached after execution goes of the context.
See Also
--------
DataFrame.spark.cache
Examples
--------
>>> import pyspark
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
Set the StorageLevel to `MEMORY_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Memory Serialized 1x Replicated
dogs 4
cats 4
Name: 0, dtype: int64
Set the StorageLevel to `DISK_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.DISK_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Disk Serialized 1x Replicated
dogs 4
cats 4
Name: 0, dtype: int64
If a StorageLevel is not given, it uses `MEMORY_AND_DISK` by default.
>>> with df.spark.persist() as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Disk Memory Serialized 1x Replicated
dogs 4
cats 4
Name: 0, dtype: int64
>>> df = df.spark.persist()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.spark.unpersist()
"""
from databricks.koalas.frame import CachedDataFrame
return CachedDataFrame(self._kdf._internal, storage_level=storage_level)
def hint(self, name: str, *parameters) -> "ks.DataFrame":
"""
Specifies some hint on the current DataFrame.
Parameters
----------
name : A name of the hint.
parameters : Optional parameters.
Returns
-------
ret : DataFrame with the hint.
See Also
--------
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> merged = df1.merge(df2.spark.hint("broadcast"), left_on='lkey', right_on='rkey')
>>> merged.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
...BroadcastHashJoin...
...
"""
from databricks.koalas.frame import DataFrame
return DataFrame(
self._kdf._internal.with_new_sdf(
self._kdf._internal.spark_frame.hint(name, *parameters)
)
)
def to_table(
self,
name: str,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Union[str, List[str], None] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
):
"""
Write the DataFrame into a Spark table. :meth:`DataFrame.spark.to_table`
is an alias of :meth:`DataFrame.to_table`.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when the table exists
already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.spark.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self._kdf.spark.frame(index_col=index_col).write.saveAsTable(
name=name, format=format, mode=mode, partitionBy=partition_cols, **options
)
def to_spark_io(
self,
path: Optional[str] = None,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Union[str, List[str], None] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
):
"""Write the DataFrame out to a Spark data source. :meth:`DataFrame.spark.to_spark_io`
is an alias of :meth:`DataFrame.to_spark_io`.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when data already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
DataFrame.spark.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self._kdf.spark.frame(index_col=index_col).write.save(
path=path, format=format, mode=mode, partitionBy=partition_cols, **options
)
def explain(self, extended: Optional[bool] = None, mode: Optional[str] = None):
"""
Prints the underlying (logical and physical) Spark plans to the console for debugging
purpose.
Parameters
----------
extended : boolean, default ``False``.
If ``False``, prints only the physical plan.
mode : string, default ``None``.
The expected output format of plans.
Examples
--------
>>> df = ks.DataFrame({'id': range(10)})
>>> df.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
>>> df.spark.explain(True) # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.spark.explain(mode="extended") # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if LooseVersion(pyspark.__version__) < LooseVersion("3.0"):
if mode is not None:
if extended is not None:
raise Exception("extended and mode can not be specified simultaneously")
elif mode == "simple":
extended = False
elif mode == "extended":
extended = True
else:
raise ValueError(
"Unknown spark.explain mode: {}. Accepted spark.explain modes are "
"'simple', 'extended'.".format(mode)
)
if extended is None:
extended = False
self._kdf._internal.to_internal_spark_frame.explain(extended)
else:
self._kdf._internal.to_internal_spark_frame.explain(extended, mode)
def apply(self, func, index_col=None):
"""
Applies a function that takes and returns a Spark DataFrame. It allows natively
apply a Spark function and column APIs with the Spark column internally used
in Series or Index.
.. note:: set `index_col` and keep the column named as so in the output Spark
DataFrame to avoid using the default index to prevent performance penalty.
If you omit `index_col`, it will use default index which is potentially
expensive in general.
.. note:: it will lose column labels. This is a synonym of
``func(kdf.to_spark(index_col)).to_koalas(index_col)``.
Parameters
----------
func : function
Function to apply the function against the data by using Spark DataFrame.
Returns
-------
DataFrame
Raises
------
ValueError : If the output from the function is not a Spark DataFrame.
Examples
--------
>>> from databricks import koalas as ks
>>> kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> kdf
a b
0 1 4
1 2 5
2 3 6
>>> kdf.spark.apply(
... lambda sdf: sdf.selectExpr("a + b as c", "index"), index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
c
index
0 5
1 7
2 9
The case below ends up with using the default index, which should be avoided
if possible.
>>> kdf.spark.apply(lambda sdf: sdf.groupby("a").count().sort("a"))
a count
0 1 1
1 2 1
2 3 1
"""
output = func(self.frame(index_col))
if not isinstance(output, SparkDataFrame):
raise ValueError(
"The output of the function [%s] should be of a "
"pyspark.sql.DataFrame; however, got [%s]." % (func, type(output))
)
return output.to_koalas(index_col)
class CachedSparkFrameMethods(SparkFrameMethods):
"""Spark related features for cached DataFrame. This is usually created via
`df.spark.cache()`."""
def __init__(self, frame: "CachedDataFrame"):
super().__init__(frame)
@property
def storage_level(self):
"""
Return the storage level of this cache.
Examples
--------
>>> import databricks.koalas as ks
>>> import pyspark
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.spark.cache() as cached_df:
... print(cached_df.spark.storage_level)
...
Disk Memory Deserialized 1x Replicated
Set the StorageLevel to `MEMORY_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
...
Memory Serialized 1x Replicated
"""
return self._kdf._cached.storageLevel
def unpersist(self):
"""
The `unpersist` function is used to uncache the Koalas DataFrame when it
is not used with `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.spark.cache()
To uncache the dataframe, use `unpersist` function
>>> df.spark.unpersist()
"""
if self._kdf._cached.is_cached:
self._kdf._cached.unpersist()
| 1 | 15,449 | This is not supported in Spark 3.0.0-rc2 yet. I'd skip this for now. | databricks-koalas | py |
@@ -13,5 +13,6 @@ import (
// like sending and awaiting mined ones.
type Message interface {
Send(ctx context.Context, from, to types.Address, val *types.AttoFIL, method string, params ...interface{}) (*cid.Cid, error)
+ Query(ctx context.Context, from, to types.Address, method string, params ...interface{}) ([][]byte, *exec.FunctionSignature, error)
Wait(ctx context.Context, msgCid *cid.Cid, cb func(blk *types.Block, msg *types.SignedMessage, receipt *types.MessageReceipt, signature *exec.FunctionSignature) error) error
} | 1 | package api
import (
"context"
"gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
"github.com/filecoin-project/go-filecoin/exec"
"github.com/filecoin-project/go-filecoin/types"
)
// Message is the interface that defines methods to manage various message operations,
// like sending and awaiting mined ones.
type Message interface {
Send(ctx context.Context, from, to types.Address, val *types.AttoFIL, method string, params ...interface{}) (*cid.Cid, error)
Wait(ctx context.Context, msgCid *cid.Cid, cb func(blk *types.Block, msg *types.SignedMessage, receipt *types.MessageReceipt, signature *exec.FunctionSignature) error) error
}
| 1 | 13,717 | BLOCKING: Why does `Query` return an `*exec.FunctionSignature`? | filecoin-project-venus | go |
@@ -562,14 +562,14 @@ func (c *ConfigLocal) MaxDirBytes() uint64 {
return c.maxDirBytes
}
-// ResetCaches implements the Config interface for ConfigLocal.
-func (c *ConfigLocal) ResetCaches() {
+func (c *ConfigLocal) resetCachesWithoutShutdown() DirtyBlockCache {
c.lock.Lock()
defer c.lock.Unlock()
c.mdcache = NewMDCacheStandard(5000)
c.kcache = NewKeyCacheStandard(5000)
// Limit the block cache to 10K entries or 1024 blocks (currently 512MiB)
c.bcache = NewBlockCacheStandard(c, 10000, MaxBlockSizeBytesDefault*1024)
+ oldDirtyBcache := c.dirtyBcache
minFactor := 1
if maxParallelBlockPuts > 10 {
minFactor = maxParallelBlockPuts / 10 | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"sync"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
keybase1 "github.com/keybase/client/go/protocol"
metrics "github.com/rcrowley/go-metrics"
"golang.org/x/net/context"
)
const (
// Max supported plaintext size of a file in KBFS. TODO: increase
// this once we support multiple levels of indirection.
maxFileBytesDefault = 2 * 1024 * 1024 * 1024
// Max supported size of a directory entry name.
maxNameBytesDefault = 255
// Maximum supported plaintext size of a directory in KBFS. TODO:
// increase this once we support levels of indirection for
// directories.
maxDirBytesDefault = MaxBlockSizeBytesDefault
// Default time after setting the rekey bit before prompting for a
// paper key.
rekeyWithPromptWaitTimeDefault = 10 * time.Minute
// How often do we check for stuff to reclaim?
qrPeriodDefault = 1 * time.Minute
// How long must something be unreferenced before we reclaim it?
qrUnrefAgeDefault = 1 * time.Minute
// tlfValidDurationDefault is the default for tlf validity before redoing identify.
tlfValidDurationDefault = 6 * time.Hour
)
// ConfigLocal implements the Config interface using purely local
// server objects (no KBFS operations used RPCs).
type ConfigLocal struct {
lock sync.RWMutex
kbfs KBFSOps
keyman KeyManager
rep Reporter
kcache KeyCache
bcache BlockCache
dirtyBcache DirtyBlockCache
codec Codec
mdops MDOps
kops KeyOps
crypto Crypto
mdcache MDCache
bops BlockOps
mdserv MDServer
bserv BlockServer
keyserv KeyServer
daemon KeybaseDaemon
bsplit BlockSplitter
notifier Notifier
clock Clock
kbpki KBPKI
renamer ConflictRenamer
registry metrics.Registry
loggerFn func(prefix string) logger.Logger
noBGFlush bool // logic opposite so the default value is the common setting
rwpWaitTime time.Duration
maxFileBytes uint64
maxNameBytes uint32
maxDirBytes uint64
rekeyQueue RekeyQueue
qrPeriod time.Duration
qrUnrefAge time.Duration
// allKnownConfigsForTesting is used for testing, and contains all created
// Config objects in this test.
allKnownConfigsForTesting *[]Config
// tlfValidDuration is the time TLFs are valid before redoing identification.
tlfValidDuration time.Duration
}
var _ Config = (*ConfigLocal)(nil)
// LocalUser represents a fake KBFS user, useful for testing.
type LocalUser struct {
UserInfo
Asserts []string
// Index into UserInfo.CryptPublicKeys.
CurrentCryptPublicKeyIndex int
// Index into UserInfo.VerifyingKeys.
CurrentVerifyingKeyIndex int
}
// GetCurrentCryptPublicKey returns this LocalUser's public encryption key.
func (lu *LocalUser) GetCurrentCryptPublicKey() CryptPublicKey {
return lu.CryptPublicKeys[lu.CurrentCryptPublicKeyIndex]
}
// GetCurrentVerifyingKey returns this LocalUser's public signing key.
func (lu *LocalUser) GetCurrentVerifyingKey() VerifyingKey {
return lu.VerifyingKeys[lu.CurrentVerifyingKeyIndex]
}
func verifyingKeysToPublicKeys(keys []VerifyingKey) []keybase1.PublicKey {
publicKeys := make([]keybase1.PublicKey, len(keys))
for i, key := range keys {
publicKeys[i] = keybase1.PublicKey{
KID: key.kid,
IsSibkey: true,
}
}
return publicKeys
}
func cryptPublicKeysToPublicKeys(keys []CryptPublicKey) []keybase1.PublicKey {
publicKeys := make([]keybase1.PublicKey, len(keys))
for i, key := range keys {
publicKeys[i] = keybase1.PublicKey{
KID: key.kid,
IsSibkey: false,
}
}
return publicKeys
}
// GetPublicKeys returns all of this LocalUser's public encryption keys.
func (lu *LocalUser) GetPublicKeys() []keybase1.PublicKey {
sibkeys := verifyingKeysToPublicKeys(lu.VerifyingKeys)
subkeys := cryptPublicKeysToPublicKeys(lu.CryptPublicKeys)
return append(sibkeys, subkeys...)
}
// Helper functions to get a various keys for a local user suitable
// for use with CryptoLocal. Each function will return the same key
// will always be returned for a given user.
// MakeLocalUserSigningKeyOrBust returns a unique signing key for this user.
func MakeLocalUserSigningKeyOrBust(name libkb.NormalizedUsername) SigningKey {
return MakeFakeSigningKeyOrBust(string(name) + " signing key")
}
// MakeLocalUserVerifyingKeyOrBust makes a new verifying key
// corresponding to the signing key for this user.
func MakeLocalUserVerifyingKeyOrBust(name libkb.NormalizedUsername) VerifyingKey {
return MakeLocalUserSigningKeyOrBust(name).GetVerifyingKey()
}
// MakeLocalUserCryptPrivateKeyOrBust returns a unique private
// encryption key for this user.
func MakeLocalUserCryptPrivateKeyOrBust(name libkb.NormalizedUsername) CryptPrivateKey {
return MakeFakeCryptPrivateKeyOrBust(string(name) + " crypt key")
}
// MakeLocalUserCryptPublicKeyOrBust returns the public key
// corresponding to the crypt private key for this user.
func MakeLocalUserCryptPublicKeyOrBust(name libkb.NormalizedUsername) CryptPublicKey {
return MakeLocalUserCryptPrivateKeyOrBust(name).getPublicKey()
}
// MakeLocalUsers is a helper function to generate a list of
// LocalUsers suitable to use with KBPKILocal.
func MakeLocalUsers(users []libkb.NormalizedUsername) []LocalUser {
localUsers := make([]LocalUser, len(users))
for i := 0; i < len(users); i++ {
verifyingKey := MakeLocalUserVerifyingKeyOrBust(users[i])
cryptPublicKey := MakeLocalUserCryptPublicKeyOrBust(users[i])
localUsers[i] = LocalUser{
UserInfo: UserInfo{
Name: users[i],
UID: keybase1.MakeTestUID(uint32(i + 1)),
VerifyingKeys: []VerifyingKey{verifyingKey},
CryptPublicKeys: []CryptPublicKey{cryptPublicKey},
KIDNames: map[keybase1.KID]string{
verifyingKey.KID(): "dev1",
},
},
CurrentCryptPublicKeyIndex: 0,
CurrentVerifyingKeyIndex: 0,
}
}
return localUsers
}
// NewConfigLocal constructs a new ConfigLocal with default components.
func NewConfigLocal() *ConfigLocal {
config := &ConfigLocal{}
config.SetClock(wallClock{})
config.SetReporter(NewReporterSimple(config.Clock(), 10))
config.SetConflictRenamer(WriterDeviceDateConflictRenamer{config})
config.ResetCaches()
config.SetCodec(NewCodecMsgpack())
config.SetBlockOps(&BlockOpsStandard{config})
config.SetKeyOps(&KeyOpsStandard{config})
config.SetRekeyQueue(NewRekeyQueueStandard(config))
config.maxFileBytes = maxFileBytesDefault
config.maxNameBytes = maxNameBytesDefault
config.maxDirBytes = maxDirBytesDefault
config.rwpWaitTime = rekeyWithPromptWaitTimeDefault
config.qrPeriod = qrPeriodDefault
config.qrUnrefAge = qrUnrefAgeDefault
// Don't bother creating the registry if UseNilMetrics is set.
if !metrics.UseNilMetrics {
registry := metrics.NewRegistry()
config.SetMetricsRegistry(registry)
}
config.tlfValidDuration = tlfValidDurationDefault
return config
}
// KBFSOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KBFSOps() KBFSOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbfs
}
// SetKBFSOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKBFSOps(k KBFSOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbfs = k
}
// KBPKI implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KBPKI() KBPKI {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbpki
}
// SetKBPKI implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKBPKI(k KBPKI) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbpki = k
}
// KeyManager implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyManager() KeyManager {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyman
}
// SetKeyManager implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyManager(k KeyManager) {
c.lock.Lock()
defer c.lock.Unlock()
c.keyman = k
}
// Reporter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Reporter() Reporter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.rep
}
// SetReporter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetReporter(r Reporter) {
c.lock.Lock()
defer c.lock.Unlock()
c.rep = r
}
// KeyCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyCache() KeyCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kcache
}
// SetKeyCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyCache(k KeyCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.kcache = k
}
// BlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockCache() BlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bcache
}
// SetBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockCache(b BlockCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.bcache = b
}
// DirtyBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DirtyBlockCache() DirtyBlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.dirtyBcache
}
// SetDirtyBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDirtyBlockCache(d DirtyBlockCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.dirtyBcache = d
}
// Crypto implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Crypto() Crypto {
c.lock.RLock()
defer c.lock.RUnlock()
return c.crypto
}
// SetCrypto implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetCrypto(cr Crypto) {
c.lock.Lock()
defer c.lock.Unlock()
c.crypto = cr
}
// Codec implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Codec() Codec {
c.lock.RLock()
defer c.lock.RUnlock()
return c.codec
}
// SetCodec implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetCodec(co Codec) {
c.lock.Lock()
defer c.lock.Unlock()
c.codec = co
RegisterOps(c.codec)
}
// MDOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDOps() MDOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdops
}
// SetMDOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDOps(m MDOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdops = m
}
// KeyOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyOps() KeyOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kops
}
// SetKeyOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyOps(k KeyOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.kops = k
}
// MDCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDCache() MDCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdcache
}
// SetMDCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDCache(m MDCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdcache = m
}
// BlockOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockOps() BlockOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bops
}
// SetBlockOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockOps(b BlockOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.bops = b
}
// MDServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDServer() MDServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdserv
}
// SetMDServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDServer(m MDServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdserv = m
}
// BlockServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockServer() BlockServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bserv
}
// SetBlockServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockServer(b BlockServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.bserv = b
}
// KeyServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyServer() KeyServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyserv
}
// SetKeyServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyServer(k KeyServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.keyserv = k
}
// KeybaseDaemon implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeybaseDaemon() KeybaseDaemon {
c.lock.RLock()
defer c.lock.RUnlock()
return c.daemon
}
// SetKeybaseDaemon implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeybaseDaemon(k KeybaseDaemon) {
c.lock.Lock()
defer c.lock.Unlock()
c.daemon = k
}
// BlockSplitter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockSplitter() BlockSplitter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bsplit
}
// SetBlockSplitter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockSplitter(b BlockSplitter) {
c.lock.Lock()
defer c.lock.Unlock()
c.bsplit = b
}
// Notifier implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Notifier() Notifier {
c.lock.RLock()
defer c.lock.RUnlock()
return c.notifier
}
// SetNotifier implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetNotifier(n Notifier) {
c.lock.Lock()
defer c.lock.Unlock()
c.notifier = n
}
// Clock implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Clock() Clock {
c.lock.RLock()
defer c.lock.RUnlock()
return c.clock
}
// SetClock implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetClock(cl Clock) {
c.lock.Lock()
defer c.lock.Unlock()
c.clock = cl
}
// ConflictRenamer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ConflictRenamer() ConflictRenamer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.renamer
}
// SetConflictRenamer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetConflictRenamer(cr ConflictRenamer) {
c.lock.Lock()
defer c.lock.Unlock()
c.renamer = cr
}
// MetadataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MetadataVersion() MetadataVer {
return InitialExtraMetadataVer
}
// DataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DataVersion() DataVer {
return FilesWithHolesDataVer
}
// DoBackgroundFlushes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DoBackgroundFlushes() bool {
return !c.noBGFlush
}
// RekeyWithPromptWaitTime implements the Config interface for
// ConfigLocal.
func (c *ConfigLocal) RekeyWithPromptWaitTime() time.Duration {
return c.rwpWaitTime
}
// QuotaReclamationPeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) QuotaReclamationPeriod() time.Duration {
return c.qrPeriod
}
// QuotaReclamationMinUnrefAge implements the Config interface for ConfigLocal.
func (c *ConfigLocal) QuotaReclamationMinUnrefAge() time.Duration {
return c.qrUnrefAge
}
// ReqsBufSize implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ReqsBufSize() int {
return 20
}
// MaxFileBytes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MaxFileBytes() uint64 {
return c.maxFileBytes
}
// MaxNameBytes implements the Config interface for ConfigLocal.
//
// Simple accessor for c.maxNameBytes.
func (c *ConfigLocal) MaxNameBytes() uint32 {
	return c.maxNameBytes
}
// MaxDirBytes implements the Config interface for ConfigLocal.
//
// Simple accessor for c.maxDirBytes.
func (c *ConfigLocal) MaxDirBytes() uint64 {
	return c.maxDirBytes
}
// ResetCaches implements the Config interface for ConfigLocal.
// It rebuilds the metadata, key, block, and dirty-block caches from
// scratch, discarding whatever they previously held.  All replacements
// happen under the config's write lock.
func (c *ConfigLocal) ResetCaches() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.mdcache = NewMDCacheStandard(5000)
	c.kcache = NewKeyCacheStandard(5000)
	// Limit the block cache to 10K entries or 1024 blocks (currently 512MiB)
	c.bcache = NewBlockCacheStandard(c, 10000, MaxBlockSizeBytesDefault*1024)
	// The minimum number of bytes we'll try to sync in parallel.
	// This should be roughly the minimum amount of bytes we expect
	// our worst supported connection to send within the timeout
	// forced on us by the upper layer (19 seconds on OS X). With the
	// current defaults, this minimum works out to ~5MB, so we can
	// support a connection of ~270 KB/s. The buffer size will
	// increase as more data gets pushed over the connection without
	// risking timeouts.
	minFactor := 1
	if maxParallelBlockPuts > 10 {
		minFactor = maxParallelBlockPuts / 10
	}
	minSyncBufferSize := int64(MaxBlockSizeBytesDefault * minFactor)
	// The maximum number of bytes we can try to sync at once (also
	// limits the amount of memory used by dirty blocks). We make it
	// slightly bigger than the max number of parallel bytes in order
	// to reserve put "slots" for reuse while waiting for earlier puts
	// to finish. This also limits the maximum amount of memory used by
	// the dirty block cache (to around 100MB with the current
	// defaults).
	maxSyncBufferSize :=
		int64(MaxBlockSizeBytesDefault * maxParallelBlockPuts * 2)
	c.dirtyBcache = NewDirtyBlockCacheStandard(c.clock, c.MakeLogger,
		minSyncBufferSize, maxSyncBufferSize)
}
// MakeLogger implements the Config interface for ConfigLocal.
// It builds a logger for the given module via the configured logger
// maker, or returns nil when no maker has been set.  The maker is
// invoked while still holding the read lock, matching the original
// locking discipline.
func (c *ConfigLocal) MakeLogger(module string) logger.Logger {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if fn := c.loggerFn; fn != nil {
		return fn(module)
	}
	return nil
}
// SetLoggerMaker implements the Config interface for ConfigLocal.
// It installs the function used by MakeLogger to construct per-module
// loggers, under the config's write lock.
func (c *ConfigLocal) SetLoggerMaker(
	loggerFn func(module string) logger.Logger) {
	c.lock.Lock()
	c.loggerFn = loggerFn
	c.lock.Unlock()
}
// NewConfigLocalWithCryptoForSigning initializes a local crypto
// config w/a crypto interface, using the given signing key, that can
// be used for non-PKI crypto.  Logging is routed to a null logger.
func NewConfigLocalWithCryptoForSigning(signingKey SigningKey) *ConfigLocal {
	cfg := NewConfigLocal()
	cfg.SetLoggerMaker(func(m string) logger.Logger {
		return logger.NewNull()
	})
	privKey := MakeLocalUserCryptPrivateKeyOrBust("nobody")
	cfg.SetCrypto(NewCryptoLocal(cfg, signingKey, privKey))
	return cfg
}
// NewConfigLocalWithCrypto initializes a local crypto config w/a crypto interface that can be used for non-PKI crypto.
// It is a convenience wrapper that generates a throwaway signing key
// for the "nobody" user.
func NewConfigLocalWithCrypto() *ConfigLocal {
	return NewConfigLocalWithCryptoForSigning(
		MakeLocalUserSigningKeyOrBust("nobody"))
}
// MetricsRegistry implements the Config interface for ConfigLocal.
//
// Simple accessor for c.registry; read without c.lock -- presumably
// set only during initialization.
func (c *ConfigLocal) MetricsRegistry() metrics.Registry {
	return c.registry
}
// SetRekeyQueue implements the Config interface for ConfigLocal.
//
// NOTE(review): unlike SetClock/SetNotifier, this write is not guarded
// by c.lock -- assumes it is only called during initialization; confirm.
func (c *ConfigLocal) SetRekeyQueue(r RekeyQueue) {
	c.rekeyQueue = r
}
// RekeyQueue implements the Config interface for ConfigLocal.
//
// Simple accessor for c.rekeyQueue; read without c.lock -- presumably
// set only during initialization.
func (c *ConfigLocal) RekeyQueue() RekeyQueue {
	return c.rekeyQueue
}
// SetMetricsRegistry implements the Config interface for ConfigLocal.
//
// NOTE(review): unguarded write, same caveat as SetRekeyQueue --
// assumes initialization-time use only; confirm.
func (c *ConfigLocal) SetMetricsRegistry(r metrics.Registry) {
	c.registry = r
}
// SetTLFValidDuration implements the Config interface for ConfigLocal.
//
// NOTE(review): unguarded write, same caveat as SetRekeyQueue --
// assumes initialization-time use only; confirm.
func (c *ConfigLocal) SetTLFValidDuration(r time.Duration) {
	c.tlfValidDuration = r
}
// TLFValidDuration implements the Config interface for ConfigLocal.
//
// Simple accessor for c.tlfValidDuration.
func (c *ConfigLocal) TLFValidDuration() time.Duration {
	return c.tlfValidDuration
}
// Shutdown implements the Config interface for ConfigLocal.
//
// Teardown order: drain the rekey queue, optionally wait for all block
// archiving to finish (only when state-checking is enabled), then shut
// down each component.  Only the errors from KBFSOps and the dirty
// block cache are collected; return values (if any) from the other
// components' Shutdown calls are discarded.
func (c *ConfigLocal) Shutdown() error {
	// Stop accepting new rekey work and wait for in-flight work to drain.
	c.RekeyQueue().Clear()
	c.RekeyQueue().Wait(context.Background())
	if c.CheckStateOnShutdown() {
		// Before we do anything, wait for all archiving to finish.
		// NOTE(review): an error here returns immediately, skipping
		// the component shutdowns below -- intentional? confirm.
		for _, config := range *c.allKnownConfigsForTesting {
			kbfsOps, ok := config.KBFSOps().(*KBFSOpsStandard)
			if !ok {
				continue
			}
			for _, fbo := range kbfsOps.ops {
				err := fbo.fbm.waitForArchives(context.Background())
				if err != nil {
					return err
				}
			}
		}
	}
	var errors []error
	err := c.KBFSOps().Shutdown()
	if err != nil {
		errors = append(errors, err)
		// Continue with shutdown regardless of err.
	}
	c.MDServer().Shutdown()
	c.KeyServer().Shutdown()
	c.KeybaseDaemon().Shutdown()
	c.BlockServer().Shutdown()
	c.Crypto().Shutdown()
	c.Reporter().Shutdown()
	err = c.DirtyBlockCache().Shutdown()
	if err != nil {
		errors = append(errors, err)
	}
	// Report a single error directly; aggregate multiple into one.
	if len(errors) == 1 {
		return errors[0]
	} else if len(errors) > 1 {
		// Aggregate errors
		return fmt.Errorf("Multiple errors on shutdown: %v", errors)
	}
	return nil
}
// CheckStateOnShutdown implements the Config interface for ConfigLocal.
// State checking only applies when the MD server is the local,
// in-process implementation and it hasn't already been shut down.
func (c *ConfigLocal) CheckStateOnShutdown() bool {
	md, ok := c.MDServer().(*MDServerLocal)
	if !ok {
		return false
	}
	return !md.isShutdown()
}
| 1 | 11,803 | please move this down to immediately above the assignment to `c.dirtyBcache` | keybase-kbfs | go |
@@ -63,3 +63,19 @@ func Example() {
// Output:
// foo.com running on port 80
}
+
+func Example_openVariable() {
+ // OpenVariable creates a *runtimevar.Variable from a URL.
+ // This example watches a variable based on a file-based blob.Bucket with JSON.
+ ctx := context.Background()
+ v, err := runtimevar.OpenVariable(ctx, "blob://myvar.json?bucket=file:///mypath&decoder=json")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ snapshot, err := v.Watch(ctx)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, _ = snapshot, err
+} | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package blobvar_test
import (
"context"
"fmt"
"log"
"gocloud.dev/blob/memblob"
"gocloud.dev/runtimevar"
"gocloud.dev/runtimevar/blobvar"
)
// MyConfig is a sample configuration struct.
type MyConfig struct {
Server string
Port int
}
func Example() {
// Create a *blob.Bucket.
// Here, we use an in-memory implementation and write a sample
// configuration value.
bucket := memblob.OpenBucket(nil)
ctx := context.Background()
err := bucket.WriteAll(ctx, "cfg-variable-name", []byte(`{"Server": "foo.com", "Port": 80}`), nil)
if err != nil {
log.Fatal(err)
}
// Create a decoder for decoding JSON strings into MyConfig.
decoder := runtimevar.NewDecoder(MyConfig{}, runtimevar.JSONDecode)
// Construct a *runtimevar.Variable that watches the blob.
v, err := blobvar.NewVariable(bucket, "cfg-variable-name", decoder, nil)
if err != nil {
log.Fatal(err)
}
defer v.Close()
// We can now read the current value of the variable from v.
snapshot, err := v.Watch(ctx)
if err != nil {
log.Fatal(err)
}
// runtimevar.Snapshot.Value is decoded to type MyConfig.
cfg := snapshot.Value.(MyConfig)
fmt.Printf("%s running on port %d", cfg.Server, cfg.Port)
// Output:
// foo.com running on port 80
}
| 1 | 15,146 | Don't need the `, err` part since you've already handled it. | google-go-cloud | go |
@@ -222,4 +222,18 @@ TEST_CASE("negative charge queries. Part of testing changes for github #2604",
CHECK(nWarnings == 1);
CHECK(nErrors == 0);
}
+}
+
+TEST_CASE("GithHub #2954: Reaction Smarts with Dative Bonds not parsed",
+ "[Reaction, Bug]") {
+
+ SECTION("Rxn Smart Processing with Dative Bond") {
+ unique_ptr<ChemicalReaction> rxn(
+ RxnSmartsToChemicalReaction("[O:1].[H+]>>[O:1]->[H+]")
+ );
+ REQUIRE(rxn);
+ auto k = rxn->getProducts()[0]->getNumAtoms();
+ CHECK(k == 2);
+ }
+
} | 1 | //
// Copyright (c) 2018 Greg Landrum
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
///
#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do
// this in one cpp file
#include "catch.hpp"
#include <GraphMol/RDKitBase.h>
#include <GraphMol/QueryOps.h>
#include <GraphMol/QueryAtom.h>
#include <GraphMol/MonomerInfo.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <GraphMol/FileParsers/SequenceParsers.h>
#include <GraphMol/ChemReactions/Reaction.h>
#include <GraphMol/ChemReactions/ReactionParser.h>
#include <GraphMol/ChemReactions/ReactionRunner.h>
#include <GraphMol/ChemReactions/ReactionUtils.h>
using namespace RDKit;
using std::unique_ptr;
TEST_CASE("Github #1632", "[Reaction,PDB,bug]") {
SECTION("basics") {
bool sanitize = true;
int flavor = 0;
std::unique_ptr<RWMol> mol(SequenceToMol("K", sanitize, flavor));
REQUIRE(mol);
REQUIRE(mol->getAtomWithIdx(0)->getMonomerInfo());
auto res = static_cast<AtomPDBResidueInfo*>(
mol->getAtomWithIdx(0)->getMonomerInfo());
CHECK(res->getResidueNumber() == 1);
std::unique_ptr<ChemicalReaction> rxn(RxnSmartsToChemicalReaction(
"[O:1]=[CX3:2]-[CX4:3]-[NX3:4]>>[O:1]=[CX3:2]-[CX4:3]-[NX3:4]-[C]"));
REQUIRE(rxn);
rxn->initReactantMatchers();
MOL_SPTR_VECT reacts;
reacts.push_back(ROMOL_SPTR(new ROMol(*mol)));
auto prods = rxn->runReactants(reacts);
CHECK(prods.size() == 1);
CHECK(prods[0].size() == 1);
auto p = prods[0][0];
CHECK(p->getNumAtoms() == mol->getNumAtoms() + 1);
REQUIRE(p->getAtomWithIdx(0)->getMonomerInfo());
auto pres = static_cast<AtomPDBResidueInfo*>(
p->getAtomWithIdx(0)->getMonomerInfo());
CHECK(pres->getResidueNumber() == 1);
REQUIRE(!p->getAtomWithIdx(4)->getMonomerInfo());
}
}
static void clearAtomMappingProps(ROMol& mol) {
for (auto&& a : mol.atoms()) {
a->clear();
}
}
TEST_CASE("Github #2366 Enhanced Stereo", "[Reaction,StereoGroup,bug]") {
SECTION("Reaction Preserves Stereo") {
ROMOL_SPTR mol("F[C@H](Cl)Br |o1:1|"_smiles);
REQUIRE(mol);
unique_ptr<ChemicalReaction> rxn(
RxnSmartsToChemicalReaction("[C@:1]>>[C@:1]"));
REQUIRE(rxn);
MOL_SPTR_VECT reactants = {mol};
rxn->initReactantMatchers();
auto prods = rxn->runReactants(reactants);
REQUIRE(prods.size() == 1);
REQUIRE(prods[0].size() == 1);
auto p = prods[0][0];
clearAtomMappingProps(*p);
CHECK(MolToCXSmiles(*p) == "F[C@H](Cl)Br |o1:1|");
}
SECTION("Reaction destroys one center in StereoGroup") {
ROMOL_SPTR mol("F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|"_smiles);
REQUIRE(mol);
unique_ptr<ChemicalReaction> rxn(
RxnSmartsToChemicalReaction("[C@:1]F>>[C:1]F"));
REQUIRE(rxn);
MOL_SPTR_VECT reactants = {mol};
rxn->initReactantMatchers();
auto prods = rxn->runReactants(reactants);
REQUIRE(prods.size() == 1);
REQUIRE(prods[0].size() == 1);
auto p = prods[0][0];
clearAtomMappingProps(*p);
CHECK(MolToCXSmiles(*p) == "FC(Cl)[C@@H](Cl)Br |&1:3|");
}
SECTION("Reaction splits StereoGroup") {
ROMOL_SPTR mol("F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|"_smiles);
REQUIRE(mol);
unique_ptr<ChemicalReaction> rxn(RxnSmartsToChemicalReaction(
"[F:1][C@:2][C@:3][Cl:4]>>[F:1][C@:2]O.O[C@:3][Cl:4]"));
REQUIRE(rxn);
MOL_SPTR_VECT reactants = {mol};
rxn->initReactantMatchers();
auto prods = rxn->runReactants(reactants);
REQUIRE(prods.size() == 1);
REQUIRE(prods[0].size() == 2);
auto p0 = prods[0][0];
auto p1 = prods[0][1];
clearAtomMappingProps(*p0);
clearAtomMappingProps(*p1);
CHECK(MolToCXSmiles(*p0) == "O[C@@H](F)Cl |&1:1|");
CHECK(MolToCXSmiles(*p1) == "O[C@@H](Cl)Br |&1:1|");
}
SECTION("Reaction combines StereoGroups") {
ROMOL_SPTR mol1("F[C@H](Cl)O |&1:1|"_smiles);
REQUIRE(mol1);
ROMOL_SPTR mol2("Cl[C@H](Br)O |&1:1|"_smiles);
REQUIRE(mol2);
unique_ptr<ChemicalReaction> rxn(RxnSmartsToChemicalReaction(
"[F:1][C@:2]O.O[C@:3][Cl:4]>>[F:1][C@:2][C@:3][Cl:4]"));
REQUIRE(rxn);
MOL_SPTR_VECT reactants = {mol1, mol2};
rxn->initReactantMatchers();
auto prods = rxn->runReactants(reactants);
REQUIRE(prods.size() == 1);
REQUIRE(prods[0].size() == 1);
auto p0 = prods[0][0];
clearAtomMappingProps(*p0);
CHECK(MolToCXSmiles(*p0) == "F[C@@H](Cl)[C@H](Cl)Br |&1:1,&2:3|");
}
}
TEST_CASE("Github #2427 cannot set maxProducts>1000 in runReactants",
"[Reaction,bug]") {
SECTION("Basics") {
std::string smi = "[C]";
for (unsigned int i = 0; i < 49; ++i) {
smi += ".[C]";
}
ROMOL_SPTR mol(SmilesToMol(smi));
REQUIRE(mol);
unique_ptr<ChemicalReaction> rxn(
RxnSmartsToChemicalReaction("([#6:1].[#6:2])>>[#6:1]-[#6:2]"));
REQUIRE(rxn);
MOL_SPTR_VECT reactants = {mol};
rxn->initReactantMatchers();
// by default we only get 1000 products:
{
auto prods = rxn->runReactants(reactants);
CHECK(prods.size() == 1000);
CHECK(prods[0].size() == 1);
}
{
auto prods = rxn->runReactants(reactants, 2000);
CHECK(prods.size() == 2000);
CHECK(prods[0].size() == 1);
}
}
}
TEST_CASE("negative charge queries. Part of testing changes for github #2604",
"[Reaction]") {
SECTION("no redundancy") {
unique_ptr<ChemicalReaction> rxn(
RxnSmartsToChemicalReaction("[N+{1-}:1]>>[#0-1:1]"));
REQUIRE(rxn);
// we don't have a way to directly create NegativeFormalCharge queries, so
// make one by hand
REQUIRE(rxn->getProducts()[0]->getAtomWithIdx(0)->hasQuery());
static_cast<QueryAtom*>(rxn->getProducts()[0]->getAtomWithIdx(0))
->expandQuery(makeAtomNegativeFormalChargeQuery(1));
unsigned nWarnings = 0;
unsigned nErrors = 0;
CHECK(rxn->validate(nWarnings, nErrors));
CHECK(nWarnings == 0);
CHECK(nErrors == 0);
}
SECTION("no redundancy2") {
unique_ptr<ChemicalReaction> rxn(
RxnSmartsToChemicalReaction("[N+{1-}:1]>>[#0+1:1]"));
REQUIRE(rxn);
// we don't have a way to directly create NegativeFormalCharge queries, so
// make one by hand
REQUIRE(rxn->getProducts()[0]->getAtomWithIdx(0)->hasQuery());
static_cast<QueryAtom*>(rxn->getProducts()[0]->getAtomWithIdx(0))
->expandQuery(makeAtomNegativeFormalChargeQuery(
-1)); // a bit kludgy, but we need to check
unsigned nWarnings = 0;
unsigned nErrors = 0;
CHECK(rxn->validate(nWarnings, nErrors));
CHECK(nWarnings == 0);
CHECK(nErrors == 0);
}
SECTION("redundancy") {
unique_ptr<ChemicalReaction> rxn(
// RxnSmartsToChemicalReaction("[N+{1-}:1]>>[#0+1+2:1]"));
RxnSmartsToChemicalReaction("[N+{1-}:1]>>[#0-1:1]"));
REQUIRE(rxn);
// we don't have a way to directly create NegativeFormalCharge queries, so
// make one by hand
REQUIRE(rxn->getProducts()[0]->getAtomWithIdx(0)->hasQuery());
static_cast<QueryAtom*>(rxn->getProducts()[0]->getAtomWithIdx(0))
->expandQuery(makeAtomNegativeFormalChargeQuery(2));
unsigned nWarnings = 0;
unsigned nErrors = 0;
CHECK(rxn->validate(nWarnings, nErrors));
CHECK(nWarnings == 1);
CHECK(nErrors == 0);
}
} | 1 | 20,752 | Please add two additional SECTIONs that show that this also works if the dative bond is in the reactant (reaction SMARTS `[O:1]->[H+]>>[O:1].[H+]`) or in the agents (reaction SMARTS `[O:1][H]>N->[Cu]>[O:1].[H]`) | rdkit-rdkit | cpp |
@@ -54,6 +54,15 @@ func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler,
MinVersion: c.config.TLSMinVersion,
CipherSuites: c.config.TLSCipherSuites,
},
+ RegenerateCerts: func() bool {
+ const regenerateDynamicListenerFile = "dynamic-cert-regenerate"
+ dynamicListenerRegenFilePath := filepath.Join(c.config.DataDir, "tls", regenerateDynamicListenerFile)
+ if _, err := os.Stat(dynamicListenerRegenFilePath); err == nil {
+ os.Remove(dynamicListenerRegenFilePath)
+ return true
+ }
+ return false
+ },
})
}
| 1 | package cluster
import (
"context"
"crypto/tls"
"errors"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"path/filepath"
"github.com/rancher/dynamiclistener"
"github.com/rancher/dynamiclistener/factory"
"github.com/rancher/dynamiclistener/storage/file"
"github.com/rancher/dynamiclistener/storage/kubernetes"
"github.com/rancher/dynamiclistener/storage/memory"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/etcd"
"github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler/pkg/generated/controllers/core"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// newListener returns a new TCP listener and HTTP request handler using dynamiclistener.
// dynamiclistener will use the cluster's Server CA to sign the dynamically generate certificate,
// and will sync the certs into the Kubernetes datastore, with a local disk cache.
func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler, error) {
if c.managedDB != nil {
if _, err := os.Stat(etcd.ResetFile(c.config)); err == nil {
// delete the dynamic listener file if it exists after restoration to fix restoration
// on fresh nodes
os.Remove(filepath.Join(c.config.DataDir, "tls/dynamic-cert.json"))
}
}
tcp, err := dynamiclistener.NewTCPListener(c.config.BindAddress, c.config.SupervisorPort)
if err != nil {
return nil, nil, err
}
cert, key, err := factory.LoadCerts(c.runtime.ServerCA, c.runtime.ServerCAKey)
if err != nil {
return nil, nil, err
}
storage := tlsStorage(ctx, c.config.DataDir, c.runtime)
return dynamiclistener.NewListener(tcp, storage, cert, key, dynamiclistener.Config{
ExpirationDaysCheck: config.CertificateRenewDays,
Organization: []string{version.Program},
SANs: append(c.config.SANs, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc."+c.config.ClusterDomain),
CN: version.Program,
TLSConfig: &tls.Config{
ClientAuth: tls.RequestClientCert,
MinVersion: c.config.TLSMinVersion,
CipherSuites: c.config.TLSCipherSuites,
},
})
}
// initClusterAndHTTPS sets up the dynamic tls listener, request router,
// and cluster database. Once the database is up, it starts the supervisor http server.
func (c *Cluster) initClusterAndHTTPS(ctx context.Context) error {
// Set up dynamiclistener TLS listener and request handler
listener, handler, err := c.newListener(ctx)
if err != nil {
return err
}
// Get the base request handler
handler, err = c.getHandler(handler)
if err != nil {
return err
}
// Config the cluster database and allow it to add additional request handlers
handler, err = c.initClusterDB(ctx, handler)
if err != nil {
return err
}
// Create a HTTP server with the registered request handlers, using logrus for logging
server := http.Server{
Handler: handler,
}
if logrus.IsLevelEnabled(logrus.DebugLevel) {
server.ErrorLog = log.New(logrus.StandardLogger().Writer(), "Cluster-Http-Server ", log.LstdFlags)
} else {
server.ErrorLog = log.New(ioutil.Discard, "Cluster-Http-Server", 0)
}
// Start the supervisor http server on the tls listener
go func() {
err := server.Serve(listener)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
logrus.Fatalf("server stopped: %v", err)
}
}()
// Shutdown the http server when the context is closed
go func() {
<-ctx.Done()
server.Shutdown(context.Background())
}()
return nil
}
// tlsStorage creates an in-memory cache for dynamiclistener's certificate, backed by a file on disk
// and the Kubernetes datastore.
func tlsStorage(ctx context.Context, dataDir string, runtime *config.ControlRuntime) dynamiclistener.TLSStorage {
fileStorage := file.New(filepath.Join(dataDir, "tls/dynamic-cert.json"))
cache := memory.NewBacked(fileStorage)
return kubernetes.New(ctx, func() *core.Factory {
return runtime.Core
}, metav1.NamespaceSystem, version.Program+"-serving", cache)
}
| 1 | 10,602 | What happens if the certificate rotation fails and we are prematurely removing this file? | k3s-io-k3s | go |
@@ -97,6 +97,18 @@ func (s *Single) IsRunning() bool {
// Introspect returns a ChooserStatus with a single PeerStatus.
func (s *Single) Introspect() introspection.ChooserStatus {
+ if !s.once.IsRunning() {
+ return introspection.ChooserStatus{
+ Name: "Single",
+ Peers: []introspection.PeerStatus{
+ {
+ Identifier: s.pid.Identifier(),
+ State: "uninitialized",
+ },
+ },
+ }
+ }
+
peerStatus := s.p.Status()
peer := introspection.PeerStatus{
Identifier: s.p.Identifier(), | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package peer
import (
"context"
"fmt"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/introspection"
intsync "go.uber.org/yarpc/internal/sync"
)
// Single implements the Chooser interface for a single peer
type Single struct {
once intsync.LifecycleOnce
t peer.Transport
pid peer.Identifier
p peer.Peer
err error
boundOnFinish func(error)
}
// NewSingle creates a static Chooser with a single Peer
func NewSingle(pid peer.Identifier, transport peer.Transport) *Single {
s := &Single{
once: intsync.Once(),
pid: pid,
t: transport,
}
s.boundOnFinish = s.onFinish
return s
}
// Choose returns the single peer
func (s *Single) Choose(ctx context.Context, _ *transport.Request) (peer.Peer, func(error), error) {
if err := s.once.WhenRunning(ctx); err != nil {
return nil, nil, err
}
s.p.StartRequest()
return s.p, s.boundOnFinish, s.err
}
func (s *Single) onFinish(_ error) {
s.p.EndRequest()
}
// NotifyStatusChanged receives notifications from the transport when the peer
// connects, disconnects, accepts a request, and so on.
func (s *Single) NotifyStatusChanged(_ peer.Identifier) {
}
// Start is a noop
func (s *Single) Start() error {
return s.once.Start(s.start)
}
func (s *Single) start() error {
p, err := s.t.RetainPeer(s.pid, s)
s.p = p
s.err = err
return err
}
// Stop is a noop
func (s *Single) Stop() error {
return s.once.Stop(s.stop)
}
func (s *Single) stop() error {
return s.t.ReleasePeer(s.pid, s)
}
// IsRunning is a noop
func (s *Single) IsRunning() bool {
return true
}
// Introspect returns a ChooserStatus with a single PeerStatus.
func (s *Single) Introspect() introspection.ChooserStatus {
peerStatus := s.p.Status()
peer := introspection.PeerStatus{
Identifier: s.p.Identifier(),
State: fmt.Sprintf("%s, %d pending request(s)",
peerStatus.ConnectionStatus.String(),
peerStatus.PendingRequestCount),
}
return introspection.ChooserStatus{
Name: "Single",
Peers: []introspection.PeerStatus{peer},
}
}
| 1 | 14,025 | does this change belong here? | yarpc-yarpc-go | go |
@@ -291,6 +291,8 @@ func (w *Workflow) populate() error {
"DATETIME": now.Format("20060102150405"),
"TIMESTAMP": strconv.FormatInt(now.Unix(), 10),
"USERNAME": w.username,
+ "WFDIR": w.workflowDir,
+ "CWD": os.Getwd(),
}
var replacements []string | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package workflow describes a daisy workflow.
package workflow
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/user"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/option"
)
const defaultTimeout = "10m"
type gcsLogger struct {
client *storage.Client
bucket, object string
buf *bytes.Buffer
ctx context.Context
}
func (l *gcsLogger) Write(b []byte) (int, error) {
if l.buf == nil {
l.buf = new(bytes.Buffer)
}
l.buf.Write(b)
wc := l.client.Bucket(l.bucket).Object(l.object).NewWriter(l.ctx)
wc.ContentType = "text/plain"
n, err := wc.Write(l.buf.Bytes())
if err != nil {
return 0, err
}
if err := wc.Close(); err != nil {
return 0, err
}
return n, err
}
type vars struct {
Value string
Required bool
Description string
}
// Workflow is a single Daisy workflow workflow.
type Workflow struct {
// Populated on New() construction.
Ctx context.Context `json:"-"`
Cancel chan struct{} `json:"-"`
// Workflow template fields.
// Workflow name.
Name string
// Project to run in.
Project string
// Zone to run in.
Zone string
// GCS Path to use for scratch data and write logs/results to.
GCSPath string
// Path to OAuth credentials file.
OAuthPath string `json:",omitempty"`
// Sources used by this workflow, map of destination to source.
Sources map[string]string `json:",omitempty"`
// Vars defines workflow variables, substitution is done at Workflow run time.
Vars map[string]json.RawMessage `json:",omitempty"`
Steps map[string]*Step
// Map of steps to their dependencies.
Dependencies map[string][]string
// Working fields.
vars map[string]vars
workflowDir string
parent *Workflow
bucket string
scratchPath string
sourcesPath string
logsPath string
outsPath string
username string
gcsLogWriter io.Writer
ComputeClient *compute.Client `json:"-"`
StorageClient *storage.Client `json:"-"`
id string
logger *log.Logger
cleanupHooks []func() error
cleanupHooksMx sync.Mutex
}
func (w *Workflow) AddVar(k, v string) {
if w.vars == nil {
w.vars = map[string]vars{}
}
w.vars[k] = vars{Value: v}
}
func (w *Workflow) addCleanupHook(hook func() error) {
w.cleanupHooksMx.Lock()
w.cleanupHooks = append(w.cleanupHooks, hook)
w.cleanupHooksMx.Unlock()
}
// Validate runs validation on the workflow.
func (w *Workflow) Validate() error {
w.gcsLogWriter = ioutil.Discard
if err := w.validateRequiredFields(); err != nil {
close(w.Cancel)
return fmt.Errorf("error validating workflow: %v", err)
}
if err := w.populate(); err != nil {
close(w.Cancel)
return fmt.Errorf("error populating workflow: %v", err)
}
w.logger.Print("Validating workflow")
if err := w.validate(); err != nil {
w.logger.Printf("Error validating workflow: %v", err)
close(w.Cancel)
return err
}
w.logger.Print("Validation Complete")
return nil
}
// Run runs a workflow.
func (w *Workflow) Run() error {
if err := w.Validate(); err != nil {
return err
}
defer w.cleanup()
w.logger.Print("Uploading sources")
if err := w.uploadSources(); err != nil {
w.logger.Printf("Error uploading sources: %v", err)
close(w.Cancel)
return err
}
w.logger.Print("Running workflow")
if err := w.run(); err != nil {
w.logger.Printf("Error running workflow: %v", err)
select {
case <-w.Cancel:
default:
close(w.Cancel)
}
return err
}
return nil
}
func (w *Workflow) String() string {
f := "{Name:%q Project:%q Zone:%q Bucket:%q OAuthPath:%q Sources:%s Vars:%s Steps:%s Dependencies:%s id:%q}"
return fmt.Sprintf(f, w.Name, w.Project, w.Zone, w.bucket, w.OAuthPath, w.Sources, w.Vars, w.Steps, w.Dependencies, w.id)
}
func (w *Workflow) cleanup() {
w.logger.Printf("Workflow %q cleaning up (this may take up to 2 minutes.", w.Name)
for _, hook := range w.cleanupHooks {
if err := hook(); err != nil {
w.logger.Printf("Error returned from cleanup hook: %s", err)
}
}
}
func (w *Workflow) genName(n string) string {
prefix := fmt.Sprintf("%s-%s", n, w.Name)
if len(prefix) > 57 {
prefix = prefix[0:56]
}
result := fmt.Sprintf("%s-%s", prefix, w.id)
if len(result) > 64 {
result = result[0:63]
}
return strings.ToLower(result)
}
func (w *Workflow) populateStep(step *Step) error {
if step.Timeout == "" {
step.Timeout = defaultTimeout
}
timeout, err := time.ParseDuration(step.Timeout)
if err != nil {
return err
}
step.timeout = timeout
if step.WaitForInstancesSignal != nil {
for i, s := range *step.WaitForInstancesSignal {
if s.Interval == "" {
s.Interval = defaultInterval
}
interval, err := time.ParseDuration(s.Interval)
if err != nil {
return err
}
(*step.WaitForInstancesSignal)[i].interval = interval
}
}
// Recurse on subworkflows.
if step.SubWorkflow == nil {
return nil
}
step.SubWorkflow.workflow.GCSPath = fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath)
step.SubWorkflow.workflow.Name = step.name
step.SubWorkflow.workflow.Project = w.Project
step.SubWorkflow.workflow.Zone = w.Zone
step.SubWorkflow.workflow.OAuthPath = w.OAuthPath
step.SubWorkflow.workflow.ComputeClient = w.ComputeClient
step.SubWorkflow.workflow.StorageClient = w.StorageClient
step.SubWorkflow.workflow.Ctx = w.Ctx
step.SubWorkflow.workflow.Cancel = w.Cancel
step.SubWorkflow.workflow.gcsLogWriter = w.gcsLogWriter
step.SubWorkflow.workflow.vars = map[string]vars{}
for k, v := range step.SubWorkflow.Vars {
step.SubWorkflow.workflow.vars[k] = vars{Value: v}
}
return step.SubWorkflow.workflow.populate()
}
func (w *Workflow) populateVars() error {
if w.vars == nil {
w.vars = map[string]vars{}
}
for k, v := range w.Vars {
// Don't overwrite existing vars (applies to subworkflows).
if _, ok := w.vars[k]; ok {
continue
}
var sv string
if err := json.Unmarshal(v, &sv); err == nil {
w.vars[k] = vars{Value: sv}
continue
}
var vv vars
if err := json.Unmarshal(v, &vv); err == nil {
if vv.Required && vv.Value == "" {
return fmt.Errorf("required var %q cannot be blank", k)
}
w.vars[k] = vv
continue
}
return fmt.Errorf("cannot unmarshal Var %q, value: %s", k, v)
}
return nil
}
// populate readies the workflow for running: it generates the workflow id,
// resolves user vars and autovars via string substitution, derives the GCS
// scratch/sources/logs/outs paths, and lazily creates the Compute/Storage
// clients, the logger, and each step's derived fields.
func (w *Workflow) populate() error {
	w.id = randString(5)
	now := time.Now().UTC()
	cu, err := user.Current()
	if err != nil {
		return err
	}

	if err := w.populateVars(); err != nil {
		return err
	}

	w.username = cu.Username

	// First substitution pass: user vars plus autovars whose values are
	// known before any field substitution (ID, timestamps, username).
	autovars := map[string]string{
		"ID":        w.id,
		"DATE":      now.Format("20060102"),
		"DATETIME":  now.Format("20060102150405"),
		"TIMESTAMP": strconv.FormatInt(now.Unix(), 10),
		"USERNAME":  w.username,
	}

	var replacements []string
	for k, v := range autovars {
		replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
	}
	for k, v := range w.vars {
		replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value)
	}
	substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...))

	// Set up GCS paths.
	bkt, p, err := splitGCSPath(w.GCSPath)
	if err != nil {
		return err
	}
	w.bucket = bkt
	w.scratchPath = path.Join(p, fmt.Sprintf("daisy-%s-%s-%s", w.Name, now.Format("20060102-15:04:05"), w.id))
	w.sourcesPath = path.Join(w.scratchPath, "sources")
	w.logsPath = path.Join(w.scratchPath, "logs")
	w.outsPath = path.Join(w.scratchPath, "outs")

	// Do replacement for autovars. Autovars pull from workflow fields,
	// so Vars replacement must run before this to resolve the final
	// value for those fields.
	autovars = map[string]string{
		"NAME":        w.Name,
		"ZONE":        w.Zone,
		"PROJECT":     w.Project,
		"GCSPATH":     w.GCSPath,
		"SCRATCHPATH": fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath),
		"SOURCESPATH": fmt.Sprintf("gs://%s/%s", w.bucket, w.sourcesPath),
		"LOGSPATH":    fmt.Sprintf("gs://%s/%s", w.bucket, w.logsPath),
		"OUTSPATH":    fmt.Sprintf("gs://%s/%s", w.bucket, w.outsPath),
	}

	replacements = []string{}
	for k, v := range autovars {
		replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
	}
	substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...))

	// Clients are created lazily so tests/callers can inject their own.
	if w.ComputeClient == nil {
		w.ComputeClient, err = compute.NewClient(w.Ctx, option.WithServiceAccountFile(w.OAuthPath))
		if err != nil {
			return err
		}
	}

	if w.StorageClient == nil {
		w.StorageClient, err = storage.NewClient(w.Ctx, option.WithServiceAccountFile(w.OAuthPath))
		if err != nil {
			return err
		}
	}

	if w.logger == nil {
		// Log prefix is the dotted chain of ancestor workflow names.
		name := w.Name
		// BUGFIX: advance via parent.parent. The previous loop stepped with
		// `parent = w.parent.parent`, which never moves past the first
		// grandparent and spins forever when nesting is deeper than two
		// levels.
		for parent := w.parent; parent != nil; parent = parent.parent {
			name = parent.Name + "." + name
		}
		prefix := fmt.Sprintf("[%s]: ", name)
		flags := log.Ldate | log.Ltime
		if w.gcsLogWriter == nil {
			w.gcsLogWriter = &gcsLogger{client: w.StorageClient, bucket: w.bucket, object: path.Join(w.logsPath, "daisy.log"), ctx: w.Ctx}
			log.New(os.Stdout, prefix, flags).Println("Logs will be streamed to", "gs://"+path.Join(w.bucket, w.logsPath, "daisy.log"))
		}
		w.logger = log.New(io.MultiWriter(os.Stdout, w.gcsLogWriter), prefix, flags)
	}

	for name, s := range w.Steps {
		s.name = name
		if err := w.populateStep(s); err != nil {
			return err
		}
	}
	return nil
}
// Print populates then pretty prints the workflow.
// GCS log streaming is suppressed (populate would otherwise start it).
// Populate/marshal errors are reported to stdout rather than returned.
func (w *Workflow) Print() {
	w.gcsLogWriter = ioutil.Discard
	if err := w.populate(); err != nil {
		fmt.Println("Error running populate:", err)
	}
	out, err := json.MarshalIndent(w, "", " ")
	if err != nil {
		fmt.Println("Error marshalling workflow for printing:", err)
	}
	fmt.Println(string(out))
}
// run executes the workflow by traversing its step DAG, running each
// step via runStep; the first step error aborts the traversal.
func (w *Workflow) run() error {
	return w.traverseDAG(w.runStep)
}
// runStep runs a single step with a deadline of s.timeout. It returns the
// step's error, or a timeout error if the step does not finish in time.
func (w *Workflow) runStep(s *Step) error {
	// Buffered so the step goroutine can always deliver its result and
	// exit, even after we have already returned on timeout. The previous
	// unbuffered channel leaked that goroutine forever whenever the
	// timeout won the select.
	e := make(chan error, 1)
	go func() {
		e <- s.run(w)
	}()
	select {
	case err := <-e:
		return err
	case <-time.After(s.timeout):
		return fmt.Errorf("step %q did not stop in specified timeout of %s", s.name, s.timeout)
	}
}
// Concurrently traverse the DAG, running func f on each step.
// Return an error if f returns an error on any step.
// Steps run as soon as all of their dependencies have finished; a Cancel
// signal drops all not-yet-started steps but lets running ones finish.
func (w *Workflow) traverseDAG(f func(*Step) error) error {
	// waiting = steps and the dependencies they are waiting for.
	// running = the currently running steps.
	// start = map of steps' start channels/semaphores.
	// done = map of steps' done channels for signaling step completion.
	waiting := map[string][]string{}
	var running []string
	start := map[string]chan error{}
	done := map[string]chan error{}

	// Setup: channels, copy dependencies.
	for name := range w.Steps {
		waiting[name] = w.Dependencies[name]
		start[name] = make(chan error)
		done[name] = make(chan error)
	}
	// Setup: goroutine for each step. Each waits to be notified to start.
	for name, s := range w.Steps {
		go func(name string, s *Step) {
			// Wait for signal, then run the function. Return any errs.
			// A closed start channel yields the zero error and means "go".
			if err := <-start[name]; err != nil {
				done[name] <- err
			} else if err := f(s); err != nil {
				done[name] <- err
			}
			// Closing done without a prior send signals success; a sent
			// error followed by close signals failure (see stepsListen).
			close(done[name])
		}(name, s)
	}

	// Main signaling logic.
	for len(waiting) != 0 || len(running) != 0 {
		// If we got a Cancel signal, kill all waiting steps.
		// Let running steps finish.
		select {
		case <-w.Cancel:
			waiting = map[string][]string{}
		default:
		}

		// Kick off all steps that aren't waiting for anything.
		for name, deps := range waiting {
			if len(deps) == 0 {
				delete(waiting, name)
				running = append(running, name)
				// Closing start releases the step goroutine blocked above.
				close(start[name])
			}
		}
		// Sanity check. There should be at least one running step,
		// but loop back through if there isn't.
		// NOTE(review): if Dependencies ever contains a cycle, nothing can
		// start and this loop busy-spins until Cancel fires — presumably
		// cycles are rejected before traversal; confirm upstream validation.
		if len(running) == 0 {
			continue
		}

		// Get next finished step. Return the step error if it erred.
		finished, err := stepsListen(running, done)
		if err != nil {
			return err
		}

		// Remove finished step from other steps' waiting lists.
		for name, deps := range waiting {
			waiting[name] = filter(deps, finished)
		}
		// Remove finished from currently running list.
		running = filter(running, finished)
	}
	return nil
}
// New instantiates a new workflow.
func New(ctx context.Context) *Workflow {
	w := &Workflow{Ctx: ctx}
	// We can't use context.WithCancel as we use the context even after cancel for cleanup.
	w.Cancel = make(chan struct{})
	initWorkflowResources(w)
	return w
}
// NewFromFile reads and unmarshals a workflow file.
// Recursively reads subworkflow steps as well.
// Relative OAuthPath and subworkflow paths are resolved against the
// directory containing file.
func NewFromFile(ctx context.Context, file string) (*Workflow, error) {
	w := New(ctx)
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}

	w.workflowDir, err = filepath.Abs(filepath.Dir(file))
	if err != nil {
		return nil, err
	}

	if err := json.Unmarshal(data, &w); err != nil {
		// If this is a syntax error return a useful error.
		sErr, ok := err.(*json.SyntaxError)
		if !ok {
			return nil, err
		}
		return nil, formatJSONSyntaxError(file, data, sErr)
	}

	if w.OAuthPath != "" && !filepath.IsAbs(w.OAuthPath) {
		w.OAuthPath = filepath.Join(w.workflowDir, w.OAuthPath)
	}

	// We need to unmarshal any SubWorkflows.
	for name, s := range w.Steps {
		s.name = name
		s.w = w

		if s.SubWorkflow == nil {
			continue
		}

		swPath := s.SubWorkflow.Path
		if !filepath.IsAbs(swPath) {
			swPath = filepath.Join(w.workflowDir, swPath)
		}

		sw, err := NewFromFile(w.Ctx, swPath)
		if err != nil {
			return nil, err
		}
		s.SubWorkflow.workflow = sw
		sw.parent = w
	}

	return w, nil
}

// formatJSONSyntaxError builds a human-readable error that points at the
// offending line of a JSON syntax error in data, with a '^' marking the
// column of the failure.
func formatJSONSyntaxError(file string, data []byte, sErr *json.SyntaxError) error {
	// Byte number where the error line starts.
	start := bytes.LastIndex(data[:sErr.Offset], []byte("\n")) + 1
	// Assume end byte of error line is EOF unless this isn't the last line.
	end := len(data)
	if i := bytes.Index(data[start:], []byte("\n")); i >= 0 {
		end = start + i
	}

	// Line number of error.
	line := bytes.Count(data[:start], []byte("\n")) + 1
	// Position of error in line (where to place the '^').
	pos := int(sErr.Offset) - start
	if pos != 0 {
		pos = pos - 1
	}

	return fmt.Errorf("%s: JSON syntax error in line %d: %s \n%s\n%s^", file, line, sErr, data[start:end], strings.Repeat(" ", pos))
}
// stepsListen returns the first step that finishes/errs.
func stepsListen(names []string, chans map[string]chan error) (string, error) {
cases := make([]reflect.SelectCase, len(names))
for i, name := range names {
cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(chans[name])}
}
caseIndex, value, recvOk := reflect.Select(cases)
name := names[caseIndex]
if recvOk {
// recvOk -> a step failed, return the error.
return name, value.Interface().(error)
}
return name, nil
}
| 1 | 6,510 | This returns an error | GoogleCloudPlatform-compute-image-tools | go |
@@ -131,6 +131,8 @@ type Config struct {
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
+ // FIXME Add this to libcalico-go
+ IptablesBackend string `config:"oneof(legacy,nft);legacy;local"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"` | 1 | // Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/numorstring"
)
var (
// RegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value represents a regular expression and is marked by '/' at the start
// and end and cannot have spaces
RegexpIfaceElemRegexp = regexp.MustCompile(`^\/[^\s]+\/$`)
// NonRegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value is between 1-15 chars long and only be alphanumeric or - or _
NonRegexpIfaceElemRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}$`)
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
IfaceParamRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,15}$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
)
var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;local"`
TyphaK8sServiceName string `config:"string;;local"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"`
TyphaReadTimeout time.Duration `config:"seconds;30;local"`
TyphaWriteTimeout time.Duration `config:"seconds;10;local"`
// Client-side TLS config for Felix's communication with Typha. If any of these are
// specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left
// unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present
// a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN
// matching TyphaURISAN.
TyphaKeyFile string `config:"file(must-exist);;local"`
TyphaCertFile string `config:"file(must-exist);;local"`
TyphaCAFile string `config:"file(must-exist);;local"`
TyphaCN string `config:"string;;local"`
TyphaURISAN string `config:"string;;local"`
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
XDPRefreshInterval time.Duration `config:"seconds;90"`
PolicySyncPathPrefix string `config:"file;;"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
OpenstackRegion string `config:"region;;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude []*regexp.Regexp `config:"iface-list-regexp;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
VXLANEnabled bool `config:"bool;false"`
VXLANPort int `config:"int;4789"`
VXLANVNI int `config:"int;4096"`
VXLANMTU int `config:"int;1410;non-zero"`
IPv4VXLANTunnelAddr net.IP `config:"ipv4;"`
VXLANTunnelMACAddr string `config:"string;"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
HealthHost string `config:"string;localhost"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"`
NATOutgoingAddress net.IP `config:"ipv4;"`
UsageReportingEnabled bool `config:"bool;true"`
UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"`
DebugMemoryProfilePath string `config:"file;;"`
DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
// State tracking.
// nameToSource tracks where we loaded each config param from.
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
IptablesNATOutgoingInterfaceFilter string `config:"iface-param;"`
SidecarAccelerationEnabled bool `config:"bool;false"`
XDPEnabled bool `config:"bool;false"`
GenericXDPEnabled bool `config:"bool;false"`
}
type ProtoPort struct {
Protocol string
Port uint16
}
// Load parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
currentSource := nameToSource[rawName]
param, ok := knownParams[strings.ToLower(rawName)]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for
// a plugin. Since we don't know the canonical
// name, use the raw name.
newRawValues[rawName] = rawValue
nameToSource[rawName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("Non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[name] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) setBy(name string, source Source) bool {
_, set := config.sourceToRawConfig[source][name]
return set
}
func (config *Config) setByConfigFileOrEnvironment(name string) bool {
return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable)
}
func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig {
// We want Felix's datastore connection to be fully configurable using the same
// CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go
// client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a
// long time supported FELIX_XXXYYY environment variables, and we want those to keep working
// too.
// To achieve that, first build a CalicoAPIConfig using libcalico-go's
// LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY
// and XXX_YYY variables.
cfg, err := apiconfig.LoadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the
// etcd case.
if config.setByConfigFileOrEnvironment("DatastoreType") && config.DatastoreType == "etcdv3" {
cfg.Spec.DatastoreType = apiconfig.EtcdV3
// Endpoints.
if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 {
cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
} else if config.setByConfigFileOrEnvironment("EtcdAddr") {
cfg.Spec.EtcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr
}
// TLS.
if config.setByConfigFileOrEnvironment("EtcdKeyFile") {
cfg.Spec.EtcdKeyFile = config.EtcdKeyFile
}
if config.setByConfigFileOrEnvironment("EtcdCertFile") {
cfg.Spec.EtcdCertFile = config.EtcdCertFile
}
if config.setByConfigFileOrEnvironment("EtcdCaFile") {
cfg.Spec.EtcdCACertFile = config.EtcdCaFile
}
}
if !config.IpInIpEnabled && !config.VXLANEnabled {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("Encap disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
// If any client-side TLS config parameters are specified, they _all_ must be - except that
// either TyphaCN or TyphaURISAN may be left unset.
if config.TyphaCAFile != "" ||
config.TyphaCertFile != "" ||
config.TyphaKeyFile != "" ||
config.TyphaCN != "" ||
config.TyphaURISAN != "" {
// Some TLS config specified.
if config.TyphaKeyFile == "" ||
config.TyphaCertFile == "" ||
config.TyphaCAFile == "" ||
(config.TyphaCN == "" && config.TyphaURISAN == "") {
err = errors.New("If any Felix-Typha TLS config parameters are specified," +
" they _all_ must be" +
" - except that either TyphaCN or TyphaURISAN may be left unset.")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "iface-list-regexp":
param = &RegexpPatternListParam{
NonRegexpElemRegexp: NonRegexpIfaceElemRegexp,
RegexpElemRegexp: RegexpIfaceElemRegexp,
Delimiter: ",",
Msg: "list contains invalid Linux interface name or regex pattern",
}
case "iface-param":
param = &RegexpParam{Regexp: IfaceParamRegexp,
Msg: "invalid Linux interface parameter"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "portrange":
param = &PortRangeParam{}
case "portrange-list":
param = &PortRangeListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "region":
param = &RegionParam{}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
case "cidr-list":
param = &CIDRListParam{}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Index(flags, "non-zero") > -1 {
metadata.NonZero = true
}
if strings.Index(flags, "die-on-fail") > -1 {
metadata.DieOnParseFailure = true
}
if strings.Index(flags, "local") > -1 {
metadata.Local = true
}
if defaultStr != "" {
if strings.Index(flags, "skip-default-validation") > -1 {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: make(map[string]string),
sourceToRawConfig: make(map[Source]map[string]string),
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := names.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = strings.ToLower(os.Getenv("HOSTNAME"))
}
p.FelixHostname = hostname
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 17,009 | If we think we might someday write a native nftables backend, do you think it would make sense to just use generic dataplane configuration? e.g, `dataplane = iptables | ebpf | nftables`, but for now selecting `nftables` uses iptables in nft compat mode? | projectcalico-felix | go |
@@ -53,7 +53,10 @@ func TestBlockPropsManyNodes(t *testing.T) {
connect(t, nodes[1], nodes[2])
connect(t, nodes[2], nodes[3])
- baseTS := minerNode.ChainReader.Head()
+ head := minerNode.ChainReader.GetHead()
+ headTipSetAndState, err := minerNode.ChainReader.GetTipSetAndState(ctx, head)
+ require.NoError(err)
+ baseTS := headTipSetAndState.TipSet
require.NotNil(t, baseTS)
proof := testhelpers.MakeRandomPoSTProofForTest()
| 1 | package node
import (
"context"
"testing"
"time"
"github.com/libp2p/go-libp2p-peerstore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/protocol/storage"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/testhelpers"
"github.com/filecoin-project/go-filecoin/types"
)
func connect(t *testing.T, nd1, nd2 *Node) {
t.Helper()
pinfo := peerstore.PeerInfo{
ID: nd2.Host().ID(),
Addrs: nd2.Host().Addrs(),
}
if err := nd1.Host().Connect(context.Background(), pinfo); err != nil {
t.Fatal(err)
}
}
func TestBlockPropsManyNodes(t *testing.T) {
t.Parallel()
require := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
assert := assert.New(t)
numNodes := 4
minerAddr, nodes := makeNodes(t, assert, numNodes)
// Now add 10 null blocks and 1 tipset.
signer, ki := types.NewMockSignersAndKeyInfo(1)
mockSignerPubKey := ki[0].PublicKey()
StartNodes(t, nodes)
defer StopNodes(nodes)
minerNode := nodes[0]
connect(t, minerNode, nodes[1])
connect(t, nodes[1], nodes[2])
connect(t, nodes[2], nodes[3])
baseTS := minerNode.ChainReader.Head()
require.NotNil(t, baseTS)
proof := testhelpers.MakeRandomPoSTProofForTest()
ticket, err := signer.CreateTicket(proof, mockSignerPubKey)
require.NoError(err)
nextBlk := &types.Block{
Miner: minerAddr,
Parents: baseTS.ToSortedCidSet(),
Height: types.Uint64(1),
ParentWeight: types.Uint64(10000),
StateRoot: baseTS.ToSlice()[0].StateRoot,
Proof: proof,
Ticket: ticket,
}
// Wait for network connection notifications to propagate
time.Sleep(time.Millisecond * 300)
assert.NoError(minerNode.AddNewBlock(ctx, nextBlk))
equal := false
for i := 0; i < 30; i++ {
for j := 1; j < numNodes; j++ {
otherHead := nodes[j].ChainReader.Head()
assert.NotNil(t, otherHead)
equal = otherHead.ToSlice()[0].Cid().Equals(nextBlk.Cid())
if equal {
break
}
time.Sleep(time.Millisecond * 20)
}
}
assert.True(equal, "failed to sync chains")
}
func TestChainSync(t *testing.T) {
ctx := context.Background()
assert := assert.New(t)
minerAddr, nodes := makeNodes(t, assert, 2)
StartNodes(t, nodes)
defer StopNodes(nodes)
baseTS := nodes[0].ChainReader.Head()
signer, ki := types.NewMockSignersAndKeyInfo(1)
mockSignerPubKey := ki[0].PublicKey()
stateRoot := baseTS.ToSlice()[0].StateRoot
nextBlk1 := testhelpers.NewValidTestBlockFromTipSet(baseTS, stateRoot, 1, minerAddr, mockSignerPubKey, signer)
nextBlk2 := testhelpers.NewValidTestBlockFromTipSet(baseTS, stateRoot, 2, minerAddr, mockSignerPubKey, signer)
nextBlk3 := testhelpers.NewValidTestBlockFromTipSet(baseTS, stateRoot, 3, minerAddr, mockSignerPubKey, signer)
assert.NoError(nodes[0].AddNewBlock(ctx, nextBlk1))
assert.NoError(nodes[0].AddNewBlock(ctx, nextBlk2))
assert.NoError(nodes[0].AddNewBlock(ctx, nextBlk3))
connect(t, nodes[0], nodes[1])
equal := false
for i := 0; i < 30; i++ {
otherHead := nodes[1].ChainReader.Head()
assert.NotNil(t, otherHead)
equal = otherHead.ToSlice()[0].Cid().Equals(nextBlk3.Cid())
if equal {
break
}
time.Sleep(time.Millisecond * 20)
}
assert.True(equal, "failed to sync chains")
}
// ZeroRewarder is a no-op rewarder wired into test nodes (see
// RewarderConfigOption in makeNodes) so that processing test blocks credits
// no miner rewards or gas fees.
type ZeroRewarder struct{}

// BlockReward is a no-op; no block reward is ever credited.
func (r *ZeroRewarder) BlockReward(ctx context.Context, st state.Tree, minerAddr address.Address) error {
	return nil
}

// GasReward is a no-op; no gas fees are ever credited to the miner.
func (r *ZeroRewarder) GasReward(ctx context.Context, st state.Tree, minerAddr address.Address, msg *types.SignedMessage, cost *types.AttoFIL) error {
	return nil
}
// makeNodes makes at least two nodes, a miner and a client; numNodes is the total wanted
// NOTE(review): for numNodes > 2 this creates numNodes+1 nodes in total (the
// miner plus numNodes others), which contradicts "total wanted" — confirm
// whether nodeLimit should be numNodes-1.
func makeNodes(t *testing.T, assertions *assert.Assertions, numNodes int) (address.Address, []*Node) {
	seed := MakeChainSeed(t, TestGenCfg)
	// All nodes share a zero rewarder so processing test blocks mints nothing.
	configOpts := []ConfigOpt{RewarderConfigOption(&ZeroRewarder{})}
	minerNode := MakeNodeWithChainSeed(t, seed, configOpts,
		PeerKeyOpt(PeerKeys[0]),
		AutoSealIntervalSecondsOpt(1),
	)
	seed.GiveKey(t, minerNode, 0)
	mineraddr, minerOwnerAddr := seed.GiveMiner(t, minerNode, 0)
	// The storage miner is constructed for its side effects on minerNode; the
	// returned miner value itself is not needed here.
	_, err := storage.NewMiner(mineraddr, minerOwnerAddr, minerNode, minerNode.Repo.DealsDatastore(), minerNode.PorcelainAPI)
	assertions.NoError(err)
	nodes := []*Node{minerNode}
	// At minimum one additional (client) node is created alongside the miner.
	nodeLimit := 1
	if numNodes > 2 {
		nodeLimit = numNodes
	}
	for i := 0; i < nodeLimit; i++ {
		nodes = append(nodes, MakeNodeWithChainSeed(t, seed, configOpts))
	}
	return mineraddr, nodes
}
| 1 | 18,239 | Looks like that helper function is general enough that it should reside in testhelpers | filecoin-project-venus | go |
@@ -456,6 +456,11 @@ func writeDrupal6DdevSettingsFile(settings *DrupalSettings, filePath string) err
// WriteDrushrc writes out drushrc.php based on passed-in values.
// This works on Drupal 6 and Drupal 7 or with drush8 and older
func WriteDrushrc(app *DdevApp, filePath string) error {
+ // Ignore because this is a settings file.
+ if app.DisableSettingsManagement {
+ return nil
+ }
+
if fileutil.FileExists(filePath) {
// Check if the file is managed by ddev.
signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature) | 1 | package ddevapp
import (
"fmt"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/nodeps"
"github.com/gobuffalo/packr/v2"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
"io/ioutil"
"os"
"path"
"path/filepath"
"text/template"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/archive"
)
// DrupalSettings encapsulates all the configurations for a Drupal site.
// An instance of this struct is rendered into the settings.ddev.php templates
// defined in this file.
type DrupalSettings struct {
	DeployName       string
	DeployURL        string
	DatabaseName     string
	DatabaseUsername string
	DatabasePassword string
	DatabaseHost     string
	DatabaseDriver   string // "mysql" by default; overridden to "mysqli" for Drupal 6
	DatabasePort     string
	DatabasePrefix   string
	HashSalt         string // random salt generated per settings file
	Signature        string // DdevFileSignature marking the file as ddev-managed
	SitePath         string // e.g. sites/default
	SiteSettings     string // settings.php
	SiteSettingsDdev string // settings.ddev.php
	SyncDir          string // Drupal config sync dir, relative to SitePath/files
	DockerIP         string // host-side docker IP, used when drush runs on the host
	DBPublishedPort  int    // host-side published port of the db container
}
// NewDrupalSettings produces a DrupalSettings object with default.
// Defaults target the in-container "db" service; DockerIP/DBPublishedPort
// allow host-side drush to reach the same database.
func NewDrupalSettings(app *DdevApp) *DrupalSettings {
	// Errors are deliberately ignored: a missing docker IP or unpublished db
	// port simply leaves zero values, which the templates tolerate.
	dockerIP, _ := dockerutil.GetDockerIP()
	dbPublishedPort, _ := app.GetPublishedPort("db")
	return &DrupalSettings{
		DatabaseName:     "db",
		DatabaseUsername: "db",
		DatabasePassword: "db",
		DatabaseHost:     "db",
		DatabaseDriver:   "mysql",
		DatabasePort:     GetPort("db"),
		DatabasePrefix:   "",
		HashSalt:         util.RandString(64),
		Signature:        DdevFileSignature,
		SitePath:         path.Join("sites", "default"),
		SiteSettings:     "settings.php",
		SiteSettingsDdev: "settings.ddev.php",
		SyncDir:          path.Join("files", "sync"),
		DockerIP:         dockerIP,
		DBPublishedPort:  dbPublishedPort,
	}
}
// settingsIncludeStanza defines the template that will be appended to
// a project's settings.php in the event that the file already exists.
const settingsIncludeStanza = `
// Automatically generated include for settings managed by ddev.
$ddev_settings = dirname(__FILE__) . '/settings.ddev.php';
if (is_readable($ddev_settings) && getenv('IS_DDEV_PROJECT') == 'true') {
require $ddev_settings;
}
`
const (
drupal8DdevSettingsTemplate = `<?php
{{ $config := . }}
/**
* @file
* {{ $config.Signature }}: Automatically generated Drupal settings file.
* ddev manages this file and may delete or overwrite the file unless this
* comment is removed. It is recommended that you leave this file alone.
*/
$host = "{{ $config.DatabaseHost }}";
$port = {{ $config.DatabasePort }};
// If DDEV_PHP_VERSION is not set but IS_DDEV_PROJECT *is*, it means we're running (drush) on the host,
// so use the host-side bind port on docker IP
if (empty(getenv('DDEV_PHP_VERSION') && getenv('IS_DDEV_PROJECT') == 'true')) {
$host = "{{ $config.DockerIP }}";
$port = {{ $config.DBPublishedPort }};
}
$databases['default']['default'] = array(
'database' => "{{ $config.DatabaseName }}",
'username' => "{{ $config.DatabaseUsername }}",
'password' => "{{ $config.DatabasePassword }}",
'host' => $host,
'driver' => "{{ $config.DatabaseDriver }}",
'port' => $port,
'prefix' => "{{ $config.DatabasePrefix }}",
);
$settings['hash_salt'] = '{{ $config.HashSalt }}';
// This will prevent Drupal from setting read-only permissions on sites/default.
$settings['skip_permissions_hardening'] = TRUE;
// This will ensure the site can only be accessed through the intended host
// names. Additional host patterns can be added for custom configurations.
$settings['trusted_host_patterns'] = ['.*'];
// Don't use Symfony's APCLoader. ddev includes APCu; Composer's APCu loader has
// better performance.
$settings['class_loader_auto_detect'] = FALSE;
// This specifies the default configuration sync directory.
// For D8 before 8.8.0, we set $config_directories[CONFIG_SYNC_DIRECTORY] if not set
if (version_compare(Drupal::VERSION, "8.8.0", '<') &&
empty($config_directories[CONFIG_SYNC_DIRECTORY])) {
$config_directories[CONFIG_SYNC_DIRECTORY] = 'sites/default/files/sync';
}
// For D8.8/D8.9, set $settings['config_sync_directory'] if neither
// $config_directories nor $settings['config_sync_directory is set
if (version_compare(DRUPAL::VERSION, "8.8.0", '>=') &&
version_compare(DRUPAL::VERSION, "9.0.0", '<') &&
empty($config_directories[CONFIG_SYNC_DIRECTORY]) &&
empty($settings['config_sync_directory'])) {
$settings['config_sync_directory'] = 'sites/default/files/sync';
}
// For Drupal9, it's always $settings['config_sync_directory']
if (version_compare(DRUPAL::VERSION, "9.0.0", '>=') &&
empty($settings['config_sync_directory'])) {
$settings['config_sync_directory'] = 'sites/default/files/sync';
}
`
)
const (
drupal7DdevSettingsTemplate = `<?php
{{ $config := . }}
/**
* @file
* {{ $config.Signature }}: Automatically generated Drupal settings file.
* ddev manages this file and may delete or overwrite the file unless this
* comment is removed.
*/
$host = "{{ $config.DatabaseHost }}";
$port = {{ $config.DatabasePort }};
// If DDEV_PHP_VERSION is not set but IS_DDEV_PROJECT *is*, it means we're running (drush) on the host,
// so use the host-side bind port on docker IP
if (empty(getenv('DDEV_PHP_VERSION') && getenv('IS_DDEV_PROJECT') == 'true')) {
$host = "{{ $config.DockerIP }}";
$port = {{ $config.DBPublishedPort }};
}
$databases['default']['default'] = array(
'database' => "{{ $config.DatabaseName }}",
'username' => "{{ $config.DatabaseUsername }}",
'password' => "{{ $config.DatabasePassword }}",
'host' => $host,
'driver' => "{{ $config.DatabaseDriver }}",
'port' => $port,
'prefix' => "{{ $config.DatabasePrefix }}",
);
$drupal_hash_salt = '{{ $config.HashSalt }}';
`
)
const (
drupal6DdevSettingsTemplate = `<?php
{{ $config := . }}
/**
* @file
* {{ $config.Signature }}: Automatically generated Drupal settings file.
* ddev manages this file and may delete or overwrite the file unless this
* comment is removed.
*/
$host = "{{ $config.DatabaseHost }}";
$port = {{ $config.DatabasePort }};
// If DDEV_PHP_VERSION is not set but IS_DDEV_PROJECT *is*, it means we're running (drush) on the host,
// so use the host-side bind port on docker IP
if (empty(getenv('DDEV_PHP_VERSION') && getenv('IS_DDEV_PROJECT') == 'true')) {
$host = "{{ $config.DockerIP }}";
$port = {{ $config.DBPublishedPort }};
}
$db_url = "{{ $config.DatabaseDriver }}://{{ $config.DatabaseUsername }}:{{ $config.DatabasePassword }}@$host:$port/{{ $config.DatabaseName }}";
`
)
// manageDrupalSettingsFile will direct inspecting and writing of settings.php.
// It creates settings.php from the embedded template when it is missing, and
// otherwise guarantees the file includes settings.ddev.php.
func manageDrupalSettingsFile(app *DdevApp, drupalConfig *DrupalSettings, appType string) error {
	// We'll be writing/appending to the settings files and parent directory, make sure we have permissions to do so
	if err := drupalEnsureWritePerms(app); err != nil {
		return err
	}

	if !fileutil.FileExists(app.SiteSettingsPath) {
		output.UserOut.Printf("No %s file exists, creating one", drupalConfig.SiteSettings)
		if err := writeDrupalSettingsFile(app.SiteSettingsPath, appType); err != nil {
			return fmt.Errorf("failed to write: %v", err)
		}
	}

	included, err := settingsHasInclude(drupalConfig, app.SiteSettingsPath)
	if err != nil {
		return fmt.Errorf("failed to check for include: %v", err)
	}

	if included {
		output.UserOut.Printf("Existing %s file includes %s", drupalConfig.SiteSettings, drupalConfig.SiteSettingsDdev)
	} else {
		output.UserOut.Printf("Existing %s file does not include %s, modifying to include ddev settings", drupalConfig.SiteSettings, drupalConfig.SiteSettingsDdev)
		// CONSISTENCY FIX: use the appType parameter here (callers pass
		// app.Type, so behavior is unchanged) instead of reaching back into
		// app.Type while ignoring the parameter.
		if err := appendIncludeToDrupalSettingsFile(app.SiteSettingsPath, appType); err != nil {
			return fmt.Errorf("failed to include %s in %s: %v", drupalConfig.SiteSettingsDdev, drupalConfig.SiteSettings, err)
		}
	}

	return nil
}
// writeDrupalSettingsFile creates the project's settings.php if it doesn't exist
// by copying the packr-embedded settings.php template for the given appType.
func writeDrupalSettingsFile(filePath string, appType string) error {
	box := packr.New("drupal_settings_packr_assets", "./drupal_settings_packr_assets")
	content, err := box.Find(appType + "/settings.php")
	if err != nil {
		return err
	}

	// Ensure target directory exists and is writable.
	// Chmod fails with an IsNotExist error when the directory is missing,
	// which is the cue to create it; any other chmod error is fatal.
	dir := filepath.Dir(filePath)
	if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}

	// Create file
	err = ioutil.WriteFile(filePath, content, 0755)
	if err != nil {
		return err
	}

	return nil
}
// createDrupal7SettingsFile manages creation and modification of settings.php and settings.ddev.php.
// If a settings.php file already exists, it will be modified to ensure that it includes
// settings.ddev.php, which contains ddev-specific configuration.
// It returns the path of the ddev settings file that was written.
func createDrupal7SettingsFile(app *DdevApp) (string, error) {
	// Currently there isn't any customization done for the drupal config, but
	// we may want to do some kind of customization in the future.
	drupalConfig := NewDrupalSettings(app)

	if err := manageDrupalSettingsFile(app, drupalConfig, app.Type); err != nil {
		return "", err
	}

	// Error message normalized to match the D6/D8 variants; the previous text
	// contained stray backticks ("`failed to write`").
	if err := writeDrupal7DdevSettingsFile(drupalConfig, app.SiteDdevSettingsFile); err != nil {
		return "", fmt.Errorf("failed to write Drupal settings file %s: %v", app.SiteDdevSettingsFile, err)
	}

	return app.SiteDdevSettingsFile, nil
}
// createDrupal8SettingsFile manages creation and modification of settings.php and settings.ddev.php.
// If a settings.php file already exists, it will be modified to ensure that it includes
// settings.ddev.php, which contains ddev-specific configuration.
// It returns the path of the ddev settings file that was written.
func createDrupal8SettingsFile(app *DdevApp) (string, error) {
	// Currently there isn't any customization done for the drupal config, but
	// we may want to do some kind of customization in the future.
	drupalConfig := NewDrupalSettings(app)

	if err := manageDrupalSettingsFile(app, drupalConfig, app.Type); err != nil {
		return "", err
	}

	if err := writeDrupal8DdevSettingsFile(drupalConfig, app.SiteDdevSettingsFile); err != nil {
		return "", fmt.Errorf("failed to write Drupal settings file %s: %v", app.SiteDdevSettingsFile, err)
	}

	return app.SiteDdevSettingsFile, nil
}

// createDrupal9SettingsFile is just a wrapper on d8
// (Drupal 9 settings handling is identical to Drupal 8's).
func createDrupal9SettingsFile(app *DdevApp) (string, error) {
	return createDrupal8SettingsFile(app)
}
// createDrupal6SettingsFile manages creation and modification of settings.php and settings.ddev.php.
// If a settings.php file already exists, it will be modified to ensure that it includes
// settings.ddev.php, which contains ddev-specific configuration.
// It returns the path of the ddev settings file that was written.
func createDrupal6SettingsFile(app *DdevApp) (string, error) {
	// Currently there isn't any customization done for the drupal config, but
	// we may want to do some kind of customization in the future.
	drupalConfig := NewDrupalSettings(app)
	// mysqli is required in latest D6LTS and works fine in ddev in old D6
	drupalConfig.DatabaseDriver = "mysqli"

	if err := manageDrupalSettingsFile(app, drupalConfig, app.Type); err != nil {
		return "", err
	}

	if err := writeDrupal6DdevSettingsFile(drupalConfig, app.SiteDdevSettingsFile); err != nil {
		return "", fmt.Errorf("failed to write Drupal settings file %s: %v", app.SiteDdevSettingsFile, err)
	}

	return app.SiteDdevSettingsFile, nil
}
// writeDrupal8DdevSettingsFile dynamically produces valid settings.ddev.php file by combining a configuration
// object with a data-driven template.
// If the target file exists but lacks the ddev signature, it is treated as
// user-managed and left untouched (a warning is printed).
func writeDrupal8DdevSettingsFile(settings *DrupalSettings, filePath string) error {
	if fileutil.FileExists(filePath) {
		// Check if the file is managed by ddev.
		signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
		if err != nil {
			return err
		}

		// If the signature wasn't found, warn the user and return.
		if !signatureFound {
			util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
			return nil
		}
	}

	tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(drupal8DdevSettingsTemplate)
	if err != nil {
		return err
	}

	// Ensure target directory exists and is writable.
	// Chmod fails with an IsNotExist error when the directory is missing,
	// which is the cue to create it; any other chmod error is fatal.
	dir := filepath.Dir(filePath)
	if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}

	file, err := os.Create(filePath)
	if err != nil {
		return err
	}
	// Deferred close releases the handle even when template execution fails.
	defer util.CheckClose(file)

	if err := tmpl.Execute(file, settings); err != nil {
		return err
	}

	return nil
}
// writeDrupal7DdevSettingsFile dynamically produces valid settings.ddev.php file by combining a configuration
// object with a data-driven template.
// If the target file exists but lacks the ddev signature, it is treated as
// user-managed and left untouched (a warning is printed).
func writeDrupal7DdevSettingsFile(settings *DrupalSettings, filePath string) error {
	if fileutil.FileExists(filePath) {
		// Check if the file is managed by ddev.
		signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
		if err != nil {
			return err
		}

		// If the signature wasn't found, warn the user and return.
		if !signatureFound {
			util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
			return nil
		}
	}

	tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(drupal7DdevSettingsTemplate)
	if err != nil {
		return err
	}

	// Ensure target directory exists and is writable
	dir := filepath.Dir(filePath)
	if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}

	file, err := os.Create(filePath)
	if err != nil {
		return err
	}
	// LEAK FIX: close via defer (matching writeDrupal8DdevSettingsFile) so the
	// file handle is released even when template execution fails; previously
	// the file was only closed on the success path.
	defer util.CheckClose(file)

	if err = tmpl.Execute(file, settings); err != nil {
		return err
	}

	return nil
}
// writeDrupal6DdevSettingsFile dynamically produces valid settings.ddev.php file by combining a configuration
// object with a data-driven template.
// If the target file exists but lacks the ddev signature, it is treated as
// user-managed and left untouched (a warning is printed).
func writeDrupal6DdevSettingsFile(settings *DrupalSettings, filePath string) error {
	if fileutil.FileExists(filePath) {
		// Check if the file is managed by ddev.
		signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
		if err != nil {
			return err
		}

		// If the signature wasn't found, warn the user and return.
		if !signatureFound {
			util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
			return nil
		}
	}

	tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(drupal6DdevSettingsTemplate)
	if err != nil {
		return err
	}

	// Ensure target directory exists and is writable
	dir := filepath.Dir(filePath)
	if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}

	file, err := os.Create(filePath)
	if err != nil {
		return err
	}
	// LEAK FIX: close via defer (matching writeDrupal8DdevSettingsFile) so the
	// file handle is released even when template execution fails; previously
	// the file was only closed on the success path.
	defer util.CheckClose(file)

	if err = tmpl.Execute(file, settings); err != nil {
		return err
	}

	return nil
}
// WriteDrushrc writes out drushrc.php based on passed-in values.
// This works on Drupal 6 and Drupal 7 or with drush8 and older.
// A pre-existing drushrc.php without the ddev signature is treated as
// user-managed and left untouched (a warning is printed).
func WriteDrushrc(app *DdevApp, filePath string) error {
	if fileutil.FileExists(filePath) {
		// Check if the file is managed by ddev.
		signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
		if err != nil {
			return err
		}

		// If the signature wasn't found, warn the user and return.
		if !signatureFound {
			util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
			return nil
		}
	}

	// The generated drushrc points drush's -l/--uri at the project's primary
	// URL, but only when running inside the ddev container.
	uri := app.GetPrimaryURL()
	drushContents := []byte(`<?php
/** ` + DdevFileSignature + `: Automatically generated drushrc.php file (for Drush 8)
ddev manages this file and may delete or overwrite the file unless this comment is removed.
Remove this comment if you don't want ddev to manage this file.'
*/
if (getenv('IS_DDEV_PROJECT') == 'true') {
$options['l'] = "` + uri + `";
}
`)

	// Ensure target directory exists and is writable
	dir := filepath.Dir(filePath)
	if err := os.Chmod(dir, 0755); os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}

	// NOTE(review): mode 0666 (before umask) — presumably so in-container
	// processes can rewrite the file; confirm intent.
	err := ioutil.WriteFile(filePath, drushContents, 0666)
	if err != nil {
		return err
	}

	return nil
}
// getDrupalUploadDir will return a custom upload dir if defined, returning a default path if not.
// The default is Drupal's conventional public files path, sites/default/files.
func getDrupalUploadDir(app *DdevApp) string {
	if app.UploadDir == "" {
		return "sites/default/files"
	}

	return app.UploadDir
}
// Drupal8Hooks adds a d8-specific hooks example for post-import-db
const Drupal8Hooks = `# post-import-db:
# - exec: drush cr
# - exec: drush updb
`
// Drupal7Hooks adds a d7-specific hooks example for post-import-db
const Drupal7Hooks = `# post-import-db:
# - exec: drush cc all
`
// getDrupal7Hooks for appending as byte array
// (returns the commented post-import-db hook example for D7 config files).
func getDrupal7Hooks() []byte {
	return []byte(Drupal7Hooks)
}

// getDrupal6Hooks for appending as byte array
func getDrupal6Hooks() []byte {
	// We don't have anything new to add yet, so just use Drupal7 version
	return []byte(Drupal7Hooks)
}

// getDrupal8Hooks for appending as byte array
// (returns the commented post-import-db hook example for D8 config files).
func getDrupal8Hooks() []byte {
	return []byte(Drupal8Hooks)
}
// setDrupalSiteSettingsPaths sets the paths to settings.php/settings.ddev.php
// for templating.
// Both paths live under <approot>/<docroot>/sites/default.
func setDrupalSiteSettingsPaths(app *DdevApp) {
	drupalConfig := NewDrupalSettings(app)
	settingsFileBasePath := filepath.Join(app.AppRoot, app.Docroot)
	app.SiteSettingsPath = filepath.Join(settingsFileBasePath, drupalConfig.SitePath, drupalConfig.SiteSettings)
	app.SiteDdevSettingsFile = filepath.Join(settingsFileBasePath, drupalConfig.SitePath, drupalConfig.SiteSettingsDdev)
}
// isDrupal7App reports whether the project looks like a Drupal 7 codebase,
// detected by the presence of misc/ajax.js in the docroot.
func isDrupal7App(app *DdevApp) bool {
	_, err := os.Stat(filepath.Join(app.AppRoot, app.Docroot, "misc/ajax.js"))
	return err == nil
}

// isDrupal8App reports whether the docroot contains a Drupal 8 core,
// detected by the VERSION constant in core/lib/Drupal.php.
func isDrupal8App(app *DdevApp) bool {
	isD8, err := fileutil.FgrepStringInFile(filepath.Join(app.AppRoot, app.Docroot, "core/lib/Drupal.php"), `const VERSION = '8`)
	return err == nil && isD8
}

// isDrupal9App reports whether the docroot contains a Drupal 9 core,
// detected by the VERSION constant in core/lib/Drupal.php.
func isDrupal9App(app *DdevApp) bool {
	isD9, err := fileutil.FgrepStringInFile(filepath.Join(app.AppRoot, app.Docroot, "core/lib/Drupal.php"), `const VERSION = '9`)
	return err == nil && isD9
}

// isDrupal6App reports whether the project looks like a Drupal 6 codebase,
// detected by the presence of misc/ahah.js in the docroot.
func isDrupal6App(app *DdevApp) bool {
	_, err := os.Stat(filepath.Join(app.AppRoot, app.Docroot, "misc/ahah.js"))
	return err == nil
}
// drupal6ConfigOverrideAction overrides php_version for D6, since it is incompatible
// with php7+
func drupal6ConfigOverrideAction(app *DdevApp) error {
	app.PHPVersion = nodeps.PHP56
	return nil
}

// drupal8ConfigOverrideAction overrides mariadb_version for Drupal 8 for future
// compatibility with Drupal 9, since it requires at least 10.3.
func drupal8ConfigOverrideAction(app *DdevApp) error {
	app.MariaDBVersion = nodeps.MariaDB103
	return nil
}

// drupal9ConfigOverrideAction overrides mariadb_version for D9,
// since it requires at least 10.3
func drupal9ConfigOverrideAction(app *DdevApp) error {
	app.MariaDBVersion = nodeps.MariaDB103
	return nil
}
// drupal8PostStartAction handles default post-start actions for D8 apps, like ensuring
// useful permissions settings on sites/default.
// All settings-file work is skipped when the user has disabled settings
// management.
func drupal8PostStartAction(app *DdevApp) error {
	if !app.DisableSettingsManagement {
		if err := createDrupal8SyncDir(app); err != nil {
			return err
		}

		if err := drupalEnsureWritePerms(app); err != nil {
			return err
		}

		if _, err := app.CreateSettingsFile(); err != nil {
			return fmt.Errorf("failed to write settings file %s: %v", app.SiteDdevSettingsFile, err)
		}
	}
	return nil
}
// drupal7PostStartAction handles default post-start actions for D7 apps, like ensuring
// useful permissions settings on sites/default.
func drupal7PostStartAction(app *DdevApp) error {
	// CONSISTENCY FIX: match drupal8PostStartAction — when the user has
	// disabled settings management, don't touch settings.php, drushrc.php,
	// or any permissions.
	if app.DisableSettingsManagement {
		return nil
	}

	if err := drupalEnsureWritePerms(app); err != nil {
		return err
	}

	// drushrc.php is best-effort; a failure only warns.
	err := WriteDrushrc(app, filepath.Join(filepath.Dir(app.SiteSettingsPath), "drushrc.php"))
	if err != nil {
		util.Warning("Failed to WriteDrushrc: %v", err)
	}

	if _, err = app.CreateSettingsFile(); err != nil {
		return fmt.Errorf("failed to write settings file %s: %v", app.SiteDdevSettingsFile, err)
	}
	return nil
}
// drupal6PostStartAction handles default post-start actions for D6 apps, like ensuring
// useful permissions settings on sites/default.
func drupal6PostStartAction(app *DdevApp) error {
	// CONSISTENCY FIX: match drupal8PostStartAction — when the user has
	// disabled settings management, don't touch settings.php, drushrc.php,
	// or any permissions.
	if app.DisableSettingsManagement {
		return nil
	}

	if err := drupalEnsureWritePerms(app); err != nil {
		return err
	}

	// drushrc.php is best-effort; a failure only warns.
	err := WriteDrushrc(app, filepath.Join(filepath.Dir(app.SiteSettingsPath), "drushrc.php"))
	if err != nil {
		util.Warning("Failed to WriteDrushrc: %v", err)
	}

	if _, err = app.CreateSettingsFile(); err != nil {
		return fmt.Errorf("failed to write settings file %s: %v", app.SiteDdevSettingsFile, err)
	}
	return nil
}
// drupalEnsureWritePerms will ensure sites/default and sites/default/settings.php will
// have the appropriate permissions for development.
// Failures are reported as warnings rather than errors so a permissions
// problem never blocks startup; the function always returns nil.
func drupalEnsureWritePerms(app *DdevApp) error {
	output.UserOut.Printf("Ensuring write permissions for %s", app.GetName())
	// 0200 is the owner-write bit; it is OR'd into each target's current mode.
	var writePerms os.FileMode = 0200

	settingsDir := path.Dir(app.SiteSettingsPath)
	makeWritable := []string{
		settingsDir,
		app.SiteSettingsPath,
		app.SiteDdevSettingsFile,
		path.Join(settingsDir, "services.yml"),
	}

	for _, o := range makeWritable {
		stat, err := os.Stat(o)
		if err != nil {
			// A missing target is fine (e.g. services.yml may not exist);
			// any other stat error is surfaced as a warning and skipped.
			if !os.IsNotExist(err) {
				util.Warning("Unable to ensure write permissions: %v", err)
			}

			continue
		}

		if err := os.Chmod(o, stat.Mode()|writePerms); err != nil {
			// Warn the user, but continue.
			util.Warning("Unable to set permissions: %v", err)
		}
	}

	return nil
}
// createDrupal8SyncDir creates a Drupal 8 app's sync directory
// (sites/default/files/sync by default) if it does not already exist.
func createDrupal8SyncDir(app *DdevApp) error {
	// Currently there isn't any customization done for the drupal config, but
	// we may want to do some kind of customization in the future.
	drupalConfig := NewDrupalSettings(app)
	syncDirPath := path.Join(app.GetAppRoot(), app.GetDocroot(), "sites/default", drupalConfig.SyncDir)
	if fileutil.FileExists(syncDirPath) {
		return nil
	}

	if err := os.MkdirAll(syncDirPath, 0755); err != nil {
		return fmt.Errorf("failed to create sync directory (%s): %v", syncDirPath, err)
	}

	return nil
}
// settingsHasInclude determines if the settings.php or equivalent includes settings.ddev.php or equivalent.
// This is done by looking for the ddev settings file (settings.ddev.php) in settings.php.
// A plain substring search suffices because the include stanza always names
// the file literally.
func settingsHasInclude(drupalConfig *DrupalSettings, siteSettingsPath string) (bool, error) {
	included, err := fileutil.FgrepStringInFile(siteSettingsPath, drupalConfig.SiteSettingsDdev)
	if err != nil {
		return false, err
	}

	return included, nil
}
// appendIncludeToDrupalSettingsFile modifies the settings.php file to include the settings.ddev.php
// file, which contains ddev-specific configuration.
// An empty settings.php is replaced wholesale with the embedded template;
// otherwise the canonical include stanza is appended verbatim.
func appendIncludeToDrupalSettingsFile(siteSettingsPath string, appType string) error {
	// Check if file is empty
	contents, err := ioutil.ReadFile(siteSettingsPath)
	if err != nil {
		return err
	}

	// If the file is empty, write the complete settings file and return
	if len(contents) == 0 {
		return writeDrupalSettingsFile(siteSettingsPath, appType)
	}

	// The file is not empty, open it for appending
	file, err := os.OpenFile(siteSettingsPath, os.O_RDWR|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	defer util.CheckClose(file)

	_, err = file.Write([]byte(settingsIncludeStanza))
	if err != nil {
		return err
	}

	return nil
}
// drupalImportFilesAction defines the Drupal workflow for importing project files.
// importPath may be a tarball, a zip archive, or a plain directory; extPath is
// passed through to the archive extractors (presumably a sub-path to extract —
// confirm against archive.Untar/Unzip). Any existing upload dir is removed
// before import.
func drupalImportFilesAction(app *DdevApp, importPath, extPath string) error {
	destPath := filepath.Join(app.GetAppRoot(), app.GetDocroot(), app.GetUploadDir())

	// parent of destination dir should exist
	if !fileutil.FileExists(filepath.Dir(destPath)) {
		return fmt.Errorf("unable to import to %s: parent directory does not exist", destPath)
	}

	// parent of destination dir should be writable.
	if err := os.Chmod(filepath.Dir(destPath), 0755); err != nil {
		return err
	}

	// If the destination path exists, remove it as was warned
	if fileutil.FileExists(destPath) {
		if err := os.RemoveAll(destPath); err != nil {
			return fmt.Errorf("failed to cleanup %s before import: %v", destPath, err)
		}
	}

	if isTar(importPath) {
		if err := archive.Untar(importPath, destPath, extPath); err != nil {
			return fmt.Errorf("failed to extract provided archive: %v", err)
		}

		return nil
	}

	if isZip(importPath) {
		if err := archive.Unzip(importPath, destPath, extPath); err != nil {
			return fmt.Errorf("failed to extract provided archive: %v", err)
		}

		return nil
	}

	// Not an archive: copy the directory tree recursively.
	if err := fileutil.CopyDir(importPath, destPath); err != nil {
		return err
	}

	return nil
}
| 1 | 14,956 | This isn't incorrect IMO, but I think it would be better to fix this in drupal7PostStartAction and also in drupal6PostStartAction. It seems to me like those were both somehow neglected on this. Use drupal8PostStartAction as example. Congrats on your first golang PR! Please make sure to test it manually. | drud-ddev | go |
@@ -10,8 +10,10 @@ test_name 'use the provision subcommand' do
delete_root_folder_contents
on(default, 'beaker init')
result = on(default, 'beaker provision --hosts centos6-64')
+ assert_match(/ERROR/, result.raw_output)
+ on(default, 'beaker init --hosts centos6-64')
+ result = on(default, 'beaker provision')
assert_match(/Using available host/, result.stdout)
-
subcommand_state = on(default, "cat #{SubcommandUtil::SUBCOMMAND_STATE}").stdout
subcommand_state = YAML.parse(subcommand_state).to_ruby
assert_equal(true, subcommand_state['provisioned']) | 1 | test_name 'use the provision subcommand' do
SubcommandUtil = Beaker::Subcommands::SubcommandUtil
def delete_root_folder_contents
on default, 'rm -rf /root/* /root/.beaker'
end
step 'run beaker init and provision' do
delete_root_folder_contents
on(default, 'beaker init')
result = on(default, 'beaker provision --hosts centos6-64')
assert_match(/Using available host/, result.stdout)
subcommand_state = on(default, "cat #{SubcommandUtil::SUBCOMMAND_STATE}").stdout
subcommand_state = YAML.parse(subcommand_state).to_ruby
assert_equal(true, subcommand_state['provisioned'])
end
end
| 1 | 15,058 | I'd like to ensure that the error message at least has some reference to the flag that is not allowed. Something like `/ERROR(.+)--hosts/` would work. | voxpupuli-beaker | rb |
@@ -470,8 +470,8 @@ https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-er
publicSubnets, err := o.selVPC.PublicSubnets(envInitPublicSubnetsSelectPrompt, "", o.importVPC.ID)
if err != nil {
if err == selector.ErrSubnetsNotFound {
- log.Warningf(`No existing public subnets were found in VPC %s.
-If you proceed without public subnets, you will not be able to deploy Load Balanced Web Services in this environment.
+ log.Warningf(`No existing subnets were found in VPC %s.
+If you proceed without specifying public subnets, you will not be able to deploy Load Balanced Web Services in this environment.
`, o.importVPC.ID)
} else {
return fmt.Errorf("select public subnets: %w", err) | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"net"
"os"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/iam"
"github.com/aws/copilot-cli/internal/pkg/aws/identity"
"github.com/aws/copilot-cli/internal/pkg/aws/profile"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
deploycfn "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
envInitAppNameHelpPrompt = "An environment will be created in the selected application."
envInitNamePrompt = "What is your environment's name?"
envInitNameHelpPrompt = "A unique identifier for an environment (e.g. dev, test, prod)."
envInitDefaultEnvConfirmPrompt = `Would you like to use the default configuration for a new environment?
- A new VPC with 2 AZs, 2 public subnets and 2 private subnets
- A new ECS Cluster
- New IAM Roles to manage services and jobs in your environment
`
envInitVPCSelectPrompt = "Which VPC would you like to use?"
envInitPublicSubnetsSelectPrompt = "Which public subnets would you like to use?\nYou may choose to press 'Enter' to skip this step if the services and/or jobs you'll deploy to this environment are not internet-facing."
envInitPrivateSubnetsSelectPrompt = "Which private subnets would you like to use?"
envInitVPCCIDRPrompt = "What VPC CIDR would you like to use?"
envInitVPCCIDRPromptHelp = "CIDR used for your VPC. For example: 10.1.0.0/16"
envInitPublicCIDRPrompt = "What CIDR would you like to use for your public subnets?"
envInitPublicCIDRPromptHelp = "CIDRs used for your public subnets. For example: 10.1.0.0/24,10.1.1.0/24"
envInitPrivateCIDRPrompt = "What CIDR would you like to use for your private subnets?"
envInitPrivateCIDRPromptHelp = "CIDRs used for your private subnets. For example: 10.1.2.0/24,10.1.3.0/24"
fmtEnvInitCredsPrompt = "Which credentials would you like to use to create %s?"
envInitCredsHelpPrompt = `The credentials are used to create your environment in an AWS account and region.
To learn more:
https://aws.github.io/copilot-cli/docs/credentials/#environment-credentials`
envInitRegionPrompt = "Which region?"
envInitDefaultRegionOption = "us-west-2"
fmtDNSDelegationStart = "Sharing DNS permissions for this application to account %s."
fmtDNSDelegationFailed = "Failed to grant DNS permissions to account %s.\n\n"
fmtDNSDelegationComplete = "Shared DNS permissions for this application to account %s.\n\n"
fmtAddEnvToAppStart = "Linking account %s and region %s to application %s."
fmtAddEnvToAppFailed = "Failed to link account %s and region %s to application %s.\n\n"
fmtAddEnvToAppComplete = "Linked account %s and region %s to application %s.\n\n"
)
var (
envInitAppNamePrompt = fmt.Sprintf("In which %s would you like to create the environment?", color.Emphasize("application"))
envInitDefaultConfigSelectOption = "Yes, use default."
envInitAdjustEnvResourcesSelectOption = "Yes, but I'd like configure the default resources (CIDR ranges)."
envInitImportEnvResourcesSelectOption = "No, I'd like to import existing resources (VPC, subnets)."
envInitCustomizedEnvTypes = []string{envInitDefaultConfigSelectOption, envInitAdjustEnvResourcesSelectOption, envInitImportEnvResourcesSelectOption}
)
// importVPCVars holds the flag values for reusing existing VPC resources
// instead of letting Copilot create new ones.
type importVPCVars struct {
	ID               string
	PublicSubnetIDs  []string
	PrivateSubnetIDs []string
}

// isSet reports whether the user provided any of the import-VPC flags.
func (v importVPCVars) isSet() bool {
	return v.ID != "" || len(v.PublicSubnetIDs) > 0 || len(v.PrivateSubnetIDs) > 0
}
// adjustVPCVars holds the flag values for overriding the CIDR ranges of the
// VPC resources that Copilot generates by default.
type adjustVPCVars struct {
	CIDR               net.IPNet
	PublicSubnetCIDRs  []string
	PrivateSubnetCIDRs []string
}

// isSet reports whether the user provided any of the override-CIDR flags.
func (v adjustVPCVars) isSet() bool {
	hasCIDR := v.CIDR.String() != emptyIPNet.String()
	return hasCIDR || len(v.PublicSubnetCIDRs) > 0 || len(v.PrivateSubnetCIDRs) > 0
}
// tempCredsVars holds temporary AWS credentials passed in by flags.
type tempCredsVars struct {
	AccessKeyID     string
	SecretAccessKey string
	SessionToken    string
}

// isSet reports whether a usable static credential pair was provided.
// A session token alone is not sufficient.
func (v tempCredsVars) isSet() bool {
	if v.AccessKeyID == "" {
		return false
	}
	return v.SecretAccessKey != ""
}
// initEnvVars holds the user-supplied inputs (flags or prompt answers) for
// the "env init" command.
type initEnvVars struct {
	appName       string
	name          string        // Name for the environment.
	profile       string        // The named profile to use for credential retrieval. Mutually exclusive with tempCreds.
	isProduction  bool          // True means retain resources even after deletion.
	defaultConfig bool          // True means using default environment configuration.
	importVPC     importVPCVars // Existing VPC resources to use instead of creating new ones.
	adjustVPC     adjustVPCVars // Configure parameters for VPC resources generated while initializing an environment.
	tempCreds     tempCredsVars // Temporary credentials to initialize the environment. Mutually exclusive with the profile.
	region        string        // The region to create the environment in.
}
// initEnvOpts implements the Validate/Ask/Execute workflow for "env init".
// Fields depending on the environment session (envIdentity, envDeployer, cfn,
// iam) are populated lazily by initRuntimeClients once sess is known.
type initEnvOpts struct {
	initEnvVars

	// Interfaces to interact with dependencies.
	sessProvider sessionProvider
	store        store
	envDeployer  deployer        // Deploys stacks in the environment's account/region.
	appDeployer  deployer        // Deploys against the application's account (stack set, DNS).
	identity     identityService // Caller identity for the default (app) session.
	envIdentity  identityService // Caller identity for the environment session.
	ec2Client    ec2Client
	iam          roleManager
	cfn          stackExistChecker
	prog         progress
	prompt       prompter
	selVPC       ec2Selector
	selCreds     credsSelector
	selApp       appSelector
	appCFN       appResourcesGetter
	newS3        func(string) (zipAndUploader, error) // Builds an S3 client for a given region.
	uploader     customResourcesUploader

	sess *session.Session // Session pointing to environment's AWS account and region.
}
// newInitEnvOpts constructs an initEnvOpts wired with its production
// dependencies. Clients that need the environment session are deliberately
// left nil here and created later by initRuntimeClients, after the user has
// chosen credentials and a region.
func newInitEnvOpts(vars initEnvVars) (*initEnvOpts, error) {
	store, err := config.NewStore()
	if err != nil {
		return nil, err
	}
	sessProvider := sessions.NewProvider()
	defaultSession, err := sessProvider.Default()
	if err != nil {
		return nil, err
	}
	cfg, err := profile.NewConfig()
	if err != nil {
		return nil, fmt.Errorf("read named profiles: %w", err)
	}
	prompter := prompt.New()
	return &initEnvOpts{
		initEnvVars:  vars,
		sessProvider: sessProvider,
		store:        store,
		appDeployer:  deploycfn.New(defaultSession),
		identity:     identity.New(defaultSession),
		prog:         termprogress.NewSpinner(log.DiagnosticWriter),
		prompt:       prompter,
		selCreds: &selector.CredsSelect{
			Session: sessProvider,
			Profile: cfg,
			Prompt:  prompter,
		},
		selApp:   selector.NewSelect(prompt.New(), store),
		uploader: template.New(),
		appCFN:   deploycfn.New(defaultSession),
		// newS3 is a factory because the environment's region is only known
		// after Ask() has run.
		newS3: func(region string) (zipAndUploader, error) {
			sess, err := sessProvider.DefaultWithRegion(region)
			if err != nil {
				return nil, err
			}
			return s3.New(sess), nil
		},
	}, nil
}
// Validate returns an error if the values passed by flags are invalid.
// Checks run in order: environment name (when given), VPC flag coherence,
// then credential flag coherence; the first failure wins.
func (o *initEnvOpts) Validate() error {
	checks := []func() error{
		func() error {
			if o.name == "" {
				return nil
			}
			return validateEnvironmentName(o.name)
		},
		o.validateCustomizedResources,
		o.validateCredentials,
	}
	for _, check := range checks {
		if err := check(); err != nil {
			return err
		}
	}
	return nil
}
// Ask asks for fields that are required but not passed in.
// The order matters: the session must be resolved before the region, and the
// region before any VPC prompts that list resources in it.
func (o *initEnvOpts) Ask() error {
	for _, ask := range []func() error{
		o.askAppName,
		o.askEnvName,
		o.askEnvSession,
		o.askEnvRegion,
		o.askCustomizedResources,
	} {
		if err := ask(); err != nil {
			return err
		}
	}
	return nil
}
// Execute deploys a new environment with CloudFormation and adds it to SSM.
// Steps run in a fixed order; each numbered step below depends on the ones
// before it.
func (o *initEnvOpts) Execute() error {
	o.initRuntimeClients()
	app, err := o.store.GetApplication(o.appName)
	if err != nil {
		// Ensure the app actually exists before we do a deployment.
		return err
	}
	envCaller, err := o.envIdentity.Get()
	if err != nil {
		return fmt.Errorf("get identity: %w", err)
	}
	if app.RequiresDNSDelegation() {
		if err := o.delegateDNSFromApp(app, envCaller.Account); err != nil {
			return fmt.Errorf("granting DNS permissions: %w", err)
		}
	}
	// 1. Attempt to create the service linked role if it doesn't exist.
	// If the call fails because the role already exists, nothing to do.
	// If the call fails because the user doesn't have permissions, then the role must be created outside of Copilot.
	_ = o.iam.CreateECSServiceLinkedRole()
	// 2. Add the stack set instance to the app stackset.
	if err := o.addToStackset(&deploycfn.AddEnvToAppOpts{
		App:          app,
		EnvName:      o.name,
		EnvRegion:    aws.StringValue(o.sess.Config.Region),
		EnvAccountID: envCaller.Account,
	}); err != nil {
		return err
	}
	// 3. Upload environment custom resource scripts to the S3 bucket, because of the 4096 characters limit (see
	// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-code.html#cfn-lambda-function-code-zipfile)
	envRegion := aws.StringValue(o.sess.Config.Region)
	resources, err := o.appCFN.GetAppResourcesByRegion(app, envRegion)
	if err != nil {
		return fmt.Errorf("get app resources: %w", err)
	}
	s3Client, err := o.newS3(envRegion)
	if err != nil {
		return err
	}
	urls, err := o.uploader.UploadEnvironmentCustomResources(s3.CompressAndUploadFunc(func(key string, objects ...s3.NamedBinary) (string, error) {
		return s3Client.ZipAndUpload(resources.S3Bucket, key, objects...)
	}))
	if err != nil {
		return fmt.Errorf("upload custom resources to bucket %s: %w", resources.S3Bucket, err)
	}
	// 4. Start creating the CloudFormation stack for the environment.
	if err := o.deployEnv(app, urls); err != nil {
		return err
	}
	// 5. Get the environment metadata back from the deployed stack.
	env, err := o.envDeployer.GetEnvironment(o.appName, o.name)
	if err != nil {
		return fmt.Errorf("get environment struct for %s: %w", o.name, err)
	}
	env.Prod = o.isProduction
	env.CustomConfig = config.NewCustomizeEnv(o.importVPCConfig(), o.adjustVPCConfig())
	// 6. Store the environment in SSM.
	if err := o.store.CreateEnvironment(env); err != nil {
		return fmt.Errorf("store environment: %w", err)
	}
	log.Successf("Created environment %s in region %s under application %s.\n",
		color.HighlightUserInput(env.Name), color.Emphasize(env.Region), color.HighlightUserInput(env.App))
	return nil
}
// RecommendedActions returns follow-up actions the user can take after successfully executing the command.
// There are currently no recommended actions for "env init".
func (o *initEnvOpts) RecommendedActions() []string {
	var none []string
	return none
}
// initRuntimeClients lazily constructs the clients that require the
// environment session (o.sess), which is only available after Ask() has
// resolved credentials and region. Fields that are already set (e.g. test
// doubles) are left untouched.
func (o *initEnvOpts) initRuntimeClients() {
	// Initialize environment clients if not set.
	if o.envIdentity == nil {
		o.envIdentity = identity.New(o.sess)
	}
	if o.envDeployer == nil {
		o.envDeployer = deploycfn.New(o.sess)
	}
	if o.cfn == nil {
		o.cfn = cloudformation.New(o.sess)
	}
	if o.iam == nil {
		o.iam = iam.New(o.sess)
	}
}
// validateCustomizedResources checks that the VPC-related flags are coherent:
// import flags and override flags are mutually exclusive, neither may be
// combined with --default-config, and imported subnets must satisfy the
// minimum counts.
func (o *initEnvOpts) validateCustomizedResources() error {
	imported, adjusted := o.importVPC.isSet(), o.adjustVPC.isSet()
	if imported && adjusted {
		return errors.New("cannot specify both import vpc flags and configure vpc flags")
	}
	if o.defaultConfig && (imported || adjusted) {
		return fmt.Errorf("cannot import or configure vpc if --%s is set", defaultConfigFlag)
	}
	if !imported {
		return nil
	}
	// Zero public subnets is allowed, but a single one cannot back a load balancer.
	if len(o.importVPC.PublicSubnetIDs) == 1 {
		return fmt.Errorf("at least two public subnets must be imported to enable Load Balancing")
	}
	// Private subnets are mandatory: two or more must be imported.
	if len(o.importVPC.PrivateSubnetIDs) < 2 {
		return fmt.Errorf("at least two private subnets must be imported")
	}
	return nil
}
// askAppName prompts for the application unless it was already passed as a flag.
func (o *initEnvOpts) askAppName() error {
	if o.appName != "" {
		return nil
	}
	selected, err := o.selApp.Application(envInitAppNamePrompt, envInitAppNameHelpPrompt)
	if err != nil {
		return fmt.Errorf("ask for application: %w", err)
	}
	o.appName = selected
	return nil
}
// askEnvName prompts for the environment name unless it was already passed
// as a flag; the answer is validated with validateEnvironmentName.
func (o *initEnvOpts) askEnvName() error {
	if o.name != "" {
		return nil
	}
	answer, err := o.prompt.Get(envInitNamePrompt, envInitNameHelpPrompt, validateEnvironmentName)
	if err != nil {
		return fmt.Errorf("get environment name: %w", err)
	}
	o.name = answer
	return nil
}
// askEnvSession resolves the AWS session used to create the environment.
// Precedence: --profile, then the static credential flags, then an
// interactive credential selection.
func (o *initEnvOpts) askEnvSession() error {
	switch {
	case o.profile != "":
		sess, err := o.sessProvider.FromProfile(o.profile)
		if err != nil {
			return fmt.Errorf("create session from profile %s: %w", o.profile, err)
		}
		o.sess = sess
	case o.tempCreds.isSet():
		sess, err := o.sessProvider.FromStaticCreds(o.tempCreds.AccessKeyID, o.tempCreds.SecretAccessKey, o.tempCreds.SessionToken)
		if err != nil {
			return err
		}
		o.sess = sess
	default:
		sess, err := o.selCreds.Creds(fmt.Sprintf(fmtEnvInitCredsPrompt, color.HighlightUserInput(o.name)), envInitCredsHelpPrompt)
		if err != nil {
			return fmt.Errorf("select creds: %w", err)
		}
		o.sess = sess
	}
	return nil
}
// askEnvRegion determines the environment's region: the --region flag wins,
// then the session's configured region, then an interactive prompt with
// us-west-2 as the default. The result is written back onto the session.
func (o *initEnvOpts) askEnvRegion() error {
	region := o.region
	if region == "" {
		region = aws.StringValue(o.sess.Config.Region)
	}
	if region == "" {
		answer, err := o.prompt.Get(envInitRegionPrompt, "", nil, prompt.WithDefaultInput(envInitDefaultRegionOption))
		if err != nil {
			return fmt.Errorf("get environment region: %w", err)
		}
		region = answer
	}
	o.sess.Config.Region = aws.String(region)
	return nil
}
// askCustomizedResources decides between the default environment
// configuration, adjusted CIDRs, or imported resources. Flags settle the
// question when present; otherwise the user is prompted.
func (o *initEnvOpts) askCustomizedResources() error {
	switch {
	case o.defaultConfig:
		return nil
	case o.importVPC.isSet():
		return o.askImportResources()
	case o.adjustVPC.isSet():
		return o.askAdjustResources()
	}
	choice, err := o.prompt.SelectOne(
		envInitDefaultEnvConfirmPrompt, "",
		envInitCustomizedEnvTypes)
	if err != nil {
		return fmt.Errorf("select adjusting or importing resources: %w", err)
	}
	switch choice {
	case envInitImportEnvResourcesSelectOption:
		return o.askImportResources()
	case envInitAdjustEnvResourcesSelectOption:
		return o.askAdjustResources()
	}
	// envInitDefaultConfigSelectOption: nothing more to ask.
	return nil
}
// askImportResources fills in any import-VPC values not provided by flags:
// the VPC ID, then its public and private subnets. VPCs with DNS support
// disabled are rejected outright.
func (o *initEnvOpts) askImportResources() error {
	if o.selVPC == nil {
		o.selVPC = selector.NewEC2Select(o.prompt, ec2.New(o.sess))
	}
	if o.importVPC.ID == "" {
		vpcID, err := o.selVPC.VPC(envInitVPCSelectPrompt, "")
		if err != nil {
			if err == selector.ErrVPCNotFound {
				log.Errorf(`No existing VPCs were found. You can either:
- Create a new VPC first and then import it.
- Use the default Copilot environment configuration.
`)
			}
			return fmt.Errorf("select VPC: %w", err)
		}
		o.importVPC.ID = vpcID
	}
	if o.ec2Client == nil {
		o.ec2Client = ec2.New(o.sess)
	}
	dnsSupport, err := o.ec2Client.HasDNSSupport(o.importVPC.ID)
	if err != nil {
		return fmt.Errorf("check if VPC %s has DNS support enabled: %w", o.importVPC.ID, err)
	}
	if !dnsSupport {
		log.Errorln(`Looks like you're creating an environment using a VPC with DNS support *disabled*.
Copilot cannot create services or jobs in VPCs without DNS support. We recommend enabling this property.
To learn more about the issue:
https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr/`)
		return fmt.Errorf("VPC %s has no DNS support enabled", o.importVPC.ID)
	}
	if o.importVPC.PublicSubnetIDs == nil {
		publicSubnets, err := o.selVPC.PublicSubnets(envInitPublicSubnetsSelectPrompt, "", o.importVPC.ID)
		if err != nil {
			// Missing public subnets is only a warning: the environment can
			// still be created, just without Load Balanced Web Services.
			if err == selector.ErrSubnetsNotFound {
				log.Warningf(`No existing public subnets were found in VPC %s.
If you proceed without public subnets, you will not be able to deploy Load Balanced Web Services in this environment.
`, o.importVPC.ID)
			} else {
				return fmt.Errorf("select public subnets: %w", err)
			}
		}
		if len(publicSubnets) == 1 {
			return errors.New("select public subnets: at least two public subnets must be selected to enable Load Balancing")
		}
		o.importVPC.PublicSubnetIDs = publicSubnets
	}
	if o.importVPC.PrivateSubnetIDs == nil {
		privateSubnets, err := o.selVPC.PrivateSubnets(envInitPrivateSubnetsSelectPrompt, "", o.importVPC.ID)
		if err != nil {
			// Unlike public subnets, missing private subnets is a hard error.
			if err == selector.ErrSubnetsNotFound {
				log.Errorf(`No existing private subnets were found in VPC %s. You can either:
- Create new private subnets and then import them.
- Use the default Copilot environment configuration.`, o.importVPC.ID)
			}
			return fmt.Errorf("select private subnets: %w", err)
		}
		if len(privateSubnets) == 1 {
			return errors.New("select private subnets: at least two private subnets must be selected")
		}
		o.importVPC.PrivateSubnetIDs = privateSubnets
	}
	return nil
}
// askAdjustResources prompts for any CIDR override values not given by
// flags, offering Copilot's stock defaults as the suggested answers.
func (o *initEnvOpts) askAdjustResources() error {
	if o.adjustVPC.CIDR.String() == emptyIPNet.String() {
		vpcCIDRString, err := o.prompt.Get(envInitVPCCIDRPrompt, envInitVPCCIDRPromptHelp, validateCIDR,
			prompt.WithDefaultInput(stack.DefaultVPCCIDR))
		if err != nil {
			return fmt.Errorf("get VPC CIDR: %w", err)
		}
		_, vpcCIDR, err := net.ParseCIDR(vpcCIDRString)
		if err != nil {
			return fmt.Errorf("parse VPC CIDR: %w", err)
		}
		o.adjustVPC.CIDR = *vpcCIDR
	}
	if o.adjustVPC.PublicSubnetCIDRs == nil {
		// Answers are comma-separated lists of CIDRs; split into a slice.
		publicCIDR, err := o.prompt.Get(envInitPublicCIDRPrompt, envInitPublicCIDRPromptHelp, validateCIDRSlice,
			prompt.WithDefaultInput(stack.DefaultPublicSubnetCIDRs))
		if err != nil {
			return fmt.Errorf("get public subnet CIDRs: %w", err)
		}
		o.adjustVPC.PublicSubnetCIDRs = strings.Split(publicCIDR, ",")
	}
	if o.adjustVPC.PrivateSubnetCIDRs == nil {
		privateCIDR, err := o.prompt.Get(envInitPrivateCIDRPrompt, envInitPrivateCIDRPromptHelp, validateCIDRSlice,
			prompt.WithDefaultInput(stack.DefaultPrivateSubnetCIDRs))
		if err != nil {
			return fmt.Errorf("get private subnet CIDRs: %w", err)
		}
		o.adjustVPC.PrivateSubnetCIDRs = strings.Split(privateCIDR, ",")
	}
	return nil
}
// importVPCConfig converts the import flags into the persisted config form.
// Returns nil when default configuration is used or nothing was imported.
func (o *initEnvOpts) importVPCConfig() *config.ImportVPC {
	if o.defaultConfig {
		return nil
	}
	if !o.importVPC.isSet() {
		return nil
	}
	return &config.ImportVPC{
		ID:               o.importVPC.ID,
		PrivateSubnetIDs: o.importVPC.PrivateSubnetIDs,
		PublicSubnetIDs:  o.importVPC.PublicSubnetIDs,
	}
}
// adjustVPCConfig converts the override-CIDR flags into the persisted config
// form. Returns nil when default configuration is used or nothing was overridden.
func (o *initEnvOpts) adjustVPCConfig() *config.AdjustVPC {
	if o.defaultConfig {
		return nil
	}
	if !o.adjustVPC.isSet() {
		return nil
	}
	return &config.AdjustVPC{
		CIDR:               o.adjustVPC.CIDR.String(),
		PrivateSubnetCIDRs: o.adjustVPC.PrivateSubnetCIDRs,
		PublicSubnetCIDRs:  o.adjustVPC.PublicSubnetCIDRs,
	}
}
// deployEnv creates the environment's CloudFormation stack and renders its
// progress to stderr. An already-existing stack is treated as success; any
// other failure triggers a best-effort cleanup of the retained IAM roles.
func (o *initEnvOpts) deployEnv(app *config.Application, customResourcesURLs map[string]string) error {
	caller, err := o.identity.Get()
	if err != nil {
		return fmt.Errorf("get identity: %w", err)
	}
	deployEnvInput := &deploy.CreateEnvironmentInput{
		Name:                     o.name,
		AppName:                  o.appName,
		Prod:                     o.isProduction,
		ToolsAccountPrincipalARN: caller.RootUserARN,
		AppDNSName:               app.Domain,
		AdditionalTags:           app.Tags,
		CustomResourcesURLs:      customResourcesURLs,
		AdjustVPCConfig:          o.adjustVPCConfig(),
		ImportVPCConfig:          o.importVPCConfig(),
		Version:                  deploy.LatestEnvTemplateVersion,
	}
	// Remove roles left over from a previous creation so the stack doesn't
	// fail on name collisions.
	if err := o.cleanUpDanglingRoles(o.appName, o.name); err != nil {
		return err
	}
	if err := o.envDeployer.DeployAndRenderEnvironment(os.Stderr, deployEnvInput); err != nil {
		var existsErr *cloudformation.ErrStackAlreadyExists
		if errors.As(err, &existsErr) {
			// Do nothing if the stack already exists.
			return nil
		}
		// The stack failed to create due to an unexpected reason.
		// Delete the retained roles created as part of the stack.
		o.tryDeletingEnvRoles(o.appName, o.name)
		return err
	}
	return nil
}
// addToStackset registers the environment's account and region as a stack
// set instance of the application's stack set, reporting progress on the
// terminal spinner.
func (o *initEnvOpts) addToStackset(opts *deploycfn.AddEnvToAppOpts) error {
	o.prog.Start(fmt.Sprintf(fmtAddEnvToAppStart, color.Emphasize(opts.EnvAccountID), color.Emphasize(opts.EnvRegion), color.HighlightUserInput(o.appName)))
	if err := o.appDeployer.AddEnvToApp(opts); err != nil {
		o.prog.Stop(log.Serrorf(fmtAddEnvToAppFailed, color.Emphasize(opts.EnvAccountID), color.Emphasize(opts.EnvRegion), color.HighlightUserInput(o.appName)))
		return fmt.Errorf("deploy env %s to application %s: %w", opts.EnvName, opts.App.Name, err)
	}
	o.prog.Stop(log.Ssuccessf(fmtAddEnvToAppComplete, color.Emphasize(opts.EnvAccountID), color.Emphasize(opts.EnvRegion), color.HighlightUserInput(o.appName)))
	return nil
}
// delegateDNSFromApp shares the application's DNS permissions with the
// environment's account. It is a no-op when the environment lives in the
// same account as the application.
func (o *initEnvOpts) delegateDNSFromApp(app *config.Application, accountID string) error {
	// By default, our DNS Delegation permits same account delegation.
	if accountID == app.AccountID {
		return nil
	}
	o.prog.Start(fmt.Sprintf(fmtDNSDelegationStart, color.HighlightUserInput(accountID)))
	if err := o.appDeployer.DelegateDNSPermissions(app, accountID); err != nil {
		o.prog.Stop(log.Serrorf(fmtDNSDelegationFailed, color.HighlightUserInput(accountID)))
		return err
	}
	o.prog.Stop(log.Ssuccessf(fmtDNSDelegationComplete, color.HighlightUserInput(accountID)))
	return nil
}
// validateCredentials ensures --profile is not combined with any of the
// static credential flags; the first conflicting flag is reported.
func (o *initEnvOpts) validateCredentials() error {
	if o.profile == "" {
		return nil
	}
	conflicts := []struct {
		value string
		flag  string
	}{
		{o.tempCreds.AccessKeyID, accessKeyIDFlag},
		{o.tempCreds.SecretAccessKey, secretAccessKeyFlag},
		{o.tempCreds.SessionToken, sessionTokenFlag},
	}
	for _, c := range conflicts {
		if c.value != "" {
			return fmt.Errorf("cannot specify both --%s and --%s", profileFlag, c.flag)
		}
	}
	return nil
}
// cleanUpDanglingRoles deletes any IAM roles created for the same app and env that were left over from a previous
// environment creation.
func (o *initEnvOpts) cleanUpDanglingRoles(app, env string) error {
	stackName := stack.NameForEnv(app, env)
	exists, err := o.cfn.Exists(stackName)
	if err != nil {
		return fmt.Errorf("check if stack %s exists: %w", stackName, err)
	}
	if exists {
		return nil
	}
	// No environment stack: either "env delete" ran before, or this is the
	// first creation. Remove any IAM roles that "env delete" retained,
	// otherwise the upcoming deployment would fail.
	o.tryDeletingEnvRoles(app, env)
	return nil
}
// tryDeletingEnvRoles attempts a best-effort deletion of the IAM roles an
// environment stack creates. To ensure only Copilot-owned roles are removed,
// a role is deleted only when it carries the copilot-environment tag.
func (o *initEnvOpts) tryDeletingEnvRoles(app, env string) {
	stackName := stack.NameForEnv(app, env)
	for _, suffix := range []string{"-CFNExecutionRole", "-EnvManagerRole"} {
		roleName := stackName + suffix
		tags, err := o.iam.ListRoleTags(roleName)
		if err != nil {
			continue // Best effort: skip roles we can't inspect.
		}
		if _, hasTag := tags[deploy.EnvTagKey]; !hasTag {
			continue
		}
		_ = o.iam.DeleteRole(roleName)
	}
}
// buildEnvInitCmd builds the command for adding an environment.
// Besides registering flags, it groups them into named pflag sets so the
// custom usage template can render them as separate sections.
func buildEnvInitCmd() *cobra.Command {
	vars := initEnvVars{}
	cmd := &cobra.Command{
		Use:   "init",
		Short: "Creates a new environment in your application.",
		Example: `
Creates a test environment in your "default" AWS profile using default configuration.
/code $ copilot env init --name test --profile default --default-config
Creates a prod-iad environment using your "prod-admin" AWS profile.
/code $ copilot env init --name prod-iad --profile prod-admin --prod
Creates an environment with imported VPC resources.
/code $ copilot env init --import-vpc-id vpc-099c32d2b98cdcf47 \
/code --import-public-subnets subnet-013e8b691862966cf,subnet -014661ebb7ab8681a \
/code --import-private-subnets subnet-055fafef48fb3c547,subnet-00c9e76f288363e7f
Creates an environment with overridden CIDRs.
/code $ copilot env init --override-vpc-cidr 10.1.0.0/16 \
/code --override-public-cidrs 10.1.0.0/24,10.1.1.0/24 \
/code --override-private-cidrs 10.1.2.0/24,10.1.3.0/24`,
		RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
			opts, err := newInitEnvOpts(vars)
			if err != nil {
				return err
			}
			if err := opts.Validate(); err != nil {
				return err
			}
			if err := opts.Ask(); err != nil {
				return err
			}
			return opts.Execute()
		}),
	}
	// Flag registration; values are bound to vars and read by newInitEnvOpts.
	cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription)
	cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", envFlagDescription)
	cmd.Flags().StringVar(&vars.profile, profileFlag, "", profileFlagDescription)
	cmd.Flags().StringVar(&vars.tempCreds.AccessKeyID, accessKeyIDFlag, "", accessKeyIDFlagDescription)
	cmd.Flags().StringVar(&vars.tempCreds.SecretAccessKey, secretAccessKeyFlag, "", secretAccessKeyFlagDescription)
	cmd.Flags().StringVar(&vars.tempCreds.SessionToken, sessionTokenFlag, "", sessionTokenFlagDescription)
	cmd.Flags().StringVar(&vars.region, regionFlag, "", envRegionTokenFlagDescription)
	cmd.Flags().BoolVar(&vars.isProduction, prodEnvFlag, false, prodEnvFlagDescription)
	cmd.Flags().StringVar(&vars.importVPC.ID, vpcIDFlag, "", vpcIDFlagDescription)
	cmd.Flags().StringSliceVar(&vars.importVPC.PublicSubnetIDs, publicSubnetsFlag, nil, publicSubnetsFlagDescription)
	cmd.Flags().StringSliceVar(&vars.importVPC.PrivateSubnetIDs, privateSubnetsFlag, nil, privateSubnetsFlagDescription)
	cmd.Flags().IPNetVar(&vars.adjustVPC.CIDR, vpcCIDRFlag, net.IPNet{}, vpcCIDRFlagDescription)
	// TODO: use IPNetSliceVar when it is available (https://github.com/spf13/pflag/issues/273).
	cmd.Flags().StringSliceVar(&vars.adjustVPC.PublicSubnetCIDRs, publicSubnetCIDRsFlag, nil, publicSubnetCIDRsFlagDescription)
	cmd.Flags().StringSliceVar(&vars.adjustVPC.PrivateSubnetCIDRs, privateSubnetCIDRsFlag, nil, privateSubnetCIDRsFlagDescription)
	cmd.Flags().BoolVar(&vars.defaultConfig, defaultConfigFlag, false, defaultConfigFlagDescription)
	// Group the flags into display sections for the usage template below.
	flags := pflag.NewFlagSet("Common", pflag.ContinueOnError)
	flags.AddFlag(cmd.Flags().Lookup(appFlag))
	flags.AddFlag(cmd.Flags().Lookup(nameFlag))
	flags.AddFlag(cmd.Flags().Lookup(profileFlag))
	flags.AddFlag(cmd.Flags().Lookup(accessKeyIDFlag))
	flags.AddFlag(cmd.Flags().Lookup(secretAccessKeyFlag))
	flags.AddFlag(cmd.Flags().Lookup(sessionTokenFlag))
	flags.AddFlag(cmd.Flags().Lookup(regionFlag))
	flags.AddFlag(cmd.Flags().Lookup(defaultConfigFlag))
	flags.AddFlag(cmd.Flags().Lookup(prodEnvFlag))
	resourcesImportFlag := pflag.NewFlagSet("Import Existing Resources", pflag.ContinueOnError)
	resourcesImportFlag.AddFlag(cmd.Flags().Lookup(vpcIDFlag))
	resourcesImportFlag.AddFlag(cmd.Flags().Lookup(publicSubnetsFlag))
	resourcesImportFlag.AddFlag(cmd.Flags().Lookup(privateSubnetsFlag))
	resourcesConfigFlag := pflag.NewFlagSet("Configure Default Resources", pflag.ContinueOnError)
	resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(vpcCIDRFlag))
	resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(publicSubnetCIDRsFlag))
	resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(privateSubnetCIDRsFlag))
	cmd.Annotations = map[string]string{
		// The order of the sections we want to display.
		"sections":                    "Common,Import Existing Resources,Configure Default Resources",
		"Common":                      flags.FlagUsages(),
		"Import Existing Resources":   resourcesImportFlag.FlagUsages(),
		"Configure Default Resources": resourcesConfigFlag.FlagUsages(),
	}
	cmd.SetUsageTemplate(`{{h1 "Usage"}}{{if .Runnable}}
{{.UseLine}}{{end}}{{$annotations := .Annotations}}{{$sections := split .Annotations.sections ","}}{{if gt (len $sections) 0}}
{{range $i, $sectionName := $sections}}{{h1 (print $sectionName " Flags")}}
{{(index $annotations $sectionName) | trimTrailingWhitespaces}}{{if ne (inc $i) (len $sections)}}
{{end}}{{end}}{{end}}{{if .HasAvailableInheritedFlags}}
{{h1 "Global Flags"}}
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasExample}}
{{h1 "Examples"}}{{code .Example}}{{end}}
`)
	return cmd
}
| 1 | 18,017 | Should we say "specifying two public subnets"? | aws-copilot-cli | go |
@@ -32,6 +32,8 @@ func (current *PubSubSource) CheckImmutableFields(ctx context.Context, og apis.I
return nil
}
+ // TODO: revisit this.
+
// All of the fields are immutable because the controller doesn't understand when it would need
// to delete and create a new Receive Adapter with updated arguments. We could relax it slightly
// to allow a nil Sink -> non-nil Sink, but I don't think it is needed yet. | 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"github.com/google/go-cmp/cmp"
"github.com/knative/pkg/apis"
)
// CheckImmutableFields ensures the spec of a PubSubSource does not change
// after creation.
func (current *PubSubSource) CheckImmutableFields(ctx context.Context, og apis.Immutable) *apis.FieldError {
	original, ok := og.(*PubSubSource)
	if !ok {
		return &apis.FieldError{Message: "The provided original was not a PubSubSource"}
	}
	if original == nil {
		return nil
	}
	// Every field is immutable because the controller doesn't know when it
	// would need to delete and recreate a Receive Adapter with updated
	// arguments. This could be relaxed slightly to allow a nil Sink ->
	// non-nil Sink, but that isn't needed yet.
	diff := cmp.Diff(original.Spec, current.Spec)
	if diff == "" {
		return nil
	}
	return &apis.FieldError{
		Message: "Immutable fields changed (-old +new)",
		Paths:   []string{"spec"},
		Details: diff,
	}
}
| 1 | 7,990 | Issue number? When? Why? | google-knative-gcp | go |
@@ -35,6 +35,7 @@ from ._sapi4 import (
VOICECHARSET
)
import config
+import speech
import nvwave
import weakref
| 1 | #synthDrivers/sapi4.py
#A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2020 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import locale
from collections import OrderedDict
import winreg
from comtypes import CoCreateInstance, COMObject, COMError, GUID
from ctypes import byref, c_ulong, POINTER
from ctypes.wintypes import DWORD, WORD
from synthDriverHandler import SynthDriver,VoiceInfo, synthIndexReached, synthDoneSpeaking
from logHandler import log
from ._sapi4 import (
CLSID_MMAudioDest,
CLSID_TTSEnumerator,
IAudioMultiMediaDevice,
ITTSAttributes,
ITTSBufNotifySink,
ITTSCentralW,
ITTSEnumW,
TextSDATA,
TTSATTR_MAXPITCH,
TTSATTR_MAXSPEED,
TTSATTR_MAXVOLUME,
TTSATTR_MINPITCH,
TTSATTR_MINSPEED,
TTSATTR_MINVOLUME,
TTSDATAFLAG_TAGGED,
TTSFEATURE_PITCH,
TTSFEATURE_SPEED,
TTSFEATURE_VOLUME,
TTSMODEINFO,
VOICECHARSET
)
import config
import nvwave
import weakref
from speech.commands import IndexCommand, SpeechCommand, CharacterModeCommand
class SynthDriverBufSink(COMObject):
	"""COM sink receiving bookmark notifications from a SAPI4 engine.

	Holds only a weak reference to the driver so the sink cannot keep a
	terminated driver alive.
	"""
	_com_interfaces_ = [ITTSBufNotifySink]

	def __init__(self, synthRef: weakref.ReferenceType):
		self.synthRef = synthRef
		# While False, IUnknown_Release refuses to drop the final reference;
		# guards against engines that call Release() too many times.
		self._allowDelete = True
		super(SynthDriverBufSink,self).__init__()

	def ITTSBufNotifySink_BookMark(self, this, qTimeStamp, dwMarkNum):
		# Called by the engine when speech reaches a bookmark (\mrk=N\ tag).
		synth = self.synthRef()
		if synth is None:
			log.debugWarning("Called ITTSBufNotifySink_BookMark method on ITTSBufNotifySink while driver is dead")
			return
		synthIndexReached.notify(synth=synth, index=dwMarkNum)
		if synth._finalIndex == dwMarkNum:
			# The last index of the utterance was reached: speech is done.
			synth._finalIndex = None
			synthDoneSpeaking.notify(synth=synth)

	def IUnknown_Release(self, this, *args, **kwargs):
		# Refuse to release the final reference while _allowDelete is False:
		# some buggy engines over-release the sink (see SynthDriver.__init__).
		if not self._allowDelete and self._refcnt.value == 1:
			log.debugWarning("ITTSBufNotifySink::Release called too many times by engine")
			return 1
		return super(SynthDriverBufSink, self).IUnknown_Release(this, *args, **kwargs)
class SynthDriver(SynthDriver):
name="sapi4"
description="Microsoft Speech API version 4"
supportedSettings=[SynthDriver.VoiceSetting()]
supportedNotifications={synthIndexReached,synthDoneSpeaking}
@classmethod
def check(cls):
try:
winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, r"CLSID\%s" % CLSID_TTSEnumerator).Close()
return True
except WindowsError:
return False
def _fetchEnginesList(self):
enginesList=[]
self._ttsEngines.Reset()
while True:
mode=TTSMODEINFO()
fetched=c_ulong()
try:
self._ttsEngines.Next(1,byref(mode),byref(fetched))
except:
log.error("can't get next engine",exc_info=True)
break
if fetched.value==0:
break
enginesList.append(mode)
return enginesList
def __init__(self):
self._finalIndex=None
self._bufSink = SynthDriverBufSink(weakref.ref(self))
self._bufSinkPtr=self._bufSink.QueryInterface(ITTSBufNotifySink)
# HACK: Some buggy engines call Release() too many times on our buf sink.
# Therefore, don't let the buf sink be deleted before we release it ourselves.
self._bufSink._allowDelete=False
self._ttsEngines=CoCreateInstance(CLSID_TTSEnumerator, ITTSEnumW)
self._enginesList=self._fetchEnginesList()
if len(self._enginesList)==0:
raise RuntimeError("No Sapi4 engines available")
self.voice=str(self._enginesList[0].gModeID)
def terminate(self):
self._bufSink._allowDelete = True
def speak(self,speechSequence):
textList=[]
charMode=False
item=None
for item in speechSequence:
if isinstance(item,str):
textList.append(item.replace('\\','\\\\'))
elif isinstance(item, IndexCommand):
textList.append("\\mrk=%d\\"%item.index)
elif isinstance(item, CharacterModeCommand):
textList.append("\\RmS=1\\" if item.state else "\\RmS=0\\")
charMode=item.state
elif isinstance(item, SpeechCommand):
log.debugWarning("Unsupported speech command: %s"%item)
else:
log.error("Unknown speech: %s"%item)
if isinstance(item, IndexCommand):
# This is the index denoting the end of the speech sequence.
self._finalIndex=item.index
if charMode:
# Some synths stay in character mode if we don't explicitly disable it.
textList.append("\\RmS=0\\")
# Some SAPI4 synthesizers complete speech sequence just after the last text
# and ignore any indexes passed after it
# Therefore we add the pause of 1ms at the end
textList.append("\\PAU=1\\")
text="".join(textList)
flags=TTSDATAFLAG_TAGGED
self._ttsCentral.TextData(VOICECHARSET.CHARSET_TEXT, flags,TextSDATA(text),self._bufSinkPtr,ITTSBufNotifySink._iid_)
def cancel(self):
self._ttsCentral.AudioReset()
self.lastIndex=None
def pause(self,switch):
if switch:
try:
self._ttsCentral.AudioPause()
except COMError:
pass
else:
self._ttsCentral.AudioResume()
def removeSetting(self,name):
#Putting it here because currently no other synths make use of it. OrderedDict, where you are?
for i,s in enumerate(self.supportedSettings):
if s.name==name:
del self.supportedSettings[i]
return
def _set_voice(self,val):
try:
val=GUID(val)
except:
val=self._enginesList[0].gModeID
mode=None
for mode in self._enginesList:
if mode.gModeID==val:
break
if mode is None:
raise ValueError("no such mode: %s"%val)
self._currentMode=mode
self._ttsAudio=CoCreateInstance(CLSID_MMAudioDest,IAudioMultiMediaDevice)
self._ttsAudio.DeviceNumSet(nvwave.outputDeviceNameToID(config.conf["speech"]["outputDevice"], True))
self._ttsCentral=POINTER(ITTSCentralW)()
self._ttsEngines.Select(self._currentMode.gModeID,byref(self._ttsCentral),self._ttsAudio)
self._ttsAttrs=self._ttsCentral.QueryInterface(ITTSAttributes)
#Find out rate limits
hasRate=bool(mode.dwFeatures&TTSFEATURE_SPEED)
if hasRate:
try:
oldVal=DWORD()
self._ttsAttrs.SpeedGet(byref(oldVal))
self._ttsAttrs.SpeedSet(TTSATTR_MINSPEED)
newVal=DWORD()
self._ttsAttrs.SpeedGet(byref(newVal))
self._minRate=newVal.value
self._ttsAttrs.SpeedSet(TTSATTR_MAXSPEED)
self._ttsAttrs.SpeedGet(byref(newVal))
# ViaVoice (and perhaps other synths) doesn't seem to like the speed being set to maximum.
self._maxRate=newVal.value-1
self._ttsAttrs.SpeedSet(oldVal.value)
if self._maxRate<=self._minRate:
hasRate=False
except COMError:
hasRate=False
if hasRate:
if not self.isSupported('rate'):
self.supportedSettings.insert(1,SynthDriver.RateSetting())
else:
if self.isSupported("rate"): self.removeSetting("rate")
#Find out pitch limits
hasPitch=bool(mode.dwFeatures&TTSFEATURE_PITCH)
if hasPitch:
try:
oldVal=WORD()
self._ttsAttrs.PitchGet(byref(oldVal))
self._ttsAttrs.PitchSet(TTSATTR_MINPITCH)
newVal=WORD()
self._ttsAttrs.PitchGet(byref(newVal))
self._minPitch=newVal.value
self._ttsAttrs.PitchSet(TTSATTR_MAXPITCH)
self._ttsAttrs.PitchGet(byref(newVal))
self._maxPitch=newVal.value
self._ttsAttrs.PitchSet(oldVal.value)
if self._maxPitch<=self._minPitch:
hasPitch=False
except COMError:
hasPitch=False
if hasPitch:
if not self.isSupported('pitch'):
self.supportedSettings.insert(2,SynthDriver.PitchSetting())
else:
if self.isSupported('pitch'): self.removeSetting('pitch')
#Find volume limits
hasVolume=bool(mode.dwFeatures&TTSFEATURE_VOLUME)
if hasVolume:
try:
oldVal=DWORD()
self._ttsAttrs.VolumeGet(byref(oldVal))
self._ttsAttrs.VolumeSet(TTSATTR_MINVOLUME)
newVal=DWORD()
self._ttsAttrs.VolumeGet(byref(newVal))
self._minVolume=newVal.value
self._ttsAttrs.VolumeSet(TTSATTR_MAXVOLUME)
self._ttsAttrs.VolumeGet(byref(newVal))
self._maxVolume=newVal.value
self._ttsAttrs.VolumeSet(oldVal.value)
if self._maxVolume<=self._minVolume:
hasVolume=False
except COMError:
hasVolume=False
if hasVolume:
if not self.isSupported('volume'):
self.supportedSettings.insert(3,SynthDriver.VolumeSetting())
else:
if self.isSupported('volume'): self.removeSetting('volume')
	def _get_voice(self):
		# Voice getter: the current engine mode's GUID, stringified, is the
		# voice ID (matches the keys produced by _getAvailableVoices).
		return str(self._currentMode.gModeID)
def _getAvailableVoices(self):
voices=OrderedDict()
for mode in self._enginesList:
ID=str(mode.gModeID)
name="%s - %s"%(mode.szModeName,mode.szProductName)
try:
language=locale.windows_locale[mode.language.LanguageID]
except KeyError:
language=None
voices[ID]=VoiceInfo(ID,name,language)
return voices
	def _get_rate(self):
		# Rate getter: read the raw engine speed and convert it to a 0-100
		# percentage within the limits probed at initialization.
		val=DWORD()
		self._ttsAttrs.SpeedGet(byref(val))
		return self._paramToPercent(val.value,self._minRate,self._maxRate)
def _set_rate(self,val):
val=self._percentToParam(val,self._minRate,self._maxRate)
self._ttsAttrs.SpeedSet(val)
	def _get_pitch(self):
		# Pitch getter: raw engine pitch (a 16-bit WORD) converted to a 0-100
		# percentage within the probed pitch limits.
		val=WORD()
		self._ttsAttrs.PitchGet(byref(val))
		return self._paramToPercent(val.value,self._minPitch,self._maxPitch)
def _set_pitch(self,val):
val=self._percentToParam(val,self._minPitch,self._maxPitch)
self._ttsAttrs.PitchSet(val)
	def _get_volume(self):
		# Volume getter. The raw DWORD appears to pack two 16-bit channel
		# levels (see _set_volume, which duplicates the low word into the
		# high word); mask to the low word before converting to a percentage.
		val=DWORD()
		self._ttsAttrs.VolumeGet(byref(val))
		return self._paramToPercent(val.value&0xffff,self._minVolume&0xffff,self._maxVolume&0xffff)
	def _set_volume(self,val):
		# Volume setter: convert the percentage to a raw 16-bit level, then
		# duplicate it into the high word so both packed channels carry the
		# same level before handing the combined DWORD to the engine.
		val=self._percentToParam(val,self._minVolume&0xffff,self._maxVolume&0xffff)
		val+=val<<16
		self._ttsAttrs.VolumeSet(val)
| 1 | 32,523 | You should not rely on `PitchCommand` being imported into speech. Please import it from `speech.commands`. | nvaccess-nvda | py |
@@ -108,7 +108,7 @@ public abstract class BaseTestIceberg {
newCatalog.setConf(hadoopConfig);
newCatalog.initialize("nessie", ImmutableMap.of("ref", ref,
CatalogProperties.URI, uri,
- "auth_type", "NONE",
+ "auth-type", "NONE",
CatalogProperties.WAREHOUSE_LOCATION, temp.toUri().toString()
));
return newCatalog; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.nessie;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.BaseTable;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.LongType;
import org.apache.iceberg.types.Types.StructType;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.api.io.TempDir;
import org.projectnessie.api.ContentsApi;
import org.projectnessie.api.TreeApi;
import org.projectnessie.client.NessieClient;
import org.projectnessie.error.NessieConflictException;
import org.projectnessie.error.NessieNotFoundException;
import org.projectnessie.jaxrs.NessieJaxRsExtension;
import org.projectnessie.model.Branch;
import org.projectnessie.model.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.types.Types.NestedField.required;
public abstract class BaseTestIceberg {
@RegisterExtension
static NessieJaxRsExtension server = new NessieJaxRsExtension();
private static final Logger LOGGER = LoggerFactory.getLogger(BaseTestIceberg.class);
@TempDir
public Path temp;
protected NessieCatalog catalog;
protected NessieClient client;
protected TreeApi tree;
protected ContentsApi contents;
protected Configuration hadoopConfig;
protected final String branch;
private String uri;
public BaseTestIceberg(String branch) {
this.branch = branch;
}
private void resetData() throws NessieConflictException, NessieNotFoundException {
for (Reference r : tree.getAllReferences()) {
if (r instanceof Branch) {
tree.deleteBranch(r.getName(), r.getHash());
} else {
tree.deleteTag(r.getName(), r.getHash());
}
}
tree.createReference(Branch.of("main", null));
}
@BeforeEach
public void beforeEach() throws IOException {
String port = System.getProperty("quarkus.http.test-port", "19120");
uri = server.getURI().toString();
this.client = NessieClient.builder().withUri(uri).build();
tree = client.getTreeApi();
contents = client.getContentsApi();
resetData();
try {
tree.createReference(Branch.of(branch, null));
} catch (Exception e) {
// ignore, already created. Cant run this in BeforeAll as quarkus hasn't disabled auth
}
hadoopConfig = new Configuration();
catalog = initCatalog(branch);
}
NessieCatalog initCatalog(String ref) {
NessieCatalog newCatalog = new NessieCatalog();
newCatalog.setConf(hadoopConfig);
newCatalog.initialize("nessie", ImmutableMap.of("ref", ref,
CatalogProperties.URI, uri,
"auth_type", "NONE",
CatalogProperties.WAREHOUSE_LOCATION, temp.toUri().toString()
));
return newCatalog;
}
protected Table createTable(TableIdentifier tableIdentifier, int count) {
try {
return catalog.createTable(tableIdentifier, schema(count));
} catch (Throwable t) {
LOGGER.error("unable to do create " + tableIdentifier.toString(), t);
throw t;
}
}
protected void createTable(TableIdentifier tableIdentifier) {
Schema schema = new Schema(StructType.of(required(1, "id", LongType.get())).fields());
catalog.createTable(tableIdentifier, schema).location();
}
protected static Schema schema(int count) {
List<Types.NestedField> fields = new ArrayList<>();
for (int i = 0; i < count; i++) {
fields.add(required(i, "id" + i, Types.LongType.get()));
}
return new Schema(Types.StructType.of(fields).fields());
}
void createBranch(String name, String hash) throws NessieNotFoundException, NessieConflictException {
tree.createReference(Branch.of(name, hash));
}
@AfterEach
public void afterEach() throws Exception {
catalog.close();
client.close();
catalog = null;
client = null;
hadoopConfig = null;
}
static String metadataLocation(NessieCatalog catalog, TableIdentifier tableIdentifier) {
Table table = catalog.loadTable(tableIdentifier);
BaseTable baseTable = (BaseTable) table;
TableOperations ops = baseTable.operations();
NessieTableOperations icebergOps = (NessieTableOperations) ops;
return icebergOps.currentMetadataLocation();
}
}
| 1 | 39,296 | Is this a constant used in the Nessie project itself? If so, perhaps you might consider a follow up for adding `NessieCatalogProperties` class at some point, to help make them more clear to users looking to adopt Nessie coming from the Iceberg repo itself | apache-iceberg | java |
@@ -146,12 +146,12 @@ func (db *taskQueueDB) CreateTasks(tasks []*persistencespb.AllocatedTaskInfo) (*
// GetTasks returns a batch of tasks between the given range
func (db *taskQueueDB) GetTasks(minTaskID int64, maxTaskID int64, batchSize int) (*persistence.GetTasksResponse, error) {
return db.store.GetTasks(&persistence.GetTasksRequest{
- NamespaceID: db.namespaceID.String(),
- TaskQueue: db.taskQueueName,
- TaskType: db.taskType,
- BatchSize: batchSize,
- ReadLevel: minTaskID, // exclusive
- MaxReadLevel: &maxTaskID, // inclusive
+ NamespaceID: db.namespaceID.String(),
+ TaskQueue: db.taskQueueName,
+ TaskType: db.taskType,
+ PageSize: batchSize,
+ MinTaskID: minTaskID, // exclusive
+ MaxTaskID: maxTaskID, // inclusive
})
}
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package matching
import (
"sync"
"sync/atomic"
enumspb "go.temporal.io/api/enums/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/persistence"
)
type (
	// taskQueueDB serializes all conditional persistence writes for one task
	// queue (identified by namespace + name + type). The embedded mutex
	// guards rangeID and ackLevel, which mirror the last persisted values.
	taskQueueDB struct {
		sync.Mutex
		namespaceID namespace.ID
		taskQueueName string
		taskQueueKind enumspb.TaskQueueKind
		taskType enumspb.TaskQueueType
		rangeID int64
		ackLevel int64
		store persistence.TaskManager
		logger log.Logger
	}

	// taskQueueState is the snapshot of persisted lease state returned by
	// RenewLease.
	taskQueueState struct {
		rangeID int64
		ackLevel int64
	}
)
// newTaskQueueDB returns an instance of an object that represents
// persistence view of a taskQueue. All mutations / reads to taskQueues
// wrt persistence go through this object.
//
// This class will serialize writes to persistence that do condition updates. There are
// two reasons for doing this:
// - To work around known Cassandra issue where concurrent LWT to the same partition cause timeout errors
// - To provide the guarantee that there is only writer who updates taskQueue in persistence at any given point in time
// This guarantee makes some of the other code simpler and there is no impact to perf because updates to taskqueue are
// spread out and happen in background routines
//
// The cached rangeID / ackLevel start at their zero values and are populated
// by RenewLease.
func newTaskQueueDB(store persistence.TaskManager, namespaceID namespace.ID, name string, taskType enumspb.TaskQueueType, kind enumspb.TaskQueueKind, logger log.Logger) *taskQueueDB {
	return &taskQueueDB{
		namespaceID: namespaceID,
		taskQueueName: name,
		taskQueueKind: kind,
		taskType: taskType,
		store: store,
		logger: logger,
	}
}
// RangeID returns the current persistence view of rangeID
func (db *taskQueueDB) RangeID() int64 {
	// Read under the mutex so we observe the value last written by RenewLease.
	db.Lock()
	defer db.Unlock()
	return db.rangeID
}
// RenewLease renews the lease on a taskqueue. If there is no previous lease,
// this method will attempt to steal taskqueue from current owner
func (db *taskQueueDB) RenewLease() (taskQueueState, error) {
	db.Lock()
	defer db.Unlock()
	resp, err := db.store.LeaseTaskQueue(&persistence.LeaseTaskQueueRequest{
		NamespaceID: db.namespaceID.String(),
		TaskQueue: db.taskQueueName,
		TaskType: db.taskType,
		TaskQueueKind: db.taskQueueKind,
		// NOTE(review): the atomic load looks redundant here — the mutex is
		// held and all writes to rangeID visible in this file also hold it.
		// Confirm no lock-free reader/writer of rangeID exists elsewhere.
		RangeID: atomic.LoadInt64(&db.rangeID),
	})
	if err != nil {
		return taskQueueState{}, err
	}
	// Cache the freshly leased state so subsequent conditional updates
	// (UpdateState / CreateTasks) use the new rangeID and ackLevel.
	db.ackLevel = resp.TaskQueueInfo.Data.AckLevel
	db.rangeID = resp.TaskQueueInfo.RangeID
	return taskQueueState{rangeID: db.rangeID, ackLevel: db.ackLevel}, nil
}
// UpdateState updates the taskQueue state with the given value
func (db *taskQueueDB) UpdateState(ackLevel int64) error {
	db.Lock()
	defer db.Unlock()
	_, err := db.store.UpdateTaskQueue(&persistence.UpdateTaskQueueRequest{
		TaskQueueInfo: &persistencespb.TaskQueueInfo{
			NamespaceId: db.namespaceID.String(),
			Name: db.taskQueueName,
			TaskType: db.taskType,
			AckLevel: ackLevel,
			Kind: db.taskQueueKind,
		},
		RangeID: db.rangeID,
	})
	// Only advance the cached ack level once the rangeID-conditioned
	// persistence update has succeeded.
	if err == nil {
		db.ackLevel = ackLevel
	}
	return err
}
// CreateTasks creates a batch of given tasks for this task queue
func (db *taskQueueDB) CreateTasks(tasks []*persistencespb.AllocatedTaskInfo) (*persistence.CreateTasksResponse, error) {
	// Serialized under the mutex: this is a rangeID-conditioned write — see
	// the comment on newTaskQueueDB for why such writers are serialized.
	db.Lock()
	defer db.Unlock()
	return db.store.CreateTasks(
		&persistence.CreateTasksRequest{
			TaskQueueInfo: &persistence.PersistedTaskQueueInfo{
				Data: &persistencespb.TaskQueueInfo{
					NamespaceId: db.namespaceID.String(),
					Name: db.taskQueueName,
					TaskType: db.taskType,
					AckLevel: db.ackLevel,
					Kind: db.taskQueueKind,
				},
				RangeID: db.rangeID,
			},
			Tasks: tasks,
		})
}
// GetTasks returns a batch of tasks whose IDs lie in (minTaskID, maxTaskID]
// — the lower bound is exclusive, the upper bound inclusive.
func (db *taskQueueDB) GetTasks(minTaskID int64, maxTaskID int64, batchSize int) (*persistence.GetTasksResponse, error) {
	// Reads are not serialized under the mutex: they do no conditional
	// update and touch none of the cached state.
	return db.store.GetTasks(&persistence.GetTasksRequest{
		NamespaceID: db.namespaceID.String(),
		TaskQueue: db.taskQueueName,
		TaskType: db.taskType,
		BatchSize: batchSize,
		ReadLevel: minTaskID, // exclusive
		MaxReadLevel: &maxTaskID, // inclusive
	})
}
// CompleteTask deletes a single task from this task queue
func (db *taskQueueDB) CompleteTask(taskID int64) error {
	request := &persistence.CompleteTaskRequest{
		TaskQueue: &persistence.TaskQueueKey{
			NamespaceID: db.namespaceID.String(),
			Name: db.taskQueueName,
			TaskType: db.taskType,
		},
		TaskID: taskID,
	}
	if err := db.store.CompleteTask(request); err != nil {
		// Log with enough context to identify the queue, then surface the error.
		db.logger.Error("Persistent store operation failure",
			tag.StoreOperationCompleteTask,
			tag.Error(err),
			tag.TaskID(taskID),
			tag.WorkflowTaskQueueType(db.taskType),
			tag.WorkflowTaskQueueName(db.taskQueueName))
		return err
	}
	return nil
}
// CompleteTasksLessThan deletes of tasks less than the given taskID. Limit is
// the upper bound of number of tasks that can be deleted by this method. It may
// or may not be honored
func (db *taskQueueDB) CompleteTasksLessThan(taskID int64, limit int) (int, error) {
	// Not serialized under the mutex: this is not a rangeID-conditioned write.
	n, err := db.store.CompleteTasksLessThan(&persistence.CompleteTasksLessThanRequest{
		NamespaceID: db.namespaceID.String(),
		TaskQueueName: db.taskQueueName,
		TaskType: db.taskType,
		TaskID: taskID,
		Limit: limit,
	})
	if err != nil {
		// Log with enough context to identify the queue; the error (and the
		// count reported by the store) are still returned to the caller.
		db.logger.Error("Persistent store operation failure",
			tag.StoreOperationCompleteTasksLessThan,
			tag.Error(err),
			tag.TaskID(taskID),
			tag.WorkflowTaskQueueType(db.taskType),
			tag.WorkflowTaskQueueName(db.taskQueueName))
	}
	return n, err
}
| 1 | 13,559 | maybe we should rename: MinTaskID -> MinTaskIDExclusive, MaxTaskID -> MaxTaskIDInclusive, | temporalio-temporal | go |
@@ -70,7 +70,7 @@ const (
NvidiaVisibleDevicesEnvVar = "NVIDIA_VISIBLE_DEVICES"
GPUAssociationType = "gpu"
- NvidiaRuntime = "nvidia"
+ NvidiaRuntime = "ecs-nvidia"
arnResourceSections = 2
arnResourceDelimiter = "/" | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package task
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth"
"github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
resourcetype "github.com/aws/amazon-ecs-agent/agent/taskresource/types"
taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
"github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
)
const (
	// NetworkPauseContainerName is the internal name for the pause container
	NetworkPauseContainerName = "~internal~ecs~pause"
	// NamespacePauseContainerName is the internal name for the IPC resource namespace and/or
	// PID namespace sharing pause container
	NamespacePauseContainerName = "~internal~ecs~pause~namespace"
	// emptyHostVolumeName is the reserved source name for task-internal empty volumes.
	emptyHostVolumeName = "~internal~ecs-emptyvolume-source"
	// awsSDKCredentialsRelativeURIPathEnvironmentVariableName defines the name of the environment
	// variable in containers' config, which will be used by the AWS SDK to fetch
	// credentials.
	awsSDKCredentialsRelativeURIPathEnvironmentVariableName = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
	// NvidiaVisibleDevicesEnvVar is the environment variable carrying the
	// comma-separated GPU IDs assigned to a container (see populateGPUEnvironmentVariables).
	NvidiaVisibleDevicesEnvVar = "NVIDIA_VISIBLE_DEVICES"
	// GPUAssociationType identifies a task association entry as a GPU.
	GPUAssociationType = "gpu"
	// NvidiaRuntime is the nvidia docker runtime name — presumably applied to
	// containers that require GPUs (not referenced in this chunk; confirm at use site).
	NvidiaRuntime = "nvidia"
	// arnResourceSections / arnResourceDelimiter describe the expected shape of
	// the resource portion of an ARN ("<type>/<id>") — confirm at use site.
	arnResourceSections = 2
	arnResourceDelimiter = "/"
	// networkModeNone specifies the string used to define the `none` docker networking mode
	networkModeNone = "none"
	// dockerMappingContainerPrefix specifies the prefix string used for setting the
	// container's option (network, ipc, or pid) to that of another existing container
	dockerMappingContainerPrefix = "container:"
	// awslogsCredsEndpointOpt is the awslogs option that is used to pass in an
	// http endpoint for authentication
	awslogsCredsEndpointOpt = "awslogs-credentials-endpoint"
	// These constants identify the docker flag options for PID/IPC namespace modes.
	pidModeHost = "host"
	pidModeTask = "task"
	ipcModeHost = "host"
	ipcModeTask = "task"
	ipcModeSharable = "shareable"
	ipcModeNone = "none"
)
// TaskOverrides are the overrides applied to a task. Currently an empty
// struct; it is excluded from JSON serialization on Task (tag `json:"-"`).
type TaskOverrides struct{}
// Task is the internal representation of a task in the ECS agent
type Task struct {
	// Arn is the unique identifier for the task
	Arn string
	// Overrides are the overrides applied to a task
	Overrides TaskOverrides `json:"-"`
	// Family is the name of the task definition family
	Family string
	// Version is the version of the task definition
	Version string
	// Containers are the containers for the task
	Containers []*apicontainer.Container
	// Associations are the available associations for the task.
	Associations []Association `json:"associations"`
	// ResourcesMapUnsafe is the map of resource type to corresponding resources
	ResourcesMapUnsafe resourcetype.ResourcesMap `json:"resources"`
	// Volumes are the volumes for the task
	Volumes []TaskVolume `json:"volumes"`
	// CPU is a task-level limit for compute resources. A value of 1 means that
	// the task may access 100% of 1 vCPU on the instance
	CPU float64 `json:"Cpu,omitempty"`
	// Memory is a task-level limit for memory resources in bytes
	Memory int64 `json:"Memory,omitempty"`
	// DesiredStatusUnsafe represents the state where the task should go. Generally,
	// the desired status is informed by the ECS backend as a result of either
	// API calls made to ECS or decisions made by the ECS service scheduler.
	// The DesiredStatusUnsafe is almost always either apitaskstatus.TaskRunning or apitaskstatus.TaskStopped.
	// NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `UpdateStatus`,
	// `UpdateDesiredStatus`, `SetDesiredStatus`, and `GetDesiredStatus`.
	// TODO DesiredStatusUnsafe should probably be private with appropriately written
	// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
	// is handled properly so that the state storage continues to work.
	DesiredStatusUnsafe apitaskstatus.TaskStatus `json:"DesiredStatus"`
	// KnownStatusUnsafe represents the state where the task is. This is generally
	// the minimum of equivalent status types for the containers in the task;
	// if one container is at ContainerRunning and another is at ContainerPulled,
	// the task KnownStatusUnsafe would be TaskPulled.
	// NOTE: Do not access KnownStatusUnsafe directly. Instead, use `UpdateStatus`,
	// and `GetKnownStatus`.
	// TODO KnownStatusUnsafe should probably be private with appropriately written
	// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
	// is handled properly so that the state storage continues to work.
	KnownStatusUnsafe apitaskstatus.TaskStatus `json:"KnownStatus"`
	// KnownStatusTimeUnsafe captures the time when the KnownStatusUnsafe was last updated.
	// NOTE: Do not access KnownStatusTime directly, instead use `GetKnownStatusTime`.
	KnownStatusTimeUnsafe time.Time `json:"KnownTime"`
	// PullStartedAtUnsafe is the timestamp when the task start pulling the first container,
	// it won't be set if the pull never happens
	PullStartedAtUnsafe time.Time `json:"PullStartedAt"`
	// PullStoppedAtUnsafe is the timestamp when the task finished pulling the last container,
	// it won't be set if the pull never happens
	PullStoppedAtUnsafe time.Time `json:"PullStoppedAt"`
	// ExecutionStoppedAtUnsafe is the timestamp when the task desired status moved to stopped,
	// which is when the any of the essential containers stopped
	ExecutionStoppedAtUnsafe time.Time `json:"ExecutionStoppedAt"`
	// SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS SubmitTaskStateChange API.
	// TODO(samuelkarp) SentStatusUnsafe needs a lock and setters/getters.
	// TODO SentStatusUnsafe should probably be private with appropriately written
	// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
	// is handled properly so that the state storage continues to work.
	SentStatusUnsafe apitaskstatus.TaskStatus `json:"SentStatus"`
	// StartSequenceNumber / StopSequenceNumber record the ACS payload sequence
	// number that carried this task's desired status (see TaskFromACS).
	StartSequenceNumber int64
	StopSequenceNumber int64
	// ExecutionCredentialsID is the ID of credentials that are used by agent to
	// perform some action at the task level, such as pulling image from ECR
	ExecutionCredentialsID string `json:"executionCredentialsID"`
	// credentialsID is used to set the CredentialsId field for the
	// IAMRoleCredentials object associated with the task. This id can be
	// used to look up the credentials for task in the credentials manager
	credentialsID string
	// ENI is the elastic network interface specified by this task
	ENI *apieni.ENI
	// MemoryCPULimitsEnabled to determine if task supports CPU, memory limits
	MemoryCPULimitsEnabled bool `json:"MemoryCPULimitsEnabled,omitempty"`
	// PlatformFields consists of fields specific to linux/windows for a task
	PlatformFields PlatformFields `json:"PlatformFields,omitempty"`
	// terminalReason should be used when we explicitly move a task to stopped.
	// This ensures the task object carries some context for why it was explicitly
	// stopped.
	terminalReason string
	// terminalReasonOnce presumably guards one-time assignment of terminalReason
	// (the setter is not in this chunk — confirm).
	terminalReasonOnce sync.Once
	// PIDMode is used to determine how PID namespaces are organized between
	// containers of the Task
	PIDMode string `json:"PidMode,omitempty"`
	// IPCMode is used to determine how IPC resources should be shared among
	// containers of the Task
	IPCMode string `json:"IpcMode,omitempty"`
	// lock is for protecting all fields in the task struct
	lock sync.RWMutex
}
// TaskFromACS translates ecsacs.Task to apitask.Task by first marshaling the received
// ecsacs.Task to json and unmarshaling it as apitask.Task
func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, error) {
	data, err := jsonutil.BuildJSON(acsTask)
	if err != nil {
		return nil, err
	}
	task := &Task{}
	err = json.Unmarshal(data, task)
	if err != nil {
		return nil, err
	}
	// Record the ACS payload sequence number that carried this task's
	// desired status (start vs stop tracked separately).
	if task.GetDesiredStatus() == apitaskstatus.TaskRunning && envelope.SeqNum != nil {
		task.StartSequenceNumber = *envelope.SeqNum
	} else if task.GetDesiredStatus() == apitaskstatus.TaskStopped && envelope.SeqNum != nil {
		task.StopSequenceNumber = *envelope.SeqNum
	}
	// Overrides the container command if it's set
	for _, container := range task.Containers {
		if (container.Overrides != apicontainer.ContainerOverrides{}) && container.Overrides.Command != nil {
			container.Command = *container.Overrides.Command
		}
		// Every container gets a fresh (empty) transition-dependency map.
		container.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
	}
	// initialize resources map for task
	task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource)
	return task, nil
}
// PostUnmarshalTask is run after a task has been unmarshalled, but before it has been
// run. It is possible it will be subsequently called after that and should be
// able to handle such an occurrence appropriately (e.g. behave idempotently).
//
// It wires up task-level resources (cgroup spec, ASM auth, SSM secrets,
// docker volumes, GPU associations), the credentials and V3 metadata
// endpoints, and the pause-container provisioning dependencies.
func (task *Task) PostUnmarshalTask(cfg *config.Config,
	credentialsManager credentials.Manager, resourceFields *taskresource.ResourceFields,
	dockerClient dockerapi.DockerClient, ctx context.Context) error {
	// TODO, add rudimentary plugin support and call any plugins that want to
	// hook into this
	task.adjustForPlatform(cfg)
	// Task-level CPU/memory limits require a cgroup resource spec.
	if task.MemoryCPULimitsEnabled {
		err := task.initializeCgroupResourceSpec(cfg.CgroupPath, resourceFields)
		if err != nil {
			seelog.Errorf("Task [%s]: could not initialize resource: %v", task.Arn, err)
			return apierrors.NewResourceInitError(task.Arn, err)
		}
	}
	if task.requiresASMDockerAuthData() {
		task.initializeASMAuthResource(credentialsManager, resourceFields)
	}
	if task.requiresSSMSecret() {
		task.initializeSSMSecretResource(credentialsManager, resourceFields)
	}
	err := task.initializeDockerLocalVolumes(dockerClient, ctx)
	if err != nil {
		return apierrors.NewResourceInitError(task.Arn, err)
	}
	err = task.initializeDockerVolumes(cfg.SharedVolumeMatchFullConfig, dockerClient, ctx)
	if err != nil {
		return apierrors.NewResourceInitError(task.Arn, err)
	}
	// GPU associations are only resolved when the agent has GPU support enabled.
	if cfg.GPUSupportEnabled {
		err = task.addGPUResource()
		if err != nil {
			seelog.Errorf("Task [%s]: could not initialize GPU associations: %v", task.Arn, err)
			return apierrors.NewResourceInitError(task.Arn, err)
		}
	}
	task.initializeCredentialsEndpoint(credentialsManager)
	task.initializeContainersV3MetadataEndpoint(utils.NewDynamicUUIDProvider())
	task.addNetworkResourceProvisioningDependency(cfg)
	// Adds necessary Pause containers for sharing PID or IPC namespaces
	task.addNamespaceSharingProvisioningDependency(cfg)
	return nil
}
// addGPUResource links each GPU association on the task to its single target
// container and then exports the assigned GPU IDs into that container's
// environment. A GPU association must name exactly one container.
func (task *Task) addGPUResource() error {
	for _, association := range task.Associations {
		if association.Type != GPUAssociationType {
			continue
		}
		// One GPU can be associated with only one container.
		if len(association.Containers) != 1 {
			return fmt.Errorf("could not associate multiple containers to GPU %s", association.Name)
		}
		containerName := association.Containers[0]
		container, ok := task.ContainerByName(containerName)
		if !ok {
			return fmt.Errorf("could not find container with name %s for associating GPU %s",
				containerName, association.Name)
		}
		container.GPUIDs = append(container.GPUIDs, association.Name)
	}
	task.populateGPUEnvironmentVariables()
	return nil
}
// populateGPUEnvironmentVariables sets NVIDIA_VISIBLE_DEVICES on every
// container that has GPU IDs assigned, as a comma-separated list.
func (task *Task) populateGPUEnvironmentVariables() {
	for _, container := range task.Containers {
		if len(container.GPUIDs) == 0 {
			continue
		}
		container.MergeEnvironmentVariables(map[string]string{
			NvidiaVisibleDevicesEnvVar: strings.Join(container.GPUIDs, ","),
		})
	}
}
// shouldRequireNvidiaRuntime reports whether the container has been assigned
// visible NVIDIA devices via its environment (set by
// populateGPUEnvironmentVariables); callers presumably use this to select the
// nvidia docker runtime.
func (task *Task) shouldRequireNvidiaRuntime(container *apicontainer.Container) bool {
	_, ok := container.Environment[NvidiaVisibleDevicesEnvVar]
	return ok
}
// initializeDockerLocalVolumes scans all container mount points for volumes
// backed by the local docker driver, registers a VolumeResource for each, and
// makes the referencing container depend on the volume being created before
// the container is pulled.
func (task *Task) initializeDockerLocalVolumes(dockerClient dockerapi.DockerClient, ctx context.Context) error {
	var requiredLocalVolumes []string
	for _, container := range task.Containers {
		for _, mountPoint := range container.MountPoints {
			vol, ok := task.HostVolumeByName(mountPoint.SourceVolume)
			if !ok {
				continue
			}
			if localVolume, ok := vol.(*taskresourcevolume.LocalDockerVolume); ok {
				// Give the volume a unique task-scoped name (see volumeName)
				// and gate container pull on volume creation.
				localVolume.HostPath = task.volumeName(mountPoint.SourceVolume)
				container.BuildResourceDependency(mountPoint.SourceVolume,
					resourcestatus.ResourceStatus(taskresourcevolume.VolumeCreated),
					apicontainerstatus.ContainerPulled)
				requiredLocalVolumes = append(requiredLocalVolumes, mountPoint.SourceVolume)
			}
		}
	}
	if len(requiredLocalVolumes) == 0 {
		// No need to create the auxiliary local driver volumes
		return nil
	}
	// if we have required local volumes, create one with default local drive
	for _, volumeName := range requiredLocalVolumes {
		vol, _ := task.HostVolumeByName(volumeName)
		// BUG(samuelkarp) On Windows, volumes with names that differ only by case will collide
		scope := taskresourcevolume.TaskScope
		localVolume, err := taskresourcevolume.NewVolumeResource(ctx, volumeName,
			vol.Source(), scope, false,
			taskresourcevolume.DockerLocalVolumeDriver,
			make(map[string]string), make(map[string]string), dockerClient)
		if err != nil {
			return err
		}
		task.AddResource(resourcetype.DockerVolumeKey, localVolume)
	}
	return nil
}
// volumeName builds a unique task-scoped docker volume name of the form
// "ecs-<family>-<version>-<name>-<randomhex>".
func (task *Task) volumeName(name string) string {
	return strings.Join([]string{"ecs", task.Family, task.Version, name, utils.RandHex()}, "-")
}
// initializeDockerVolumes checks the volume resource in the task to determine if the agent
// should create the volume before creating the container
func (task *Task) initializeDockerVolumes(sharedVolumeMatchFullConfig bool, dockerClient dockerapi.DockerClient, ctx context.Context) error {
	for i, vol := range task.Volumes {
		// No need to do this for non-docker volume, eg: host bind/empty volume
		if vol.Type != DockerVolumeType {
			continue
		}

		dockerVolume, ok := vol.Volume.(*taskresourcevolume.DockerVolumeConfig)
		if !ok {
			return errors.New("task volume: volume configuration does not match the type 'docker'")
		}
		// Agent needs to create task-scoped volume
		if dockerVolume.Scope == taskresourcevolume.TaskScope {
			// Note: pass &task.Volumes[i] (not &vol) so the helper can mutate
			// the task's own volume entry.
			err := task.addTaskScopedVolumes(ctx, dockerClient, &task.Volumes[i])
			if err != nil {
				return err
			}
		} else {
			// Agent needs to create shared volume if that's auto provisioned
			err := task.addSharedVolumes(sharedVolumeMatchFullConfig, ctx, dockerClient, &task.Volumes[i])
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// addTaskScopedVolumes adds the task scoped volume into task resources and updates container dependency
func (task *Task) addTaskScopedVolumes(ctx context.Context, dockerClient dockerapi.DockerClient,
	vol *TaskVolume) error {

	volumeConfig := vol.Volume.(*taskresourcevolume.DockerVolumeConfig)
	// The docker-side name is made unique per task (see volumeName).
	volumeResource, err := taskresourcevolume.NewVolumeResource(
		ctx,
		vol.Name,
		task.volumeName(vol.Name),
		volumeConfig.Scope, volumeConfig.Autoprovision,
		volumeConfig.Driver, volumeConfig.DriverOpts,
		volumeConfig.Labels, dockerClient)
	if err != nil {
		return err
	}

	// Replace the task's volume config with the resource's copy so both stay
	// in sync, then register the resource and container dependencies.
	vol.Volume = &volumeResource.VolumeConfig
	task.AddResource(resourcetype.DockerVolumeKey, volumeResource)
	task.updateContainerVolumeDependency(vol.Name)
	return nil
}
// addSharedVolumes adds a shared (non task-scoped) volume into task resources
// and updates container dependencies.
//
// Behavior:
//   - Autoprovision == false: the named volume must already exist on the
//     instance, otherwise an error is returned.
//   - Autoprovision == true: a missing volume is added as a task resource so
//     the agent creates it. If it already exists and
//     sharedVolumeMatchFullConfig is set, the existing volume's labels and
//     driver options must match the task definition exactly.
func (task *Task) addSharedVolumes(sharedVolumeMatchFullConfig bool, ctx context.Context, dockerClient dockerapi.DockerClient,
	vol *TaskVolume) error {
	volumeConfig := vol.Volume.(*taskresourcevolume.DockerVolumeConfig)
	volumeConfig.DockerVolumeName = vol.Name
	// if autoprovision == true, we will auto-provision the volume if it does not exist already
	// else the named volume must exist
	if !volumeConfig.Autoprovision {
		volumeMetadata := dockerClient.InspectVolume(ctx, vol.Name, dockerapi.InspectVolumeTimeout)
		if volumeMetadata.Error != nil {
			return errors.Wrapf(volumeMetadata.Error, "initialize volume: volume detection failed, volume '%s' does not exist and autoprovision is set to false", vol.Name)
		}
		return nil
	}

	// at this point we know autoprovision = true
	// check if the volume configuration matches the one exists on the instance
	volumeMetadata := dockerClient.InspectVolume(ctx, volumeConfig.DockerVolumeName, dockerapi.InspectVolumeTimeout)
	if volumeMetadata.Error != nil {
		// Inspect the volume timed out, fail the task
		if _, ok := volumeMetadata.Error.(*dockerapi.DockerTimeoutError); ok {
			return volumeMetadata.Error
		}

		// Volume does not exist yet; since autoprovision is true on this
		// branch, the agent creates it. (Message previously said
		// "non-autoprovisioned", which was wrong here.)
		seelog.Infof("initialize volume: Task [%s]: autoprovisioned volume not found, adding to task resource %q", task.Arn, vol.Name)
		// this resource should be created by agent
		volumeResource, err := taskresourcevolume.NewVolumeResource(
			ctx,
			vol.Name,
			vol.Name,
			volumeConfig.Scope, volumeConfig.Autoprovision,
			volumeConfig.Driver, volumeConfig.DriverOpts,
			volumeConfig.Labels, dockerClient)
		if err != nil {
			return err
		}

		task.AddResource(resourcetype.DockerVolumeKey, volumeResource)
		task.updateContainerVolumeDependency(vol.Name)
		return nil
	}

	seelog.Infof("initialize volume: Task [%s]: volume [%s] already exists", task.Arn, volumeConfig.DockerVolumeName)
	if !sharedVolumeMatchFullConfig {
		seelog.Infof("initialize volume: Task [%s]: ECS_SHARED_VOLUME_MATCH_FULL_CONFIG is set to false and volume with name [%s] is found", task.Arn, volumeConfig.DockerVolumeName)
		return nil
	}

	// validate all the volume metadata fields match to the configuration
	if len(volumeMetadata.DockerVolume.Labels) == 0 && len(volumeMetadata.DockerVolume.Labels) == len(volumeConfig.Labels) {
		seelog.Infof("labels are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName)
	} else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Labels, volumeConfig.Labels) {
		// "intialize" typo fixed; branch is only reachable with autoprovision=true.
		return errors.Errorf("initialize volume: autoprovisioned volume does not match existing volume labels: existing: %v, expected: %v",
			volumeMetadata.DockerVolume.Labels, volumeConfig.Labels)
	}

	if len(volumeMetadata.DockerVolume.Options) == 0 && len(volumeMetadata.DockerVolume.Options) == len(volumeConfig.DriverOpts) {
		seelog.Infof("driver options are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName)
	} else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts) {
		return errors.Errorf("initialize volume: autoprovisioned volume does not match existing volume options: existing: %v, expected: %v",
			volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts)
	}

	// Right now we are not adding shared, autoprovision = true volume to task as resource if it already exists (i.e. when this task didn't create the volume).
	// if we need to change that, make a call to task.AddResource here.
	return nil
}
// updateContainerVolumeDependency registers a resource dependency on the named
// volume for every container that mounts it: the volume must reach the Created
// state before the container may be pulled.
func (task *Task) updateContainerVolumeDependency(name string) {
	for _, container := range task.Containers {
		for _, mp := range container.MountPoints {
			if mp.SourceVolume != name {
				continue
			}
			container.BuildResourceDependency(name,
				resourcestatus.ResourceCreated,
				apicontainerstatus.ContainerPulled)
		}
	}
}
// initializeCredentialsEndpoint injects the task IAM role credentials endpoint
// into every container's environment, if the task has credentials associated.
func (task *Task) initializeCredentialsEndpoint(credentialsManager credentials.Manager) {
	id := task.GetCredentialsID()
	if id == "" {
		// No credentials set for the task; nothing to inject.
		return
	}
	taskCredentials, ok := credentialsManager.GetTaskCredentials(id)
	if !ok {
		// The credentials id is set but unknown to the manager. This should
		// never happen: the payload handler registers credentials with the
		// manager before setting the id on the task.
		seelog.Errorf("Unable to get credentials for task: %s", task.Arn)
		return
	}

	credentialsEndpointRelativeURI := taskCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI()
	for _, container := range task.Containers {
		// The environment map is nil when the container definition sets no
		// environment variables; initialize it lazily.
		if container.Environment == nil {
			container.Environment = make(map[string]string)
		}
		container.Environment[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] = credentialsEndpointRelativeURI
	}
}
// initializeContainersV3MetadataEndpoint assigns each container a v3 endpoint
// id (when not already set), then injects the v3 metadata endpoint into the
// container as an environment variable.
func (task *Task) initializeContainersV3MetadataEndpoint(uuidProvider utils.UUIDProvider) {
	for _, container := range task.Containers {
		if container.GetV3EndpointID() == "" {
			// Endpoint id not yet generated for this container.
			container.SetV3EndpointID(uuidProvider.New())
		}
		container.InjectV3MetadataEndpoint()
	}
}
// requiresASMDockerAuthData reports whether at least one container in the task
// needs to retrieve private registry authentication data from ASM.
func (task *Task) requiresASMDockerAuthData() bool {
	for _, c := range task.Containers {
		if c.ShouldPullWithASMAuth() {
			return true
		}
	}
	return false
}
// initializeASMAuthResource registers the ASM auth resource on the task and
// makes every container that pulls with ASM auth depend on the resource being
// created before the container is pulled.
func (task *Task) initializeASMAuthResource(credentialsManager credentials.Manager,
	resourceFields *taskresource.ResourceFields) {
	res := asmauth.NewASMAuthResource(task.Arn, task.getAllASMAuthDataRequirements(),
		task.ExecutionCredentialsID, credentialsManager, resourceFields.ASMClientCreator)
	task.AddResource(asmauth.ResourceName, res)
	for _, c := range task.Containers {
		if !c.ShouldPullWithASMAuth() {
			continue
		}
		c.BuildResourceDependency(res.GetName(),
			resourcestatus.ResourceStatus(asmauth.ASMAuthStatusCreated),
			apicontainerstatus.ContainerPulled)
	}
}
// getAllASMAuthDataRequirements collects the ASM auth data of every container
// that pulls with ASM authentication. Returns nil when no container needs it.
func (task *Task) getAllASMAuthDataRequirements() []*apicontainer.ASMAuthData {
	var reqs []*apicontainer.ASMAuthData
	for _, c := range task.Containers {
		if !c.ShouldPullWithASMAuth() {
			continue
		}
		reqs = append(reqs, c.RegistryAuthentication.ASMAuthData)
	}
	return reqs
}
// requiresSSMSecret reports whether at least one container in the task needs
// to retrieve a secret from SSM Parameter Store.
func (task *Task) requiresSSMSecret() bool {
	for _, c := range task.Containers {
		if c.ShouldCreateWithSSMSecret() {
			return true
		}
	}
	return false
}
// initializeSSMSecretResource registers the SSM secret resource on the task
// and makes every container that consumes SSM secrets wait for their retrieval
// before being created.
func (task *Task) initializeSSMSecretResource(credentialsManager credentials.Manager,
	resourceFields *taskresource.ResourceFields) {
	res := ssmsecret.NewSSMSecretResource(task.Arn, task.getAllSSMSecretRequirements(),
		task.ExecutionCredentialsID, credentialsManager, resourceFields.SSMClientCreator)
	task.AddResource(ssmsecret.ResourceName, res)

	// A container that receives SSM secrets as env vars must wait until all
	// secrets have been retrieved.
	for _, c := range task.Containers {
		if !c.ShouldCreateWithSSMSecret() {
			continue
		}
		c.BuildResourceDependency(res.GetName(),
			resourcestatus.ResourceStatus(ssmsecret.SSMSecretCreated),
			apicontainerstatus.ContainerCreated)
	}
}
// getAllSSMSecretRequirements groups every SSM-provided secret in the task by
// region: the returned map's key is the region and its value all SSM secrets
// declared for that region.
func (task *Task) getAllSSMSecretRequirements() map[string][]apicontainer.Secret {
	reqs := make(map[string][]apicontainer.Secret)
	for _, container := range task.Containers {
		for _, secret := range container.Secrets {
			if secret.Provider == apicontainer.SecretProviderSSM {
				// Appending to a missing key yields a fresh slice, so the
				// explicit "initialize empty slice" step is unnecessary.
				reqs[secret.Region] = append(reqs[secret.Region], secret)
			}
		}
	}
	return reqs
}
// BuildCNIConfig constructs the cni configuration from eni
// It fails when the task has no ENI attached (i.e. it is not an awsvpc task).
func (task *Task) BuildCNIConfig() (*ecscni.Config, error) {
	if !task.isNetworkModeVPC() {
		return nil, errors.New("task config: task has no ENIs associated with it, unable to generate cni config")
	}

	cfg := &ecscni.Config{}
	eni := task.GetTaskENI()

	cfg.ENIID = eni.ID
	// NOTE(review): the config ID is populated with the ENI MAC address —
	// presumably the CNI plugin keys on the MAC; confirm this is intentional.
	cfg.ID = eni.MacAddress
	cfg.ENIMACAddress = eni.MacAddress
	cfg.SubnetGatewayIPV4Address = eni.GetSubnetGatewayIPV4Address()
	// Use the ENI's primary IPv4 address.
	for _, ipv4 := range eni.IPV4Addresses {
		if ipv4.Primary {
			cfg.ENIIPV4Address = ipv4.Address
			break
		}
	}

	// If there is ipv6 assigned to eni then set it (only the first address is used)
	if len(eni.IPV6Addresses) > 0 {
		cfg.ENIIPV6Address = eni.IPV6Addresses[0].Address
	}

	return cfg, nil
}
// isNetworkModeVPC reports whether the task uses task networking (awsvpc
// mode), which is the case exactly when an ENI is attached to the task.
func (task *Task) isNetworkModeVPC() bool {
	// Return the comparison directly instead of the if/else on nil.
	return task.GetTaskENI() != nil
}
// addNetworkResourceProvisioningDependency injects the CNI pause container
// into awsvpc tasks and wires up ordering: application containers may only be
// pulled after the pause container has provisioned network resources, and the
// pause container may only stop after all application containers stopped.
func (task *Task) addNetworkResourceProvisioningDependency(cfg *config.Config) {
	if !task.isNetworkModeVPC() {
		return
	}
	pauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerResourcesProvisioned)
	pauseContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
	pauseContainer.Name = NetworkPauseContainerName
	pauseContainer.Image = fmt.Sprintf("%s:%s", cfg.PauseContainerImageName, cfg.PauseContainerTag)
	pauseContainer.Essential = true
	pauseContainer.Type = apicontainer.ContainerCNIPause
	// Appended before the loop below; the loop skips it (and any other
	// internal container) via IsInternal().
	task.Containers = append(task.Containers, pauseContainer)

	for _, container := range task.Containers {
		if container.IsInternal() {
			continue
		}
		// App container: pull only after the pause container provisioned networking.
		container.BuildContainerDependency(NetworkPauseContainerName, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerPulled)
		// Pause container: stop only after this app container stopped.
		pauseContainer.BuildContainerDependency(container.Name, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped)
	}
}
// addNamespaceSharingProvisioningDependency injects the namespace pause
// container when the task shares its PID or IPC namespace at task level, and
// orders it so application containers are pulled only after it is running and
// it stops only after they stop.
func (task *Task) addNamespaceSharingProvisioningDependency(cfg *config.Config) {
	// Pause container does not need to be created if no namespace sharing will be done at task level
	if task.getIPCMode() != ipcModeTask && task.getPIDMode() != pidModeTask {
		return
	}
	namespacePauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerRunning)
	namespacePauseContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
	namespacePauseContainer.Name = NamespacePauseContainerName
	// NOTE(review): unlike the CNI pause container, the image here comes from
	// package-level defaults rather than cfg — confirm this is intentional.
	namespacePauseContainer.Image = fmt.Sprintf("%s:%s", config.DefaultPauseContainerImageName, config.DefaultPauseContainerTag)
	namespacePauseContainer.Essential = true
	namespacePauseContainer.Type = apicontainer.ContainerNamespacePause
	// Appended before the loop; IsInternal() keeps it out of the wiring below.
	task.Containers = append(task.Containers, namespacePauseContainer)

	for _, container := range task.Containers {
		if container.IsInternal() {
			continue
		}
		// App containers pull only after the pause container is running.
		container.BuildContainerDependency(NamespacePauseContainerName, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled)
		// The pause container stops only after each app container stopped.
		namespacePauseContainer.BuildContainerDependency(container.Name, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped)
	}
}
// ContainerByName returns the *Container for the given name
// The boolean result reports whether such a container exists in the task.
func (task *Task) ContainerByName(name string) (*apicontainer.Container, bool) {
	for _, c := range task.Containers {
		if c.Name == name {
			return c, true
		}
	}
	return nil, false
}
// HostVolumeByName returns the task Volume for the given a volume name in that
// task. The second return value indicates the presence of that volume
func (task *Task) HostVolumeByName(name string) (taskresourcevolume.Volume, bool) {
	for _, tv := range task.Volumes {
		if tv.Name == name {
			return tv.Volume, true
		}
	}
	return nil, false
}
// UpdateMountPoints updates the mount points of volumes that were created
// without specifying a host path. This is used as part of the empty host
// volume feature: once Docker reports the real source path of such a volume,
// it is copied back onto the LocalDockerVolume.
func (task *Task) UpdateMountPoints(cont *apicontainer.Container, vols []types.MountPoint) {
	for _, mountPoint := range cont.MountPoints {
		containerPath := getCanonicalPath(mountPoint.ContainerPath)
		for _, vol := range vols {
			// Match the exact destination or the destination without a
			// trailing separator: /path/ -> /path or \path\ -> \path.
			// (Plain == replaces strings.Compare(...) == 0, which the Go docs
			// reserve for three-way ordering.)
			if vol.Destination == containerPath ||
				vol.Destination == strings.TrimRight(containerPath, string(filepath.Separator)) {
				if hostVolume, exists := task.HostVolumeByName(mountPoint.SourceVolume); exists {
					if empty, ok := hostVolume.(*taskresourcevolume.LocalDockerVolume); ok {
						empty.HostPath = vol.Source
					}
				}
			}
		}
	}
}
// updateTaskKnownStatus updates the given task's status based on its container's status.
// It updates to the minimum of all containers no matter what
// It returns a TaskStatus indicating what change occurred or TaskStatusNone if
// there was no change
// Invariant: task known status is the minimum of container known status
func (task *Task) updateTaskKnownStatus() (newStatus apitaskstatus.TaskStatus) {
	seelog.Debugf("Updating task's known status, task: %s", task.String())
	// Set to a large 'impossible' status that can't be the min
	containerEarliestKnownStatus := apicontainerstatus.ContainerZombie
	var earliestKnownStatusContainer *apicontainer.Container
	essentialContainerStopped := false
	// Single pass: find the container with the minimum known status and note
	// whether any essential container has stopped.
	for _, container := range task.Containers {
		containerKnownStatus := container.GetKnownStatus()
		if containerKnownStatus == apicontainerstatus.ContainerStopped && container.Essential {
			essentialContainerStopped = true
		}
		if containerKnownStatus < containerEarliestKnownStatus {
			containerEarliestKnownStatus = containerKnownStatus
			earliestKnownStatusContainer = container
		}
	}
	// Only possible when the task has no containers; implies a code bug.
	if earliestKnownStatusContainer == nil {
		seelog.Criticalf(
			"Impossible state found while updating tasks's known status, earliest state recorded as %s for task [%v]",
			containerEarliestKnownStatus.String(), task)
		return apitaskstatus.TaskStatusNone
	}
	seelog.Debugf("Container with earliest known container is [%s] for task: %s",
		earliestKnownStatusContainer.String(), task.String())
	// If the essential container is stopped while other containers may be running
	// don't update the task status until the other containers are stopped.
	if earliestKnownStatusContainer.IsKnownSteadyState() && essentialContainerStopped {
		seelog.Debugf(
			"Essential container is stopped while other containers are running, not updating task status for task: %s",
			task.String())
		return apitaskstatus.TaskStatusNone
	}
	// We can't rely on earliest container known status alone for determining if the
	// task state needs to be updated as containers can have different steady states
	// defined. Instead we should get the task status for all containers' known
	// statuses and compute the min of this
	earliestKnownTaskStatus := task.getEarliestKnownTaskStatusForContainers()
	// Known status only moves forward; report no change otherwise.
	if task.GetKnownStatus() < earliestKnownTaskStatus {
		seelog.Debugf("Updating task's known status to: %s, task: %s",
			earliestKnownTaskStatus.String(), task.String())
		task.SetKnownStatus(earliestKnownTaskStatus)
		return task.GetKnownStatus()
	}
	return apitaskstatus.TaskStatusNone
}
// getEarliestKnownTaskStatusForContainers computes the minimum task status
// implied by the known statuses of all containers in the task, accounting for
// each container's own steady state.
func (task *Task) getEarliestKnownTaskStatusForContainers() apitaskstatus.TaskStatus {
	if len(task.Containers) == 0 {
		seelog.Criticalf("No containers in the task: %s", task.String())
		return apitaskstatus.TaskStatusNone
	}
	// Start from an unreachable 'high' status so any real status is lower.
	earliest := apitaskstatus.TaskZombie
	for _, c := range task.Containers {
		status := apitaskstatus.MapContainerToTaskStatus(c.GetKnownStatus(), c.GetSteadyStateStatus())
		if status < earliest {
			earliest = status
		}
	}
	return earliest
}
// DockerConfig converts the given container in this task to the format of
// the Docker SDK 'Config' struct. Exported wrapper around dockerConfig.
func (task *Task) DockerConfig(container *apicontainer.Container, apiVersion dockerclient.DockerVersion) (*dockercontainer.Config, *apierrors.DockerClientConfigError) {
	return task.dockerConfig(container, apiVersion)
}
// dockerConfig converts the container to the Docker SDK 'Config' struct:
// environment, entrypoint, command, exposed ports, raw DockerConfig.Config
// overrides, and — for the CNI pause container — the ENI hostname.
// Returns a DockerClientConfigError when the raw config cannot be decoded or
// when a DOCKER-type health check is declared without a docker healthcheck.
func (task *Task) dockerConfig(container *apicontainer.Container, apiVersion dockerclient.DockerVersion) (*dockercontainer.Config, *apierrors.DockerClientConfigError) {
	dockerEnv := make([]string, 0, len(container.Environment))
	for envKey, envVal := range container.Environment {
		dockerEnv = append(dockerEnv, envKey+"="+envVal)
	}

	var entryPoint []string
	if container.EntryPoint != nil {
		entryPoint = *container.EntryPoint
	}

	containerConfig := &dockercontainer.Config{
		Image:        container.Image,
		Cmd:          container.Command,
		Entrypoint:   entryPoint,
		ExposedPorts: task.dockerExposedPorts(container),
		Env:          dockerEnv,
	}

	if container.DockerConfig.Config != nil {
		// Overlay the raw JSON docker config from the task definition.
		err := json.Unmarshal([]byte(aws.StringValue(container.DockerConfig.Config)), &containerConfig)
		if err != nil {
			// Message fixed: previously read "Unable decode given docker config".
			return nil, &apierrors.DockerClientConfigError{"Unable to decode given docker config: " + err.Error()}
		}
	}
	if container.HealthCheckType == apicontainer.DockerHealthCheckType && containerConfig.Healthcheck == nil {
		return nil, &apierrors.DockerClientConfigError{
			"docker health check is nil while container health check type is DOCKER"}
	}

	if containerConfig.Labels == nil {
		containerConfig.Labels = make(map[string]string)
	}

	if container.Type == apicontainer.ContainerCNIPause {
		// apply hostname to pause container's docker config
		return task.applyENIHostname(containerConfig), nil
	}

	return containerConfig, nil
}
// dockerExposedPorts builds the set of ports to expose for the container,
// keyed by "port/protocol" in Docker's nat.Port format.
func (task *Task) dockerExposedPorts(container *apicontainer.Container) nat.PortSet {
	exposed := make(nat.PortSet)
	for _, binding := range container.Ports {
		port := nat.Port(strconv.Itoa(int(binding.ContainerPort)) + "/" + binding.Protocol.String())
		exposed[port] = struct{}{}
	}
	return exposed
}
// DockerHostConfig construct the configuration recognized by docker
// Exported wrapper around dockerHostConfig.
func (task *Task) DockerHostConfig(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer, apiVersion dockerclient.DockerVersion) (*dockercontainer.HostConfig, *apierrors.HostConfigError) {
	return task.dockerHostConfig(container, dockerContainerMap, apiVersion)
}
// ApplyExecutionRoleLogsAuth will check whether the task has execution role
// credentials, and add the generated credentials endpoint to the associated
// HostConfig's log driver options. Returns a HostConfigError when no
// execution credentials are available for the task.
func (task *Task) ApplyExecutionRoleLogsAuth(hostConfig *dockercontainer.HostConfig, credentialsManager credentials.Manager) *apierrors.HostConfigError {
	id := task.GetExecutionCredentialsID()
	if id == "" {
		// No execution credentials set for the task. Do not inject the endpoint environment variable.
		return &apierrors.HostConfigError{"No execution credentials set for the task"}
	}

	executionRoleCredentials, ok := credentialsManager.GetTaskCredentials(id)
	if !ok {
		// Task has credentials id set, but credentials manager is unaware of
		// the id. This should never happen as the payload handler sets
		// credentialsId for the task after adding credentials to the
		// credentials manager
		return &apierrors.HostConfigError{"Unable to get execution role credentials for task"}
	}
	credentialsEndpointRelativeURI := executionRoleCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI()
	// The awslogs driver reads the credentials endpoint from its log options.
	hostConfig.LogConfig.Config[awslogsCredsEndpointOpt] = credentialsEndpointRelativeURI
	return nil
}
// dockerHostConfig assembles the Docker HostConfig for the container: links,
// host binds, port bindings, volumes-from, resource limits, optional nvidia
// runtime, raw HostConfig overrides from the task definition, platform
// overrides, and network/PID/IPC namespace overrides.
func (task *Task) dockerHostConfig(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer, apiVersion dockerclient.DockerVersion) (*dockercontainer.HostConfig, *apierrors.HostConfigError) {
	dockerLinkArr, err := task.dockerLinks(container, dockerContainerMap)
	if err != nil {
		return nil, &apierrors.HostConfigError{err.Error()}
	}

	dockerPortMap := task.dockerPortMap(container)

	volumesFrom, err := task.dockerVolumesFrom(container, dockerContainerMap)
	if err != nil {
		return nil, &apierrors.HostConfigError{err.Error()}
	}

	binds, err := task.dockerHostBinds(container)
	if err != nil {
		return nil, &apierrors.HostConfigError{err.Error()}
	}

	resources := task.getDockerResources(container)

	// Populate hostConfig
	hostConfig := &dockercontainer.HostConfig{
		Links:        dockerLinkArr,
		Binds:        binds,
		PortBindings: dockerPortMap,
		VolumesFrom:  volumesFrom,
		Resources:    resources,
	}

	// GPU containers need the nvidia container runtime.
	if task.shouldRequireNvidiaRuntime(container) {
		seelog.Debugf("Setting runtime as nvidia for container %s", container.Name)
		hostConfig.Runtime = NvidiaRuntime
	}

	// Overlay any raw HostConfig JSON from the task definition on top of the
	// config built above.
	if container.DockerConfig.HostConfig != nil {
		err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig)
		if err != nil {
			return nil, &apierrors.HostConfigError{"Unable to decode given host config: " + err.Error()}
		}
	}

	err = task.platformHostConfigOverride(hostConfig)
	if err != nil {
		return nil, &apierrors.HostConfigError{err.Error()}
	}

	// Determine if network mode should be overridden and override it if needed
	ok, networkMode := task.shouldOverrideNetworkMode(container, dockerContainerMap)
	if ok {
		hostConfig.NetworkMode = dockercontainer.NetworkMode(networkMode)
		// Override 'awsvpc' parameters if needed
		if container.Type == apicontainer.ContainerCNIPause {

			// apply ExtraHosts to HostConfig for pause container
			if hosts := task.generateENIExtraHosts(); hosts != nil {
				hostConfig.ExtraHosts = append(hostConfig.ExtraHosts, hosts...)
			}

			// Override the DNS settings for the pause container if ENI has custom
			// DNS settings; note this returns early, skipping PID/IPC overrides
			// (the pause container never needs them).
			return task.overrideDNS(hostConfig), nil
		}
	}

	ok, pidMode := task.shouldOverridePIDMode(container, dockerContainerMap)
	if ok {
		hostConfig.PidMode = dockercontainer.PidMode(pidMode)
	}

	ok, ipcMode := task.shouldOverrideIPCMode(container, dockerContainerMap)
	if ok {
		hostConfig.IpcMode = dockercontainer.IpcMode(ipcMode)
	}

	return hostConfig, nil
}
// getDockerResources builds the Resources section of the Docker HostConfig
// for the given container: memory limit in bytes and CPU shares.
func (task *Task) getDockerResources(container *apicontainer.Container) dockercontainer.Resources {
	// Container memory is specified in MiB; Docker expects bytes.
	memBytes := int64(container.Memory * 1024 * 1024)
	if memBytes != 0 && memBytes < apicontainer.DockerContainerMinimumMemoryInBytes {
		// Clamp to the minimum Docker will accept.
		seelog.Warnf("Task %s container %s memory setting is too low, increasing to %d bytes",
			task.Arn, container.Name, apicontainer.DockerContainerMinimumMemoryInBytes)
		memBytes = apicontainer.DockerContainerMinimumMemoryInBytes
	}
	return dockercontainer.Resources{
		Memory:    memBytes,
		CPUShares: task.dockerCPUShares(container.CPU),
	}
}
// shouldOverrideNetworkMode returns true if the network mode of the container
// needs to be overridden, along with the override string. Internal containers
// get networkModeNone; for awsvpc tasks, application containers join the CNI
// pause container's network namespace ("container:<dockerID>").
func (task *Task) shouldOverrideNetworkMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) {
	// TODO. We can do an early return here by determining which kind of task it is
	// Example: Does this task have ENIs in its payload, what is its networking mode etc
	if container.IsInternal() {
		// If it's an internal container, set the network mode to none.
		// Currently, internal containers are either for creating empty host
		// volumes or for creating the 'pause' container. Both of these
		// only need the network mode to be set to "none"
		return true, networkModeNone
	}

	// For other types of containers, determine if the container map contains
	// a pause container. Since a pause container is only added to the task
	// when using non docker daemon supported network modes, its existence
	// indicates the need to configure the network mode outside of supported
	// network drivers
	if task.GetTaskENI() == nil {
		return false, ""
	}

	pauseContName := ""
	for _, cont := range task.Containers {
		if cont.Type == apicontainer.ContainerCNIPause {
			pauseContName = cont.Name
			break
		}
	}
	if pauseContName == "" {
		// Bug fix: was seelog.Critical with a %s verb, which logged the verb
		// literally; Criticalf applies the format arguments.
		seelog.Criticalf("Pause container required, but not found in the task: %s", task.String())
		return false, ""
	}
	pauseContainer, ok := dockerContainerMap[pauseContName]
	if !ok || pauseContainer == nil {
		// This should never be the case and implies a code-bug.
		seelog.Criticalf("Pause container required, but not found in container map for container: [%s] in task: %s",
			container.String(), task.String())
		return false, ""
	}
	return true, dockerMappingContainerPrefix + pauseContainer.DockerID
}
// overrideDNS copies the ENI's custom DNS servers and search domains into the
// host config when the task has an ENI. Only meaningful for the pause
// container: other containers share its network namespace and therefore its
// /etc/resolv.conf.
func (task *Task) overrideDNS(hostConfig *dockercontainer.HostConfig) *dockercontainer.HostConfig {
	if eni := task.GetTaskENI(); eni != nil {
		hostConfig.DNS = eni.DomainNameServers
		hostConfig.DNSSearch = eni.DomainNameSearchList
	}
	return hostConfig
}
// applyENIHostname sets the hostname provided by the ENI message on the
// container's docker config; used to configure the pause container of awsvpc
// tasks. A missing ENI or empty hostname leaves the config untouched.
func (task *Task) applyENIHostname(dockerConfig *dockercontainer.Config) *dockercontainer.Config {
	eni := task.GetTaskENI()
	if eni == nil {
		return dockerConfig
	}
	if hostname := eni.GetHostname(); hostname != "" {
		dockerConfig.Hostname = hostname
	}
	return dockerConfig
}
// generateENIExtraHosts returns "hostname:ip" entries built from the ENI's
// hostname and each of its IPv4 addresses, or nil when there is no ENI or the
// ENI has no hostname.
func (task *Task) generateENIExtraHosts() []string {
	eni := task.GetTaskENI()
	if eni == nil {
		return nil
	}
	hostname := eni.GetHostname()
	if hostname == "" {
		return nil
	}
	extraHosts := []string{}
	for _, ip := range eni.GetIPV4Addresses() {
		extraHosts = append(extraHosts, fmt.Sprintf("%s:%s", hostname, ip))
	}
	return extraHosts
}
// shouldOverridePIDMode reports whether the container's PID namespace mode
// must be overridden, and if so returns the docker PidMode string to use.
func (task *Task) shouldOverridePIDMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) {
	// Internal containers (empty host volume, CNI pause, namespace pause)
	// keep the default private PID namespace.
	if container.IsInternal() {
		return false, ""
	}

	switch task.getPIDMode() {
	case pidModeHost:
		return true, pidModeHost

	case pidModeTask:
		pauseCont, ok := task.ContainerByName(NamespacePauseContainerName)
		if !ok {
			seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
			task.SetDesiredStatus(apitaskstatus.TaskStopped)
			return false, ""
		}
		pauseDockerContainer, ok := dockerContainerMap[pauseCont.Name]
		if !ok || pauseDockerContainer == nil {
			// A missing docker container for a defined task container implies a code bug.
			seelog.Criticalf("Namespace Pause docker container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
			task.SetDesiredStatus(apitaskstatus.TaskStopped)
			return false, ""
		}
		return true, dockerMappingContainerPrefix + pauseDockerContainer.DockerID

	// Any other PID mode needs no override.
	default:
		return false, ""
	}
}
// shouldOverrideIPCMode returns true if the IPCMode of the container needs to
// be overridden, along with the override string. The namespace pause container
// is made "shareable" when the task-level IPC mode is "task"; application
// containers then join its IPC namespace.
func (task *Task) shouldOverrideIPCMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) {
	// All internal containers do not need the same IPCMode. The NamespaceContainerPause
	// needs to be "shareable" if ipcMode is "task". All other internal containers should
	// defer to the Docker daemon default option (either shareable or private depending on
	// version and configuration)
	if container.IsInternal() {
		if container.Type == apicontainer.ContainerNamespacePause {
			// Setting NamespaceContainerPause to be sharable with other containers
			if task.getIPCMode() == ipcModeTask {
				return true, ipcModeSharable
			}
		}
		// Defaulting to Docker daemon default option
		return false, ""
	}

	switch task.getIPCMode() {
	// No IPCMode provided in Task Definition, no need to override
	case "":
		return false, ""
	// IPCMode is none - container will have own private namespace with /dev/shm not mounted
	case ipcModeNone:
		return true, ipcModeNone
	case ipcModeHost:
		return true, ipcModeHost
	case ipcModeTask:
		pauseCont, ok := task.ContainerByName(NamespacePauseContainerName)
		if !ok {
			seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
			task.SetDesiredStatus(apitaskstatus.TaskStopped)
			return false, ""
		}
		pauseDockerID, ok := dockerContainerMap[pauseCont.Name]
		if !ok || pauseDockerID == nil {
			// Docker container shouldn't be nil or not exist if the Container definition within task exists; implies code-bug
			// Message aligned with shouldOverridePIDMode: it is the *docker*
			// container that is missing on this branch.
			seelog.Criticalf("Namespace Pause docker container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
			task.SetDesiredStatus(apitaskstatus.TaskStopped)
			return false, ""
		}
		return true, dockerMappingContainerPrefix + pauseDockerID.DockerID
	default:
		return false, ""
	}
}
// dockerLinks resolves the container's links ("name" or "name:alias") into
// Docker link strings of the form "<dockerName>:<alias>". A link with no alias
// falls back to the link name (with a warning).
func (task *Task) dockerLinks(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) ([]string, error) {
	links := make([]string, len(container.Links))
	for i, link := range container.Links {
		parts := strings.Split(link, ":")
		if len(parts) > 2 {
			return []string{}, errors.New("Invalid link format")
		}
		name := parts[0]
		alias := name
		if len(parts) == 2 {
			alias = parts[1]
		} else {
			seelog.Warnf("Link name [%s] found with no linkalias for container: [%s] in task: [%s]",
				name, container.String(), task.String())
		}

		target, ok := dockerContainerMap[name]
		if !ok {
			return []string{}, errors.New("Link target not available: " + name)
		}
		links[i] = target.DockerName + ":" + alias
	}
	return links, nil
}
// dockerPortMap builds Docker port bindings (container "port/protocol" ->
// host ports) from the container's port bindings.
func (task *Task) dockerPortMap(container *apicontainer.Container) nat.PortMap {
	dockerPortMap := nat.PortMap{}
	for _, portBinding := range container.Ports {
		dockerPort := nat.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
		// Appending to a missing key yields a fresh slice, so the explicit
		// existence check the old code performed is unnecessary.
		dockerPortMap[dockerPort] = append(dockerPortMap[dockerPort],
			nat.PortBinding{HostPort: strconv.Itoa(int(portBinding.HostPort))})
	}
	return dockerPortMap
}
// dockerVolumesFrom resolves the container's VolumesFrom entries into Docker
// "--volumes-from" strings, appending ":ro" for read-only mounts.
func (task *Task) dockerVolumesFrom(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) ([]string, error) {
	volumesFrom := make([]string, len(container.VolumesFrom))
	for i, volume := range container.VolumesFrom {
		target, ok := dockerContainerMap[volume.SourceContainer]
		if !ok {
			return []string{}, errors.New("Volume target not available: " + volume.SourceContainer)
		}
		entry := target.DockerName
		if volume.ReadOnly {
			entry += ":ro"
		}
		volumesFrom[i] = entry
	}
	return volumesFrom, nil
}
// dockerHostBinds builds the Docker bind-mount strings
// ("source:containerPath[:ro]") for the container's mount points.
func (task *Task) dockerHostBinds(container *apicontainer.Container) ([]string, error) {
	if container.Name == emptyHostVolumeName {
		// emptyHostVolumes are handled as a special case in config, not
		// hostConfig
		return []string{}, nil
	}

	binds := make([]string, len(container.MountPoints))
	for i, mountPoint := range container.MountPoints {
		hv, ok := task.HostVolumeByName(mountPoint.SourceVolume)
		if !ok {
			return []string{}, errors.New("Invalid volume referenced: " + mountPoint.SourceVolume)
		}

		// Both sides of the bind must be non-empty paths.
		if hv.Source() == "" || mountPoint.ContainerPath == "" {
			seelog.Errorf(
				"Unable to resolve volume mounts for container [%s]; invalid path: [%s]; [%s] -> [%s] in task: [%s]",
				container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath, task.String())
			// NOTE(review): the returned message interpolates container.Name into
			// the "invalid path:" slot; the argument ordering looks off compared
			// to the log line above — confirm before relying on this text.
			return []string{}, errors.Errorf("Unable to resolve volume mounts; invalid path: %s %s; %s -> %s",
				container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath)
		}

		bind := hv.Source() + ":" + mountPoint.ContainerPath
		if mountPoint.ReadOnly {
			bind += ":ro"
		}
		binds[i] = bind
	}
	return binds, nil
}
// UpdateStatus updates a task's known and desired statuses to be compatible
// with all of its containers
// It will return a bool indicating if there was a change
func (task *Task) UpdateStatus() bool {
change := task.updateTaskKnownStatus()
// DesiredStatus can change based on a new known status
task.UpdateDesiredStatus()
return change != apitaskstatus.TaskStatusNone
}
// UpdateDesiredStatus sets the known status of the task
func (task *Task) UpdateDesiredStatus() {
task.lock.Lock()
defer task.lock.Unlock()
task.updateTaskDesiredStatusUnsafe()
task.updateContainerDesiredStatusUnsafe(task.DesiredStatusUnsafe)
task.updateResourceDesiredStatusUnsafe(task.DesiredStatusUnsafe)
}
// updateTaskDesiredStatusUnsafe determines what status the task should properly be at based on the containers' statuses
// Invariant: task desired status must be stopped if any essential container is stopped
func (task *Task) updateTaskDesiredStatusUnsafe() {
seelog.Debugf("Updating task: [%s]", task.stringUnsafe())
// A task's desired status is stopped if any essential container is stopped
// Otherwise, the task's desired status is unchanged (typically running, but no need to change)
for _, cont := range task.Containers {
if cont.Essential && (cont.KnownTerminal() || cont.DesiredTerminal()) {
seelog.Debugf("Updating task desired status to stopped because of container: [%s]; task: [%s]",
cont.Name, task.stringUnsafe())
task.DesiredStatusUnsafe = apitaskstatus.TaskStopped
}
}
}
// updateContainerDesiredStatusUnsafe sets all container's desired status's to the
// task's desired status
// Invariant: container desired status is <= task desired status converted to container status
// Note: task desired status and container desired status is typically only RUNNING or STOPPED
func (task *Task) updateContainerDesiredStatusUnsafe(taskDesiredStatus apitaskstatus.TaskStatus) {
for _, container := range task.Containers {
taskDesiredStatusToContainerStatus := apitaskstatus.MapTaskToContainerStatus(taskDesiredStatus, container.GetSteadyStateStatus())
if container.GetDesiredStatus() < taskDesiredStatusToContainerStatus {
container.SetDesiredStatus(taskDesiredStatusToContainerStatus)
}
}
}
// updateResourceDesiredStatusUnsafe sets all resources' desired status depending on the
// task's desired status
// TODO: Create a mapping of resource status to the corresponding task status and use it here
func (task *Task) updateResourceDesiredStatusUnsafe(taskDesiredStatus apitaskstatus.TaskStatus) {
resources := task.getResourcesUnsafe()
for _, r := range resources {
if taskDesiredStatus == apitaskstatus.TaskRunning {
if r.GetDesiredStatus() < r.SteadyState() {
r.SetDesiredStatus(r.SteadyState())
}
} else {
if r.GetDesiredStatus() < r.TerminalStatus() {
r.SetDesiredStatus(r.TerminalStatus())
}
}
}
}
// SetKnownStatus sets the known status of the task
func (task *Task) SetKnownStatus(status apitaskstatus.TaskStatus) {
task.setKnownStatus(status)
task.updateKnownStatusTime()
}
func (task *Task) setKnownStatus(status apitaskstatus.TaskStatus) {
task.lock.Lock()
defer task.lock.Unlock()
task.KnownStatusUnsafe = status
}
func (task *Task) updateKnownStatusTime() {
task.lock.Lock()
defer task.lock.Unlock()
task.KnownStatusTimeUnsafe = ttime.Now()
}
// GetKnownStatus gets the KnownStatus of the task
func (task *Task) GetKnownStatus() apitaskstatus.TaskStatus {
task.lock.RLock()
defer task.lock.RUnlock()
return task.KnownStatusUnsafe
}
// GetKnownStatusTime gets the KnownStatusTime of the task
func (task *Task) GetKnownStatusTime() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.KnownStatusTimeUnsafe
}
// SetCredentialsID sets the credentials ID for the task
func (task *Task) SetCredentialsID(id string) {
task.lock.Lock()
defer task.lock.Unlock()
task.credentialsID = id
}
// GetCredentialsID gets the credentials ID for the task
func (task *Task) GetCredentialsID() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.credentialsID
}
// SetExecutionRoleCredentialsID sets the ID for the task execution role credentials
func (task *Task) SetExecutionRoleCredentialsID(id string) {
task.lock.Lock()
defer task.lock.Unlock()
task.ExecutionCredentialsID = id
}
// GetExecutionCredentialsID gets the credentials ID for the task
func (task *Task) GetExecutionCredentialsID() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.ExecutionCredentialsID
}
// GetDesiredStatus gets the desired status of the task
func (task *Task) GetDesiredStatus() apitaskstatus.TaskStatus {
task.lock.RLock()
defer task.lock.RUnlock()
return task.DesiredStatusUnsafe
}
// SetDesiredStatus sets the desired status of the task
func (task *Task) SetDesiredStatus(status apitaskstatus.TaskStatus) {
task.lock.Lock()
defer task.lock.Unlock()
task.DesiredStatusUnsafe = status
}
// GetSentStatus safely returns the SentStatus of the task
func (task *Task) GetSentStatus() apitaskstatus.TaskStatus {
task.lock.RLock()
defer task.lock.RUnlock()
return task.SentStatusUnsafe
}
// SetSentStatus safely sets the SentStatus of the task
func (task *Task) SetSentStatus(status apitaskstatus.TaskStatus) {
task.lock.Lock()
defer task.lock.Unlock()
task.SentStatusUnsafe = status
}
// SetTaskENI sets the eni information of the task
func (task *Task) SetTaskENI(eni *apieni.ENI) {
task.lock.Lock()
defer task.lock.Unlock()
task.ENI = eni
}
// GetTaskENI returns the eni of task, for now task can only have one enis
func (task *Task) GetTaskENI() *apieni.ENI {
task.lock.RLock()
defer task.lock.RUnlock()
return task.ENI
}
// GetStopSequenceNumber returns the stop sequence number of a task
func (task *Task) GetStopSequenceNumber() int64 {
task.lock.RLock()
defer task.lock.RUnlock()
return task.StopSequenceNumber
}
// SetStopSequenceNumber sets the stop seqence number of a task
func (task *Task) SetStopSequenceNumber(seqnum int64) {
task.lock.Lock()
defer task.lock.Unlock()
task.StopSequenceNumber = seqnum
}
// SetPullStartedAt sets the task pullstartedat timestamp and returns whether
// this field was updated or not
func (task *Task) SetPullStartedAt(timestamp time.Time) bool {
task.lock.Lock()
defer task.lock.Unlock()
// Only set this field if it is not set
if task.PullStartedAtUnsafe.IsZero() {
task.PullStartedAtUnsafe = timestamp
return true
}
return false
}
// GetPullStartedAt returns the PullStartedAt timestamp
func (task *Task) GetPullStartedAt() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.PullStartedAtUnsafe
}
// SetPullStoppedAt sets the task pullstoppedat timestamp
func (task *Task) SetPullStoppedAt(timestamp time.Time) {
task.lock.Lock()
defer task.lock.Unlock()
task.PullStoppedAtUnsafe = timestamp
}
// GetPullStoppedAt returns the PullStoppedAt timestamp
func (task *Task) GetPullStoppedAt() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.PullStoppedAtUnsafe
}
// SetExecutionStoppedAt sets the ExecutionStoppedAt timestamp of the task
func (task *Task) SetExecutionStoppedAt(timestamp time.Time) bool {
task.lock.Lock()
defer task.lock.Unlock()
if task.ExecutionStoppedAtUnsafe.IsZero() {
task.ExecutionStoppedAtUnsafe = timestamp
return true
}
return false
}
// GetExecutionStoppedAt returns the task executionStoppedAt timestamp
func (task *Task) GetExecutionStoppedAt() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.ExecutionStoppedAtUnsafe
}
// String returns a human readable string representation of this object
func (task *Task) String() string {
task.lock.Lock()
defer task.lock.Unlock()
return task.stringUnsafe()
}
// stringUnsafe returns a human readable string representation of this object
func (task *Task) stringUnsafe() string {
res := fmt.Sprintf("%s:%s %s, TaskStatus: (%s->%s)",
task.Family, task.Version, task.Arn,
task.KnownStatusUnsafe.String(), task.DesiredStatusUnsafe.String())
res += " Containers: ["
for _, container := range task.Containers {
res += fmt.Sprintf("%s (%s->%s),", container.Name, container.GetKnownStatus().String(), container.GetDesiredStatus().String())
}
if task.ENI != nil {
res += fmt.Sprintf(" ENI: [%s]", task.ENI.String())
}
return res + "]"
}
// GetID is used to retrieve the taskID from taskARN
// Reference: http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-ecs
func (task *Task) GetID() (string, error) {
// Parse taskARN
parsedARN, err := arn.Parse(task.Arn)
if err != nil {
return "", errors.Wrapf(err, "task get-id: malformed taskARN: %s", task.Arn)
}
// Get task resource section
resource := parsedARN.Resource
if !strings.Contains(resource, arnResourceDelimiter) {
return "", errors.Errorf("task get-id: malformed task resource: %s", resource)
}
resourceSplit := strings.SplitN(resource, arnResourceDelimiter, arnResourceSections)
if len(resourceSplit) != arnResourceSections {
return "", errors.Errorf(
"task get-id: invalid task resource split: %s, expected=%d, actual=%d",
resource, arnResourceSections, len(resourceSplit))
}
return resourceSplit[1], nil
}
// RecordExecutionStoppedAt checks if this is an essential container stopped
// and set the task executionStoppedAt timestamps
func (task *Task) RecordExecutionStoppedAt(container *apicontainer.Container) {
if !container.Essential {
return
}
if container.GetKnownStatus() != apicontainerstatus.ContainerStopped {
return
}
// If the essential container is stopped, set the ExecutionStoppedAt timestamp
now := time.Now()
ok := task.SetExecutionStoppedAt(now)
if !ok {
// ExecutionStoppedAt was already recorded. Nothing to left to do here
return
}
seelog.Infof("Task [%s]: recording execution stopped time. Essential container [%s] stopped at: %s",
task.Arn, container.Name, now.String())
}
// GetResources returns the list of task resources from ResourcesMap
func (task *Task) GetResources() []taskresource.TaskResource {
task.lock.RLock()
defer task.lock.RUnlock()
return task.getResourcesUnsafe()
}
// getResourcesUnsafe returns the list of task resources from ResourcesMap
func (task *Task) getResourcesUnsafe() []taskresource.TaskResource {
var resourceList []taskresource.TaskResource
for _, resources := range task.ResourcesMapUnsafe {
resourceList = append(resourceList, resources...)
}
return resourceList
}
// AddResource adds a resource to ResourcesMap
func (task *Task) AddResource(resourceType string, resource taskresource.TaskResource) {
task.lock.Lock()
defer task.lock.Unlock()
task.ResourcesMapUnsafe[resourceType] = append(task.ResourcesMapUnsafe[resourceType], resource)
}
// SetTerminalReason sets the terminalReason string and this can only be set
// once per the task's lifecycle. This field does not accept updates.
func (task *Task) SetTerminalReason(reason string) {
seelog.Infof("Task [%s]: attempting to set terminal reason for task [%s]", task.Arn, reason)
task.terminalReasonOnce.Do(func() {
seelog.Infof("Task [%s]: setting terminal reason for task [%s]", task.Arn, reason)
// Converts the first letter of terminal reason into capital letter
words := strings.Fields(reason)
words[0] = strings.Title(words[0])
task.terminalReason = strings.Join(words, " ")
})
}
// GetTerminalReason retrieves the terminalReason string
func (task *Task) GetTerminalReason() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.terminalReason
}
// PopulateASMAuthData sets docker auth credentials for a container
func (task *Task) PopulateASMAuthData(container *apicontainer.Container) error {
secretID := container.RegistryAuthentication.ASMAuthData.CredentialsParameter
resource, ok := task.getASMAuthResource()
if !ok {
return errors.New("task auth data: unable to fetch ASM resource")
}
// This will cause a panic if the resource is not of ASMAuthResource type.
// But, it's better to panic as we should have never reached condition
// unless we released an agent without any testing around that code path
asmResource := resource[0].(*asmauth.ASMAuthResource)
dac, ok := asmResource.GetASMDockerAuthConfig(secretID)
if !ok {
return errors.Errorf("task auth data: unable to fetch docker auth config [%s]", secretID)
}
container.SetASMDockerAuthConfig(dac)
return nil
}
func (task *Task) getASMAuthResource() ([]taskresource.TaskResource, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
res, ok := task.ResourcesMapUnsafe[asmauth.ResourceName]
return res, ok
}
// PopulateSSMSecrets appends the container's env var map with ssm parameters
func (task *Task) PopulateSSMSecrets(container *apicontainer.Container) *apierrors.DockerClientConfigError {
resource, ok := task.getSSMSecretsResource()
if !ok {
return &apierrors.DockerClientConfigError{"task secret data: unable to fetch SSM Secrets resource"}
}
ssmResource := resource[0].(*ssmsecret.SSMSecretResource)
envVars := make(map[string]string)
for _, secret := range container.Secrets {
if secret.Provider == apicontainer.SecretProviderSSM {
k := secret.GetSSMSecretResourceCacheKey()
if secretValue, ok := ssmResource.GetCachedSecretValue(k); ok {
envVars[secret.Name] = secretValue
}
}
}
container.MergeEnvironmentVariables(envVars)
return nil
}
func (task *Task) getSSMSecretsResource() ([]taskresource.TaskResource, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
res, ok := task.ResourcesMapUnsafe[ssmsecret.ResourceName]
return res, ok
}
// InitializeResources initializes the required field in the task on agent restart
// Some of the fields in task isn't saved in the agent state file, agent needs
// to initialize these fields before processing the task, eg: docker client in resource
func (task *Task) InitializeResources(resourceFields *taskresource.ResourceFields) {
task.lock.Lock()
defer task.lock.Unlock()
for _, resources := range task.ResourcesMapUnsafe {
for _, resource := range resources {
resource.Initialize(resourceFields, task.KnownStatusUnsafe, task.DesiredStatusUnsafe)
}
}
}
// Retrieves a Task's PIDMode
func (task *Task) getPIDMode() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.PIDMode
}
// Retrieves a Task's IPCMode
func (task *Task) getIPCMode() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.IPCMode
}
| 1 | 21,661 | You may need to make this configurable if we expect people to be able to use the normal Nvidia runtime on other Linux distributions like Ubuntu or Debian. | aws-amazon-ecs-agent | go |
@@ -177,10 +177,10 @@ func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg {
if len(remote.DenyExports) > 0 || len(remote.DenyImports) > 0 {
perms := &Permissions{}
if len(remote.DenyExports) > 0 {
- perms.Subscribe = &SubjectPermission{Deny: remote.DenyExports}
+ perms.Publish = &SubjectPermission{Deny: remote.DenyExports}
}
if len(remote.DenyImports) > 0 {
- perms.Publish = &SubjectPermission{Deny: remote.DenyImports}
+ perms.Subscribe = &SubjectPermission{Deny: remote.DenyImports}
}
cfg.perms = perms
} | 1 | // Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/url"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nkeys"
"github.com/nats-io/nuid"
)
// Warning when user configures leafnode TLS insecure.
// Emitted from leafNodeAcceptLoop when InsecureSkipVerify is set either on
// the main LeafNode TLS config or on any remote's TLS config.
const leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!"

// When a loop is detected, delay the reconnect of solicited connection.
const leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second

// When a server receives a message causing a permission violation, the
// connection is closed and it won't attempt to reconnect for that long.
const leafNodeReconnectAfterPermViolation = 30 * time.Second

// Prefix for loop detection subject
const leafNodeLoopDetectionSubjectPrefix = "$LDS."
// leaf holds the per-connection leafnode state that hangs off a client.
type leaf struct {
	// Used to suppress sub and unsub interest. Same as routes but our audience
	// here is tied to this leaf node. This will hold all subscriptions except this
	// leaf nodes. This represents all the interest we want to send to the other side.
	smap map[string]int32
	// We have any auth stuff here for solicited connections.
	remote *leafNodeCfg
	// isSpoke tells us what role we are playing.
	// Used when we receive a connection but otherside tells us they are a hub.
	isSpoke bool
	// This map will contain all the subscriptions that have been added to the smap
	// during initLeafNodeSmapAndSendSubs. It is short lived and is there to avoid
	// race between processing of a sub where sub is added to account sublist but
	// updateSmap has not be called on that "thread", while in the LN readloop,
	// when processing CONNECT, initLeafNodeSmapAndSendSubs is invoked and add
	// this subscription to smap. When processing of the sub then calls updateSmap,
	// we would add it a second time in the smap causing later unsub to suppress the LS-.
	tsub map[*subscription]struct{}
	// Timer that bounds the lifetime of the tsub map above.
	tsubt *time.Timer
}
// Used for remote (solicited) leafnodes.
// Wraps the configured RemoteLeafOpts with runtime state: the URL list
// (configured plus gossiped), the URL in use, credentials extracted from
// URLs, deny permissions, and an optional connect delay.
type leafNodeCfg struct {
	sync.RWMutex
	*RemoteLeafOpts
	urls     []*url.URL // Configured URLs plus any received via async INFO.
	curURL   *url.URL   // URL currently (or last) used to connect.
	tlsName  string     // Hostname saved for TLS verification when dialing bare IPs.
	username string     // User captured from a configured URL, used when connecting
	password string     // to bare URLs received from the INFO protocol.
	perms    *Permissions
	connDelay time.Duration // Delay before a connect, could be used while detecting loop condition, etc..
}
// Check to see if this is a solicited leafnode. We do special processing for solicited.
func (c *client) isSolicitedLeafNode() bool {
	if c.kind != LEAF {
		return false
	}
	return c.leaf.remote != nil
}
// Returns true if this is a solicited leafnode and is not configured to be treated as a hub or a receiving
// connection leafnode where the otherside has declared itself to be the hub.
func (c *client) isSpokeLeafNode() bool {
	if c.kind != LEAF {
		return false
	}
	return c.leaf.isSpoke
}
// isHubLeafNode reports whether this leafnode connection plays the hub role,
// i.e. it is a LEAF connection that is not a spoke.
func (c *client) isHubLeafNode() bool {
	if c.kind != LEAF {
		return false
	}
	return !c.leaf.isSpoke
}
// This will spin up go routines to solicit the remote leaf node connections.
// One goroutine is started per configured remote.
func (s *Server) solicitLeafNodeRemotes(remotes []*RemoteLeafOpts) {
	for _, opts := range remotes {
		// Capture a fresh cfg per iteration so each goroutine gets its own.
		cfg := newLeafNodeCfg(opts)
		s.startGoRoutine(func() { s.connectToRemoteLeafNode(cfg, true) })
	}
}
// remoteLeafNodeStillValid reports whether the given remote still matches one
// of the currently configured leafnode remotes (compared by URL list).
// FIXME(dlc) - What about auth changes?
func (s *Server) remoteLeafNodeStillValid(remote *leafNodeCfg) bool {
	for _, configured := range s.getOpts().LeafNode.Remotes {
		if reflect.DeepEqual(configured.URLs, remote.URLs) {
			return true
		}
	}
	return false
}
// Ensure that leafnode is properly configured.
// Validates auth options, and when both leafnodes and gateways are enabled,
// requires a system account to be configured.
func validateLeafNode(o *Options) error {
	if err := validateLeafNodeAuthOptions(o); err != nil {
		return err
	}
	// Leafnode listener not enabled: nothing more to check.
	if o.LeafNode.Port == 0 {
		return nil
	}
	// No gateway configured: no cross-feature requirement.
	gatewayDefined := o.Gateway.Name != "" || o.Gateway.Port != 0
	if !gatewayDefined {
		return nil
	}
	// If we are here we have both leaf nodes and gateways defined, make sure there
	// is a system account defined.
	if o.SystemAccount == "" {
		return fmt.Errorf("leaf nodes and gateways (both being defined) require a system account to also be configured")
	}
	return nil
}
// Used to validate user names in LeafNode configuration.
// - rejects mix of single and multiple users.
// - rejects duplicate user names.
func validateLeafNodeAuthOptions(o *Options) error {
	configuredUsers := o.LeafNode.Users
	if len(configuredUsers) == 0 {
		return nil
	}
	if o.LeafNode.Username != _EMPTY_ {
		return fmt.Errorf("can not have a single user/pass and a users array")
	}
	seen := make(map[string]struct{}, len(configuredUsers))
	for _, u := range configuredUsers {
		if _, dup := seen[u.Username]; dup {
			return fmt.Errorf("duplicate user %q detected in leafnode authorization", u.Username)
		}
		seen[u.Username] = struct{}{}
	}
	return nil
}
// reConnectToRemoteLeafNode waits for the configured reconnect interval and
// then attempts to solicit the remote again. Bails out early on shutdown.
func (s *Server) reConnectToRemoteLeafNode(remote *leafNodeCfg) {
	select {
	case <-time.After(s.getOpts().LeafNode.ReconnectInterval):
		// Delay elapsed, fall through to the reconnect attempt.
	case <-s.quitCh:
		// Server is shutting down; release our goroutine wait-group slot.
		s.grWG.Done()
		return
	}
	s.connectToRemoteLeafNode(remote, false)
}
// Creates a leafNodeCfg object that wraps the RemoteLeafOpts.
// It builds the deny permissions for the connection and saves off TLS
// hostname and user/password info from the configured URLs so that bare
// URLs received later via the INFO protocol can still be used.
func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg {
	cfg := &leafNodeCfg{
		RemoteLeafOpts: remote,
		urls:           make([]*url.URL, 0, len(remote.URLs)),
	}
	if len(remote.DenyExports) > 0 || len(remote.DenyImports) > 0 {
		perms := &Permissions{}
		// Fix: the two deny lists were previously mapped to the wrong
		// permission sides. An export is something we send to the other
		// side, which on this connection is a publish; an import is
		// interest we express to the other side, i.e. a subscribe.
		if len(remote.DenyExports) > 0 {
			perms.Publish = &SubjectPermission{Deny: remote.DenyExports}
		}
		if len(remote.DenyImports) > 0 {
			perms.Subscribe = &SubjectPermission{Deny: remote.DenyImports}
		}
		cfg.perms = perms
	}
	// Start with the one that is configured. We will add to this
	// array when receiving async leafnode INFOs.
	cfg.urls = append(cfg.urls, cfg.URLs...)
	// If we are TLS make sure we save off a proper servername if possible.
	// Do same for user/password since we may need them to connect to
	// a bare URL that we get from INFO protocol.
	for _, u := range cfg.urls {
		cfg.saveTLSHostname(u)
		cfg.saveUserPassword(u)
	}
	return cfg
}
// Will pick an URL from the list of available URLs.
// The list is rotated so that repeated failures cycle through all
// known URLs instead of hammering the same one.
func (cfg *leafNodeCfg) pickNextURL() *url.URL {
	cfg.Lock()
	defer cfg.Unlock()
	// If the previously used URL is still at the head and we have
	// alternatives, rotate it to the back of the list.
	if len(cfg.urls) > 1 && cfg.curURL != nil && urlsAreEqual(cfg.curURL, cfg.urls[0]) {
		head := cfg.urls[0]
		copy(cfg.urls, cfg.urls[1:])
		cfg.urls[len(cfg.urls)-1] = head
	}
	cfg.curURL = cfg.urls[0]
	return cfg.curURL
}
// Returns the current URL under the config's read lock.
func (cfg *leafNodeCfg) getCurrentURL() *url.URL {
	cfg.RLock()
	u := cfg.curURL
	cfg.RUnlock()
	return u
}
// Returns how long the server should wait before attempting
// to solicit a remote leafnode connection.
func (cfg *leafNodeCfg) getConnectDelay() time.Duration {
	cfg.RLock()
	defer cfg.RUnlock()
	return cfg.connDelay
}
// Sets the connect delay under the config's write lock.
func (cfg *leafNodeCfg) setConnectDelay(delay time.Duration) {
	cfg.Lock()
	defer cfg.Unlock()
	cfg.connDelay = delay
}
// Ensure that non-exported options (used in tests) have
// been properly set, falling back to defaults otherwise.
func (s *Server) setLeafNodeNonExportedOptions() {
	opts := s.getOpts()
	if opts.LeafNode.dialTimeout != 0 {
		s.leafNodeOpts.dialTimeout = opts.LeafNode.dialTimeout
	} else {
		// Use same timeouts as routes for now.
		s.leafNodeOpts.dialTimeout = DEFAULT_ROUTE_DIAL
	}
	if opts.LeafNode.resolver != nil {
		s.leafNodeOpts.resolver = opts.LeafNode.resolver
	} else {
		s.leafNodeOpts.resolver = net.DefaultResolver
	}
}
// connectToRemoteLeafNode is the solicit loop for one configured remote.
// It honors any pending connect delay, then repeatedly picks a URL, dials,
// and on success hands the TCP connection to createLeafNode. Runs as a
// goroutine tracked by s.grWG; firstConnect controls log verbosity.
func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) {
	defer s.grWG.Done()
	if remote == nil || len(remote.URLs) == 0 {
		s.Debugf("Empty remote leafnode definition, nothing to connect")
		return
	}
	opts := s.getOpts()
	reconnectDelay := opts.LeafNode.ReconnectInterval
	s.mu.Lock()
	dialTimeout := s.leafNodeOpts.dialTimeout
	resolver := s.leafNodeOpts.resolver
	s.mu.Unlock()
	// A connect delay may have been set (e.g. after a loop was detected);
	// honor it unless the server is shutting down first.
	if connDelay := remote.getConnectDelay(); connDelay > 0 {
		select {
		case <-time.After(connDelay):
		case <-s.quitCh:
			return
		}
		remote.setConnectDelay(0)
	}
	var conn net.Conn
	const connErrFmt = "Error trying to connect as leafnode to remote server %q (attempt %v): %v"
	attempts := 0
	for s.isRunning() && s.remoteLeafNodeStillValid(remote) {
		rURL := remote.pickNextURL()
		// Resolve to a concrete IP (may differ from the host in the URL).
		url, err := s.getRandomIP(resolver, rURL.Host)
		if err == nil {
			var ipStr string
			if url != rURL.Host {
				ipStr = fmt.Sprintf(" (%s)", url)
			}
			s.Debugf("Trying to connect as leafnode to remote server on %q%s", rURL.Host, ipStr)
			conn, err = net.DialTimeout("tcp", url, dialTimeout)
		}
		if err != nil {
			attempts++
			// Only log at error level when the policy says so; otherwise
			// keep repeated failures at debug level to limit noise.
			if s.shouldReportConnectErr(firstConnect, attempts) {
				s.Errorf(connErrFmt, rURL.Host, attempts, err)
			} else {
				s.Debugf(connErrFmt, rURL.Host, attempts, err)
			}
			select {
			case <-s.quitCh:
				return
			case <-time.After(reconnectDelay):
				continue
			}
		}
		// The remote may have been removed from config while we were dialing.
		if !s.remoteLeafNodeStillValid(remote) {
			conn.Close()
			return
		}
		// We have a connection here to a remote server.
		// Go ahead and create our leaf node and return.
		s.createLeafNode(conn, remote)
		// We will put this in the normal log if first connect, does not force -DV mode to know
		// that the connect worked.
		if firstConnect {
			s.Noticef("Connected leafnode to %q", rURL.Host)
		}
		return
	}
}
// Save off the tlsName for when we use TLS and mix hostnames and IPs. IPs usually
// come from the server we connect to.
func (cfg *leafNodeCfg) saveTLSHostname(u *url.URL) {
	// Already have a name, nothing to do.
	if cfg.tlsName != "" {
		return
	}
	// Only relevant when TLS is in play.
	if cfg.TLSConfig == nil && u.Scheme != "tls" {
		return
	}
	// Keep hostnames only; IP literals are not useful for SNI/verification.
	if net.ParseIP(u.Hostname()) != nil {
		return
	}
	cfg.tlsName = u.Hostname()
}
// Save off the username/password for when we connect using a bare URL
// that we get from the INFO protocol.
func (cfg *leafNodeCfg) saveUserPassword(u *url.URL) {
	// Keep the first credentials we see; skip URLs without user info.
	if cfg.username != _EMPTY_ || u.User == nil {
		return
	}
	cfg.username = u.User.Username()
	cfg.password, _ = u.User.Password()
}
// This is the leafnode's accept loop. This runs as a go-routine.
// The listen specification is resolved (if use of random port),
// then a listener is started. After that, this routine enters
// a loop (until the server is shutdown) accepting incoming
// leaf node connections from remote servers.
// The provided channel is closed once the listener is up (or on early
// failure via the deferred close), so callers can wait on readiness.
func (s *Server) leafNodeAcceptLoop(ch chan struct{}) {
	defer func() {
		// Guarantees ch is closed on every early-return path; ch is set
		// to nil below once it has been closed on the happy path.
		if ch != nil {
			close(ch)
		}
	}()
	// Snapshot server options.
	opts := s.getOpts()
	port := opts.LeafNode.Port
	if port == -1 {
		// -1 requests a random port; net.Listen expects 0 for that.
		port = 0
	}
	hp := net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(port))
	l, e := net.Listen("tcp", hp)
	if e != nil {
		s.Fatalf("Error listening on leafnode port: %d - %v", opts.LeafNode.Port, e)
		return
	}
	s.Noticef("Listening for leafnode connections on %s",
		net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
	s.mu.Lock()
	tlsRequired := opts.LeafNode.TLSConfig != nil
	tlsVerify := tlsRequired && opts.LeafNode.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
	info := Info{
		ID:           s.info.ID,
		Version:      s.info.Version,
		GitCommit:    gitCommit,
		GoVersion:    runtime.Version(),
		AuthRequired: true,
		TLSRequired:  tlsRequired,
		TLSVerify:    tlsVerify,
		MaxPayload:   s.info.MaxPayload, // TODO(dlc) - Allow override?
		Headers:      s.supportsHeaders(),
		Proto:        1, // Fixed for now.
	}
	// If we have selected a random port...
	if port == 0 {
		// Write resolved port back to options.
		opts.LeafNode.Port = l.Addr().(*net.TCPAddr).Port
	}
	s.leafNodeInfo = info
	// Possibly override Host/Port and set IP based on Cluster.Advertise
	if err := s.setLeafNodeInfoHostPortAndIP(); err != nil {
		s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", s.opts.LeafNode.Advertise, err)
		l.Close()
		s.mu.Unlock()
		return
	}
	// Add our LeafNode URL to the list that we send to servers connecting
	// to our LeafNode accept URL. This call also regenerates leafNodeInfoJSON.
	s.addLeafNodeURL(s.leafNodeInfo.IP)
	// Setup state that can enable shutdown
	s.leafNodeListener = l
	// As of now, a server that does not have remotes configured would
	// never solicit a connection, so we should not have to warn if
	// InsecureSkipVerify is set in main LeafNodes config (since
	// this TLS setting matters only when soliciting a connection).
	// Still, warn if insecure is set in any of LeafNode block.
	// We need to check remotes, even if tls is not required on accept.
	warn := tlsRequired && opts.LeafNode.TLSConfig.InsecureSkipVerify
	if !warn {
		for _, r := range opts.LeafNode.Remotes {
			if r.TLSConfig != nil && r.TLSConfig.InsecureSkipVerify {
				warn = true
				break
			}
		}
	}
	if warn {
		s.Warnf(leafnodeTLSInsecureWarning)
	}
	s.mu.Unlock()
	// Let them know we are up
	close(ch)
	ch = nil
	// Accept loop with error backoff managed by acceptError.
	tmpDelay := ACCEPT_MIN_SLEEP
	for s.isRunning() {
		conn, err := l.Accept()
		if err != nil {
			tmpDelay = s.acceptError("LeafNode", err, tmpDelay)
			continue
		}
		// Successful accept resets the backoff.
		tmpDelay = ACCEPT_MIN_SLEEP
		s.startGoRoutine(func() {
			s.createLeafNode(conn, nil)
			s.grWG.Done()
		})
	}
	s.Debugf("Leafnode accept loop exiting..")
	s.done <- true
}
// RegEx to match a creds file with user JWT and Seed.
// Each dashed delimiter block brackets one payload; with FindAllSubmatch the
// first capture is expected to be the user JWT and the second the nkey seed.
var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`)
// sendLeafConnect enqueues the CONNECT protocol for a solicited leafnode.
// Credentials file (JWT + signed nonce) takes precedence, then user/pass
// from the current URL, then user/pass captured from the configured URLs.
// Lock should be held entering here.
func (c *client) sendLeafConnect(tlsRequired bool) {
	// We support basic user/pass and operator based user JWT with signatures.
	cinfo := leafConnectInfo{
		TLS:  tlsRequired,
		Name: c.srv.info.ID,
		Hub:  c.leaf.remote.Hub,
	}
	// Check for credentials first, that will take precedence..
	if creds := c.leaf.remote.Credentials; creds != "" {
		c.Debugf("Authenticating with credentials file %q", c.leaf.remote.Credentials)
		contents, err := ioutil.ReadFile(creds)
		if err != nil {
			c.Errorf("%v", err)
			return
		}
		// Wipe the file contents from memory when done.
		defer wipeSlice(contents)
		items := credsRe.FindAllSubmatch(contents, -1)
		if len(items) < 2 {
			c.Errorf("Credentials file malformed")
			return
		}
		// First result should be the user JWT.
		// We copy here so that the file containing the seed will be wiped appropriately.
		raw := items[0][1]
		tmp := make([]byte, len(raw))
		copy(tmp, raw)
		// Seed is second item.
		kp, err := nkeys.FromSeed(items[1][1])
		if err != nil {
			c.Errorf("Credentials file has malformed seed")
			return
		}
		// Wipe our key on exit.
		defer kp.Wipe()
		// Sign the server-provided nonce to prove key ownership.
		sigraw, _ := kp.Sign(c.nonce)
		sig := base64.RawURLEncoding.EncodeToString(sigraw)
		cinfo.JWT = string(tmp)
		cinfo.Sig = sig
	} else if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
		cinfo.User = userInfo.Username()
		cinfo.Pass, _ = userInfo.Password()
	} else if c.leaf.remote.username != _EMPTY_ {
		cinfo.User = c.leaf.remote.username
		cinfo.Pass = c.leaf.remote.password
	}
	b, err := json.Marshal(cinfo)
	if err != nil {
		// Fixed message: it previously said "route", a copy-paste from the
		// route CONNECT path; this is a leafnode CONNECT.
		c.Errorf("Error marshaling CONNECT to leafnode: %v\n", err)
		c.closeConnection(ProtocolViolation)
		return
	}
	// Although this call is made before the writeLoop is created,
	// we don't really need to send in place. The protocol will be
	// sent out by the writeLoop.
	c.enqueueProto([]byte(fmt.Sprintf(ConProto, b)))
}
// Makes a deep copy of the LeafNode Info structure.
// The server lock is held on entry.
func (s *Server) copyLeafNodeInfo() *Info {
	clone := s.leafNodeInfo
	// The struct copy above still shares the URL slice's backing array,
	// so snapshot it explicitly.
	if urls := s.leafNodeInfo.LeafNodeURLs; len(urls) > 0 {
		clone.LeafNodeURLs = append([]string(nil), urls...)
	}
	return &clone
}
// Adds a LeafNode URL that we get when a route connects to the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was added or not.
// Server lock is held on entry
func (s *Server) addLeafNodeURL(urlStr string) bool {
	// Reject duplicates.
	for _, existing := range s.leafNodeInfo.LeafNodeURLs {
		if existing == urlStr {
			return false
		}
	}
	s.leafNodeInfo.LeafNodeURLs = append(s.leafNodeInfo.LeafNodeURLs, urlStr)
	s.generateLeafNodeInfoJSON()
	return true
}
// Removes a LeafNode URL of the route that is disconnecting from the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was removed or not.
// Server lock is held on entry.
func (s *Server) removeLeafNodeURL(urlStr string) bool {
	// Don't need to do this if we are removing the route connection because
	// we are shuting down...
	if s.shutdown {
		return false
	}
	urls := s.leafNodeInfo.LeafNodeURLs
	for i, u := range urls {
		if u != urlStr {
			continue
		}
		// Swap-remove: move the last element into the vacated slot
		// (a no-op when removing the last element itself).
		last := len(urls) - 1
		urls[i] = urls[last]
		s.leafNodeInfo.LeafNodeURLs = urls[:last]
		s.generateLeafNodeInfoJSON()
		return true
	}
	return false
}
// generateLeafNodeInfoJSON regenerates the cached INFO protocol line from
// the current leafNodeInfo. Wire format: "INFO <json> \r\n" (space-joined).
func (s *Server) generateLeafNodeInfoJSON() {
	b, _ := json.Marshal(s.leafNodeInfo)
	s.leafNodeInfoJSON = []byte("INFO " + string(b) + " " + CR_LF)
}
// Sends an async INFO protocol so that the connected servers can update
// their list of LeafNode urls.
func (s *Server) sendAsyncLeafNodeInfo() {
	for _, leafClient := range s.leafs {
		leafClient.mu.Lock()
		leafClient.enqueueProto(s.leafNodeInfoJSON)
		leafClient.mu.Unlock()
	}
}
// Called when an inbound leafnode connection is accepted or we create one for a solicited leafnode.
// conn is the established TCP connection; remote is non-nil only for a
// solicited (outbound) connection and carries its configuration.
// Returns the created client, or nil if any step of the setup fails (the
// connection is closed before returning nil).
func (s *Server) createLeafNode(conn net.Conn, remote *leafNodeCfg) *client {
	// Snapshot server options.
	opts := s.getOpts()

	maxPay := int32(opts.MaxPayload)
	maxSubs := int32(opts.MaxSubs)
	// For system, maxSubs of 0 means unlimited, so re-adjust here.
	if maxSubs == 0 {
		maxSubs = -1
	}
	now := time.Now()

	c := &client{srv: s, nc: conn, kind: LEAF, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now}
	// Do not update the smap here, we need to do it in initLeafNodeSmapAndSendSubs
	c.leaf = &leaf{}

	// Determines if we are soliciting the connection or not.
	var solicited bool
	var sendSysConnectEvent bool
	var acc *Account

	c.mu.Lock()
	c.initClient()
	if remote != nil {
		solicited = true
		// Users can bind to any local account, if its empty
		// we will assume the $G account.
		if remote.LocalAccount == "" {
			remote.LocalAccount = globalAccountName
		}
		c.leaf.remote = remote
		c.setPermissions(remote.perms)
		// A remote that declared itself a hub gets a connect event sent for
		// it below; otherwise this side acts as a spoke.
		if c.leaf.remote.Hub {
			sendSysConnectEvent = true
		} else {
			c.leaf.isSpoke = true
		}
		// Release the client lock around the account lookup (which may block).
		c.mu.Unlock()
		// TODO: Decide what should be the optimal behavior here.
		// For now, if lookup fails, we will constantly try
		// to recreate this LN connection.
		var err error
		acc, err = s.LookupAccount(remote.LocalAccount)
		if err != nil {
			c.Errorf("No local account %q for leafnode: %v", remote.LocalAccount, err)
			c.closeConnection(MissingAccount)
			return nil
		}
		c.mu.Lock()
		c.acc = acc
	} else {
		// Accepted connection: the account binding comes later via CONNECT.
		c.flags.set(expectConnect)
	}
	c.mu.Unlock()

	var nonce [nonceLen]byte

	// Grab server variables
	s.mu.Lock()
	info := s.copyLeafNodeInfo()
	// Only the accepting side generates a nonce (used for signatures).
	if !solicited {
		s.generateNonce(nonce[:])
	}
	s.mu.Unlock()

	// Grab lock
	c.mu.Lock()

	if solicited {
		// We need to wait here for the info, but not for too long.
		c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
		br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE)
		// NOTE: this `info` shadows the copied server info above; it holds
		// the raw INFO protocol line read from the remote server.
		info, err := br.ReadString('\n')
		if err != nil {
			c.mu.Unlock()
			if err == io.EOF {
				c.closeConnection(ClientClosed)
			} else {
				c.closeConnection(ReadError)
			}
			return nil
		}
		// Clear the read deadline now that the INFO line has arrived.
		c.nc.SetReadDeadline(time.Time{})

		c.mu.Unlock()
		// Handle only connection to wrong port here, others will be handled below.
		if err := c.parse([]byte(info)); err == ErrConnectedToWrongPort {
			c.Errorf(err.Error())
			c.closeConnection(WrongPort)
			return nil
		}
		c.mu.Lock()

		// parse() above should have routed the INFO through processLeafnodeInfo,
		// which sets this flag; if not set we never got a proper INFO.
		if !c.flags.isSet(infoReceived) {
			c.mu.Unlock()
			c.Errorf("Did not get the remote leafnode's INFO, timed-out")
			c.closeConnection(ReadError)
			return nil
		}

		// Do TLS here as needed.
		tlsRequired := remote.TLS || remote.TLSConfig != nil
		if tlsRequired {
			c.Debugf("Starting TLS leafnode client handshake")
			// Specify the ServerName we are expecting.
			var tlsConfig *tls.Config
			if remote.TLSConfig != nil {
				tlsConfig = remote.TLSConfig.Clone()
			} else {
				tlsConfig = &tls.Config{MinVersion: tls.VersionTLS12}
			}

			var host string
			// If ServerName was given to us from the option, use that, always.
			if tlsConfig.ServerName == "" {
				url := remote.getCurrentURL()
				host = url.Hostname()
				// We need to check if this host is an IP. If so, we probably
				// had this advertised to us and should use the configured host
				// name for the TLS server name.
				if remote.tlsName != "" && net.ParseIP(host) != nil {
					host = remote.tlsName
				}
				tlsConfig.ServerName = host
			}

			// Wrap the raw connection for a client-side TLS handshake.
			c.nc = tls.Client(c.nc, tlsConfig)
			conn := c.nc.(*tls.Conn)

			// Setup the timeout
			var wait time.Duration
			if remote.TLSTimeout == 0 {
				wait = TLS_TIMEOUT
			} else {
				wait = secondsToDuration(remote.TLSTimeout)
			}
			time.AfterFunc(wait, func() { tlsTimeout(c, conn) })
			conn.SetReadDeadline(time.Now().Add(wait))

			// Force handshake
			c.mu.Unlock()
			if err = conn.Handshake(); err != nil {
				// If we overrode and used the saved tlsName but that failed
				// we will clear that here. This is for the case that another server
				// does not have the same tlsName, maybe only IPs.
				// https://github.com/nats-io/nats-server/issues/1256
				if _, ok := err.(x509.HostnameError); ok {
					remote.Lock()
					if host == remote.tlsName {
						remote.tlsName = ""
					}
					remote.Unlock()
				}
				c.Errorf("TLS handshake error: %v", err)
				c.closeConnection(TLSHandshakeError)
				return nil
			}
			// Reset the read deadline
			conn.SetReadDeadline(time.Time{})

			// Re-Grab lock
			c.mu.Lock()
		}

		// Send our CONNECT to the remote now that (optional) TLS is up.
		c.sendLeafConnect(tlsRequired)
		c.Debugf("Remote leafnode connect msg sent")

	} else {
		// Send our info to the other side.
		// Remember the nonce we sent here for signatures, etc.
		c.nonce = make([]byte, nonceLen)
		copy(c.nonce, nonce[:])
		info.Nonce = string(c.nonce)
		info.CID = c.cid
		b, _ := json.Marshal(info)
		pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
		// We have to send from this go routine because we may
		// have to block for TLS handshake before we start our
		// writeLoop go routine. The other side needs to receive
		// this before it can initiate the TLS handshake..
		c.sendProtoNow(bytes.Join(pcs, []byte(" ")))

		// Check to see if we need to spin up TLS.
		if info.TLSRequired {
			c.Debugf("Starting TLS leafnode server handshake")
			c.nc = tls.Server(c.nc, opts.LeafNode.TLSConfig)
			conn := c.nc.(*tls.Conn)

			// Setup the timeout
			ttl := secondsToDuration(opts.LeafNode.TLSTimeout)
			time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
			conn.SetReadDeadline(time.Now().Add(ttl))

			// Force handshake
			c.mu.Unlock()
			if err := conn.Handshake(); err != nil {
				c.Errorf("TLS handshake error: %v", err)
				c.closeConnection(TLSHandshakeError)
				return nil
			}
			// Reset the read deadline
			conn.SetReadDeadline(time.Time{})

			// Re-Grab lock
			c.mu.Lock()

			// Indicate that handshake is complete (used in monitoring)
			c.flags.set(handshakeComplete)
		}

		// Leaf nodes will always require a CONNECT to let us know
		// when we are properly bound to an account.
		// The connection may have been closed
		if !c.isClosed() {
			c.setAuthTimer(secondsToDuration(opts.LeafNode.AuthTimeout))
		}
	}

	// Keep track in case server is shutdown before we can successfully register.
	if !s.addToTempClients(c.cid, c) {
		c.mu.Unlock()
		c.setNoReconnect()
		c.closeConnection(ServerShutdown)
		return nil
	}

	// Spin up the read loop.
	s.startGoRoutine(func() { c.readLoop(nil) })

	// Spin up the write loop.
	s.startGoRoutine(func() { c.writeLoop() })

	// Set the Ping timer
	s.setFirstPingTimer(c)

	c.mu.Unlock()

	c.Debugf("Leafnode connection created")

	// Update server's accounting here if we solicited.
	// Also send our local subs.
	if solicited {
		// Make sure we register with the account here.
		c.registerWithAccount(acc)
		s.addLeafNodeConnection(c)
		s.initLeafNodeSmapAndSendSubs(c)
		if sendSysConnectEvent {
			s.sendLeafNodeConnect(acc)
		}
		// The above functions are not atomically under the client
		// lock doing those operations. It is possible - since we
		// have started the read/write loops - that the connection
		// is closed before or in between. This would leave the
		// closed LN connection possible registered with the account
		// and/or the server's leafs map. So check if connection
		// is closed, and if so, manually cleanup.
		c.mu.Lock()
		closed := c.isClosed()
		c.mu.Unlock()
		if closed {
			s.removeLeafNodeConnection(c)
			if prev := acc.removeClient(c); prev == 1 {
				s.decActiveAccounts()
			}
		}
	}
	return c
}
// processLeafnodeInfo handles an INFO protocol received on a leafnode
// connection: the initial INFO (with nonce/CID) and later async updates.
// Returns ErrConnectedToWrongPort when the initial INFO of a solicited
// connection does not look like it came from a leafnode accept port.
// Called with no lock held; takes the client lock for its duration.
func (c *client) processLeafnodeInfo(info *Info) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Connection may have been closed, or leaf state torn down, while this
	// INFO was in flight: nothing to do then.
	if c.leaf == nil || c.isClosed() {
		return nil
	}

	// Mark that the INFO protocol has been received.
	// Note: For now, only the initial INFO has a nonce. We
	// will probably do auto key rotation at some point.
	if c.flags.setIfNotSet(infoReceived) {
		// Prevent connecting to non leafnode port. Need to do this only for
		// the first INFO, not for async INFO updates...
		//
		// Content of INFO sent by the server when accepting a tcp connection.
		// -------------------------------------------------------------------
		// Listen Port Of | CID | ClientConnectURLs | LeafNodeURLs | Gateway |
		// -------------------------------------------------------------------
		//      CLIENT    |  X* |        X**        |              |         |
		//      ROUTE     |     |        X**        |     X***     |         |
		//     GATEWAY    |     |                   |              |    X    |
		//     LEAFNODE   |  X  |                   |      X       |         |
		// -------------------------------------------------------------------
		// *   Not on older servers.
		// **  Not if "no advertise" is enabled.
		// *** Not if leafnode's "no advertise" is enabled.
		//
		// As seen from above, a solicited LeafNode connection should receive
		// from the remote server an INFO with CID and LeafNodeURLs. Anything
		// else should be considered an attempt to connect to a wrong port.
		if c.leaf.remote != nil && (info.CID == 0 || info.LeafNodeURLs == nil) {
			return ErrConnectedToWrongPort
		}
		// Capture a nonce here.
		c.nonce = []byte(info.Nonce)
		// Remember that the remote requires TLS so reconnects do the handshake.
		if info.TLSRequired && c.leaf.remote != nil {
			c.leaf.remote.TLS = true
		}
		// Headers are enabled only when both sides support them.
		supportsHeaders := c.srv.supportsHeaders()
		c.headers = supportsHeaders && info.Headers
	}

	// For both initial INFO and async INFO protocols, Possibly
	// update our list of remote leafnode URLs we can connect to.
	if c.leaf.remote != nil && len(info.LeafNodeURLs) > 0 {
		// Consider the incoming array as the most up-to-date
		// representation of the remote cluster's list of URLs.
		c.updateLeafNodeURLs(info)
	}
	return nil
}
// When getting a leaf node INFO protocol, use the provided
// array of urls to update the list of possible endpoints.
func (c *client) updateLeafNodeURLs(info *Info) {
	cfg := c.leaf.remote
	cfg.Lock()
	defer cfg.Unlock()

	// Rebuild the endpoint list from scratch: protocol-provided URLs first,
	// followed by the statically configured ones.
	cfg.urls = make([]*url.URL, 0, 1+len(info.LeafNodeURLs))
	for _, surl := range info.LeafNodeURLs {
		u, err := url.Parse("nats-leaf://" + surl)
		if err != nil {
			c.Errorf("Error parsing url %q: %v", surl, err)
			continue
		}
		// Skip any URL that duplicates a configured endpoint. URLs received
		// in the protocol never carry user info while configured ones may,
		// so equality is decided on host and port only.
		isDup := false
		for _, cu := range cfg.URLs {
			if u.Host == cu.Host && u.Port() == cu.Port() {
				isDup = true
				break
			}
		}
		if isDup {
			continue
		}
		cfg.urls = append(cfg.urls, u)
		cfg.saveTLSHostname(u)
	}
	// Append the configured endpoints last.
	cfg.urls = append(cfg.urls, cfg.URLs...)
}
// Similar to setInfoHostPortAndGenerateJSON, but for leafNodeInfo.
// Resolves the leafnode's advertised host/port (from the Advertise option or
// the listen spec) and stores the "host:port" string in leafNodeInfo.IP.
func (s *Server) setLeafNodeInfoHostPortAndIP() error {
	opts := s.getOpts()
	advertise := opts.LeafNode.Advertise != _EMPTY_
	if advertise {
		advHost, advPort, err := parseHostPort(opts.LeafNode.Advertise, opts.LeafNode.Port)
		if err != nil {
			return err
		}
		s.leafNodeInfo.Host, s.leafNodeInfo.Port = advHost, advPort
	} else {
		s.leafNodeInfo.Host = opts.LeafNode.Host
		s.leafNodeInfo.Port = opts.LeafNode.Port
		// If the host is "0.0.0.0" or "::" we need to resolve to a public IP.
		// This will return at most 1 IP.
		hostIsIPAny, ips, err := s.getNonLocalIPsIfHostIsIPAny(s.leafNodeInfo.Host, false)
		if err != nil {
			return err
		}
		if hostIsIPAny {
			if len(ips) > 0 {
				// Take the first from the list...
				s.leafNodeInfo.Host = ips[0]
			} else {
				s.Errorf("Could not find any non-local IP for leafnode's listen specification %q",
					s.leafNodeInfo.Host)
			}
		}
	}
	// Use just host:port for the IP
	s.leafNodeInfo.IP = net.JoinHostPort(s.leafNodeInfo.Host, strconv.Itoa(s.leafNodeInfo.Port))
	if advertise {
		s.Noticef("Advertise address for leafnode is set to %s", s.leafNodeInfo.IP)
	}
	return nil
}
// addLeafNodeConnection registers c in the server's leafs map and drops it
// from the temporary-clients tracking used during connection setup.
func (s *Server) addLeafNodeConnection(c *client) {
	c.mu.Lock()
	id := c.cid
	c.mu.Unlock()

	s.mu.Lock()
	s.leafs[id] = c
	s.mu.Unlock()

	s.removeFromTempClients(id)
}
// removeLeafNodeConnection stops the temporary-sub flush timer (if armed),
// removes c from the server's leafs map, and clears temp-client tracking.
func (s *Server) removeLeafNodeConnection(c *client) {
	c.mu.Lock()
	id := c.cid
	if leaf := c.leaf; leaf != nil && leaf.tsubt != nil {
		leaf.tsubt.Stop()
		leaf.tsubt = nil
	}
	c.mu.Unlock()

	s.mu.Lock()
	delete(s.leafs, id)
	s.mu.Unlock()

	s.removeFromTempClients(id)
}
// leafConnectInfo is the JSON payload of the CONNECT protocol sent by a
// soliciting leafnode to the accepting server.
type leafConnectInfo struct {
	// JWT and Sig carry the user JWT and the nonce signature for JWT auth.
	JWT string `json:"jwt,omitempty"`
	Sig string `json:"sig,omitempty"`
	// User/Pass are used for plain credentials-based auth.
	User string `json:"user,omitempty"`
	Pass string `json:"pass,omitempty"`
	// TLS indicates whether the connection requires TLS.
	TLS bool `json:"tls_required"`
	// Comp is a compression flag (not visibly used in this file).
	Comp bool `json:"compression,omitempty"`
	// Name is the soliciting server's name.
	Name string `json:"name,omitempty"`
	// Hub is set when the soliciting side declares itself a hub, making the
	// accepting side take on the spoke role (see processLeafNodeConnect).
	Hub bool `json:"is_hub,omitempty"`

	// Just used to detect wrong connection attempts.
	Gateway string `json:"gateway,omitempty"`
}
// processLeafNodeConnect will process the inbound connect args.
// Once we are here we are bound to an account, so can send any interest that
// we would have to the other side.
// arg is the raw JSON CONNECT payload; lang is the "lang" field used to
// detect misdirected regular clients. Returns an error (and closes the
// connection) on wrong-port/wrong-gateway attempts or bad JSON.
func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) error {
	// Way to detect clients that incorrectly connect to the route listen
	// port. Client provided "lang" in the CONNECT protocol while LEAFNODEs don't.
	if lang != "" {
		c.sendErrAndErr(ErrClientConnectedToLeafNodePort.Error())
		c.closeConnection(WrongPort)
		return ErrClientConnectedToLeafNodePort
	}

	// Unmarshal as a leaf node connect protocol
	proto := &leafConnectInfo{}
	if err := json.Unmarshal(arg, proto); err != nil {
		return err
	}

	// Reject if this has Gateway which means that it would be from a gateway
	// connection that incorrectly connects to the leafnode port.
	if proto.Gateway != "" {
		errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the leafnode port", proto.Gateway)
		c.Errorf(errTxt)
		c.sendErr(errTxt)
		c.closeConnection(WrongGateway)
		return ErrWrongGateway
	}

	// Leaf Nodes do not do echo or verbose or pedantic.
	c.opts.Verbose = false
	c.opts.Echo = false
	c.opts.Pedantic = false

	// If the other side has declared itself a hub, so we will take on the spoke role.
	if proto.Hub {
		c.leaf.isSpoke = true
	}

	// Create and initialize the smap since we know our bound account now.
	// This will send all registered subs too.
	s.initLeafNodeSmapAndSendSubs(c)

	// Add in the leafnode here since we passed through auth at this point.
	s.addLeafNodeConnection(c)

	// Announce the account connect event for a leaf node.
	// This will no-op as needed.
	s.sendLeafNodeConnect(c.acc)
	return nil
}
// Snapshot the current subscriptions from the sublist into our smap which
// we will keep updated from now on.
// Also send the registered subscriptions.
// The smap maps subscription key (see keyFromSub) to an interest count; the
// whole snapshot is then queued to the remote as LS+ protocols in one batch.
func (s *Server) initLeafNodeSmapAndSendSubs(c *client) {
	acc := c.acc
	if acc == nil {
		c.Debugf("Leafnode does not have an account bound")
		return
	}
	// Collect all account subs here.
	_subs := [32]*subscription{}
	subs := _subs[:0]
	ims := []string{}
	acc.mu.Lock()
	accName := acc.Name

	// If we are solicited we only send interest for local clients.
	if c.isSpokeLeafNode() {
		acc.sl.localSubs(&subs)
	} else {
		acc.sl.All(&subs)
	}

	// Check if we have an existing service import reply.
	siReply := acc.siReply

	// Since leaf nodes only send on interest, if the bound
	// account has import services we need to send those over.
	for isubj := range acc.imports.services {
		ims = append(ims, isubj)
	}

	// Create a unique subject that will be used for loop detection.
	lds := acc.lds
	if lds == _EMPTY_ {
		lds = leafNodeLoopDetectionSubjectPrefix + nuid.Next()
		acc.lds = lds
	}
	acc.mu.Unlock()

	// Now check for gateway interest. Leafnodes will put this into
	// the proper mode to propagate, but they are not held in the account.
	gwsa := [16]*client{}
	gws := gwsa[:0]
	s.getOutboundGatewayConnections(&gws)
	for _, cgw := range gws {
		cgw.mu.Lock()
		gw := cgw.gw
		cgw.mu.Unlock()
		if gw != nil {
			if ei, _ := gw.outsim.Load(accName); ei != nil {
				if e := ei.(*outsie); e != nil && e.sl != nil {
					e.sl.All(&subs)
				}
			}
		}
	}

	applyGlobalRouting := s.gateway.enabled
	if c.isSpokeLeafNode() {
		// Add a fake subscription for this solicited leafnode connection
		// so that we can send back directly for mapped GW replies.
		c.srv.gwLeafSubs.Insert(&subscription{client: c, subject: []byte(gwReplyPrefix + ">")})
	}

	// Now walk the results and add them to our smap
	c.mu.Lock()
	c.leaf.smap = make(map[string]int32)
	for _, sub := range subs {
		// We ignore ourselves here.
		if c != sub.client {
			c.leaf.smap[keyFromSub(sub)]++
			// Track each snapshotted sub in tsub so that a racing updateSmap
			// for the same sub does not double-count it (see updateSmap).
			if c.leaf.tsub == nil {
				c.leaf.tsub = make(map[*subscription]struct{})
			}
			c.leaf.tsub[sub] = struct{}{}
		}
	}

	// FIXME(dlc) - We need to update appropriately on an account claims update.
	for _, isubj := range ims {
		c.leaf.smap[isubj]++
	}

	// If we have gateways enabled we need to make sure the other side sends us responses
	// that have been augmented from the original subscription.
	// TODO(dlc) - Should we lock this down more?
	if applyGlobalRouting {
		c.leaf.smap[oldGWReplyPrefix+"*.>"]++
		c.leaf.smap[gwReplyPrefix+">"]++
	}

	// Detect loop by subscribing to a specific subject and checking
	// if this is coming back to us.
	c.leaf.smap[lds]++

	// Check if we need to add an existing siReply to our map.
	// This will be a prefix so add on the wildcard.
	if siReply != nil {
		wcsub := append(siReply, '>')
		c.leaf.smap[string(wcsub)]++
	}
	// Queue all protocols. There is no max pending limit for LN connection,
	// so we don't need chunking. The writes will happen from the writeLoop.
	var b bytes.Buffer
	for key, n := range c.leaf.smap {
		c.writeLeafSub(&b, key, n)
	}
	if b.Len() > 0 {
		c.queueOutbound(b.Bytes())
		c.flushSignal()
	}
	if c.leaf.tsub != nil {
		// Clear the tsub map after 5 seconds.
		c.leaf.tsubt = time.AfterFunc(5*time.Second, func() {
			c.mu.Lock()
			if c.leaf != nil {
				c.leaf.tsub = nil
				c.leaf.tsubt = nil
			}
			c.mu.Unlock()
		})
	}
	c.mu.Unlock()
}
// updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-.
func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscription, delta int32) {
	acc, err := s.LookupAccount(accName)
	// Bail out if the account lookup failed or returned nothing.
	if err != nil || acc == nil {
		s.Debugf("No or bad account for %q, failed to update interest from gateway", accName)
		return
	}
	s.updateLeafNodes(acc, sub, delta)
}
// updateLeafNodes will make sure to update the smap for the subscription. Will
// also forward to all leaf nodes as needed.
func (s *Server) updateLeafNodes(acc *Account, sub *subscription, delta int32) {
	if acc == nil || sub == nil {
		return
	}
	// Snapshot the account's leafnode connections under the read lock,
	// skipping the connection the subscription itself came from.
	var _stack [32]*client
	targets := _stack[:0]
	acc.mu.RLock()
	for _, ln := range acc.lleafs {
		if ln == sub.client {
			continue
		}
		targets = append(targets, ln)
	}
	acc.mu.RUnlock()
	// Apply the interest change outside of the account lock.
	for _, ln := range targets {
		ln.updateSmap(sub, delta)
	}
}
// This will make an update to our internal smap and determine if we should send out
// an interest update to the remote side.
// delta is +1 for a new sub and -1 for an unsub; key counting collapses
// duplicate interest so the remote only hears about 0<->N transitions
// (queue subs are always forwarded since their weight matters).
func (c *client) updateSmap(sub *subscription, delta int32) {
	key := keyFromSub(sub)
	c.mu.Lock()
	// smap is nil before initLeafNodeSmapAndSendSubs has run (or after teardown).
	if c.leaf.smap == nil {
		c.mu.Unlock()
		return
	}

	// If we are solicited make sure this is a local client or a non-solicited leaf node
	skind := sub.client.kind
	updateClient := skind == CLIENT || skind == SYSTEM || skind == JETSTREAM || skind == ACCOUNT
	if c.isSpokeLeafNode() && !(updateClient || (skind == LEAF && !sub.client.isSpokeLeafNode())) {
		c.mu.Unlock()
		return
	}

	// For additions, check if that sub has just been processed during initLeafNodeSmapAndSendSubs
	// (the snapshot already counted it, so counting again would double it).
	if delta > 0 && c.leaf.tsub != nil {
		if _, present := c.leaf.tsub[sub]; present {
			delete(c.leaf.tsub, sub)
			// Once the tracking map drains, cancel the 5s cleanup timer.
			if len(c.leaf.tsub) == 0 {
				c.leaf.tsub = nil
				c.leaf.tsubt.Stop()
				c.leaf.tsubt = nil
			}
			c.mu.Unlock()
			return
		}
	}

	n := c.leaf.smap[key]
	// We will update if its a queue, if count is zero (or negative), or we were 0 and are N > 0.
	update := sub.queue != nil || n == 0 || n+delta <= 0
	n += delta
	if n > 0 {
		c.leaf.smap[key] = n
	} else {
		delete(c.leaf.smap, key)
	}
	if update {
		c.sendLeafNodeSubUpdate(key, n)
	}
	c.mu.Unlock()
}
// Send the subscription interest change to the other side.
// Lock should be held.
func (c *client) sendLeafNodeSubUpdate(key string, n int32) {
	// Stack-backed buffer: a single LS+/LS- line is usually tiny.
	var scratch [64]byte
	buf := bytes.NewBuffer(scratch[:0])
	c.writeLeafSub(buf, key, n)
	c.enqueueProto(buf.Bytes())
}
// Helper function to build the key.
// Plain subs key on the subject alone; queue subs key on
// "<subject> <queue>", e.g. 'foo bar'.
func keyFromSub(sub *subscription) string {
	if sub.queue == nil {
		return string(sub.subject)
	}
	// Assemble subject + space + queue in a stack-backed scratch buffer.
	var _scratch [1024]byte
	key := append(_scratch[:0], sub.subject...)
	key = append(key, ' ')
	key = append(key, sub.queue...)
	return string(key)
}
// Lock should be held.
// writeLeafSub appends an LS+ (n > 0) or LS- (n <= 0) protocol line for the
// given smap key to w. For queue keys (detected by the embedded space), the
// weight n is appended as well.
func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) {
	if key == "" {
		return
	}
	if n > 0 {
		w.WriteString("LS+ " + key)
		// Check for queue semantics, if found write n.
		if strings.Contains(key, " ") {
			w.WriteString(" ")
			// Format n in decimal without allocating: fill a small byte
			// array from the right using the digits lookup table.
			var b [12]byte
			var i = len(b)
			for l := n; l > 0; l /= 10 {
				i--
				b[i] = digits[l%10]
			}
			w.Write(b[i:])
			if c.trace {
				arg := fmt.Sprintf("%s %d", key, n)
				c.traceOutOp("LS+", []byte(arg))
			}
		} else if c.trace {
			c.traceOutOp("LS+", []byte(key))
		}
	} else {
		// No more interest: tell the remote to drop this subscription.
		w.WriteString("LS- " + key)
		if c.trace {
			c.traceOutOp("LS-", []byte(key))
		}
	}
	w.WriteString(CR_LF)
}
// processLeafSub will process an inbound sub request for the remote leaf node.
// The wire form is "LS+ <subject>" for a plain sub or
// "LS+ <subject> <queue> <weight>" for a queue sub (see the switch below).
// Handles loop detection, export permissions, sub limits, and propagation
// to routes/gateways/other leafnodes.
func (c *client) processLeafSub(argo []byte) (err error) {
	// Indicate activity.
	c.in.subs++

	srv := c.srv
	if srv == nil {
		return nil
	}

	// Copy so we do not reference a potentially large buffer
	arg := make([]byte, len(argo))
	copy(arg, argo)

	args := splitArg(arg)
	sub := &subscription{client: c}

	switch len(args) {
	case 1:
		// Plain sub: just the subject.
		sub.queue = nil
	case 3:
		// Queue sub: subject, queue group, and queue weight.
		sub.queue = args[1]
		sub.qw = int32(parseSize(args[2]))
	default:
		return fmt.Errorf("processLeafSub Parse Error: '%s'", arg)
	}
	sub.subject = args[0]

	c.mu.Lock()
	if c.isClosed() {
		c.mu.Unlock()
		return nil
	}

	acc := c.acc
	// Check if we have a loop.
	ldsPrefix := bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix))

	// Our own loop-detection subject coming back means the remote is echoing
	// our interest back to us: a leafnode cycle.
	if ldsPrefix && string(sub.subject) == acc.getLDSubject() {
		c.mu.Unlock()
		c.handleLeafNodeLoop(true)
		return nil
	}

	// Check permissions if applicable. (but exclude the $LDS, $GR and _GR_)
	checkPerms := true
	if sub.subject[0] == '$' || sub.subject[0] == '_' {
		if ldsPrefix ||
			bytes.HasPrefix(sub.subject, []byte(oldGWReplyPrefix)) ||
			bytes.HasPrefix(sub.subject, []byte(gwReplyPrefix)) {
			checkPerms = false
		}
	}

	if checkPerms && !c.canExport(string(sub.subject)) {
		c.mu.Unlock()
		c.leafSubPermViolation(sub.subject)
		return nil
	}

	// Check if we have a maximum on the number of subscriptions.
	if c.subsAtLimit() {
		c.mu.Unlock()
		c.maxSubsExceeded()
		return nil
	}

	// Like Routes, we store local subs by account and subject and optionally queue name.
	// If we have a queue it will have a trailing weight which we do not want.
	if sub.queue != nil {
		// Strip " <weight>" off the end of the raw arg to form the sid.
		sub.sid = arg[:len(arg)-len(args[2])-1]
	} else {
		sub.sid = arg
	}
	key := string(sub.sid)
	osub := c.subs[key]
	updateGWs := false
	if osub == nil {
		c.subs[key] = sub
		// Now place into the account sl.
		if err := acc.sl.Insert(sub); err != nil {
			delete(c.subs, key)
			c.mu.Unlock()
			c.Errorf("Could not insert subscription: %v", err)
			c.sendErr("Invalid Subscription")
			return nil
		}
		updateGWs = srv.gateway.enabled
	} else if sub.queue != nil {
		// For a queue we need to update the weight.
		atomic.StoreInt32(&osub.qw, sub.qw)
		acc.sl.UpdateRemoteQSub(osub)
	}
	spoke := c.isSpokeLeafNode()
	c.mu.Unlock()

	if err := c.addShadowSubscriptions(acc, sub); err != nil {
		c.Errorf(err.Error())
	}

	// If we are not solicited, treat leaf node subscriptions similar to a
	// client subscription, meaning we forward them to routes, gateways and
	// other leaf nodes as needed.
	if !spoke {
		// If we are routing add to the route map for the associated account.
		srv.updateRouteSubscriptionMap(acc, sub, 1)
		if updateGWs {
			srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
		}
	}
	// Now check on leafnode updates for other leaf nodes. We understand solicited
	// and non-solicited state in this call so we will do the right thing.
	srv.updateLeafNodes(acc, sub, 1)

	return nil
}
// If the leafnode is a solicited, set the connect delay based on default
// or private option (for tests). Sends the error to the other side, log and
// close the connection.
func (c *client) handleLeafNodeLoop(sendErr bool) {
	accName, delay := c.setLeafConnectDelayIfSoliciting(leafNodeReconnectDelayAfterLoopDetected)
	errTxt := fmt.Sprintf("Loop detected for leafnode account=%q. Delaying attempt to reconnect for %v", accName, delay)
	// Notify the remote first (when requested), then log locally.
	if sendErr {
		c.sendErr(errTxt)
	}
	c.Errorf(errTxt)

	// If we are here with "sendErr" false, it means that this is the server
	// that received the error. The other side will have closed the connection,
	// but does not hurt to close here too.
	c.closeConnection(ProtocolViolation)
}
// processLeafUnsub will process an inbound unsub request for the remote leaf node.
// The LS- arg is exactly the key under which the sub was stored in c.subs.
// Returns nil in all handled cases (unknown key, closed connection).
func (c *client) processLeafUnsub(arg []byte) error {
	// Indicate any activity, so pub and sub or unsubs.
	c.in.subs++

	acc := c.acc
	srv := c.srv

	c.mu.Lock()
	if c.isClosed() {
		c.mu.Unlock()
		return nil
	}

	// We store local subs by account and subject and optionally queue name.
	// LS- will have the arg exactly as the key.
	sub, ok := c.subs[string(arg)]
	c.mu.Unlock()

	// If we do not know about this subscription there is nothing to undo.
	// Bail out early rather than passing a nil sub to the route/gateway/leaf
	// update calls below (updateLeafNodes guards against nil, but the other
	// calls should not have to).
	if !ok {
		return nil
	}

	updateGWs := srv.gateway.enabled
	c.unsubscribe(acc, sub, true, true)

	// If we are routing subtract from the route map for the associated account.
	srv.updateRouteSubscriptionMap(acc, sub, -1)
	// Gateways
	if updateGWs {
		srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
	}
	// Now check on leafnode updates for other leaf nodes.
	srv.updateLeafNodes(acc, sub, -1)
	return nil
}
// processLeafHeaderMsgArgs parses the argument section of a leafnode message
// that carries headers into c.pa. Accepted layouts (by arg count):
//   3: <subject> <hdr-size> <total-size>
//   4: <subject> <reply> <hdr-size> <total-size>
//  5+: <subject> <+|-indicator> [<reply>] <queues...> <hdr-size> <total-size>
// Returns an error on malformed args or inconsistent sizes.
func (c *client) processLeafHeaderMsgArgs(arg []byte) error {
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_MSG_ARGS][]byte{}
	args := a[:0]
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t', '\r', '\n':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	// Flush a trailing token not followed by whitespace.
	if start >= 0 {
		args = append(args, arg[start:])
	}

	c.pa.arg = arg
	switch len(args) {
	case 0, 1, 2:
		return fmt.Errorf("processLeafHeaderMsgArgs Parse Error: '%s'", args)
	case 3:
		c.pa.reply = nil
		c.pa.queues = nil
		c.pa.hdb = args[1]
		c.pa.hdr = parseSize(args[1])
		c.pa.szb = args[2]
		c.pa.size = parseSize(args[2])
	case 4:
		c.pa.reply = args[1]
		c.pa.queues = nil
		c.pa.hdb = args[2]
		c.pa.hdr = parseSize(args[2])
		c.pa.szb = args[3]
		c.pa.size = parseSize(args[3])
	default:
		// args[1] is our reply indicator. Should be + or | normally.
		if len(args[1]) != 1 {
			return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
		}
		switch args[1][0] {
		case '+':
			// '+' means a reply subject follows the indicator.
			c.pa.reply = args[2]
		case '|':
			// '|' means no reply; queues start right after the indicator.
			c.pa.reply = nil
		default:
			return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
		}
		// Grab header size.
		c.pa.hdb = args[len(args)-2]
		c.pa.hdr = parseSize(c.pa.hdb)

		// Grab size.
		c.pa.szb = args[len(args)-1]
		c.pa.size = parseSize(c.pa.szb)

		// Grab queue names.
		if c.pa.reply != nil {
			c.pa.queues = args[3 : len(args)-2]
		} else {
			c.pa.queues = args[2 : len(args)-2]
		}
	}
	// Negative sizes mean parseSize failed on a non-numeric token.
	if c.pa.hdr < 0 {
		return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Header Size: '%s'", arg)
	}
	if c.pa.size < 0 {
		return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Size: '%s'", args)
	}

	// The header block is a prefix of the payload, so it can never exceed it.
	if c.pa.hdr > c.pa.size {
		return fmt.Errorf("processLeafHeaderMsgArgs Header Size larger then TotalSize: '%s'", arg)
	}

	// Common ones processed after check for arg length
	c.pa.subject = args[0]

	return nil
}
// processLeafMsgArgs parses the argument section of a (header-less) leafnode
// message into c.pa. Accepted layouts (by arg count):
//   2: <subject> <size>
//   3: <subject> <reply> <size>
//  4+: <subject> <+|-indicator> [<reply>] <queues...> <size>
// Returns an error on malformed args or a bad size.
func (c *client) processLeafMsgArgs(arg []byte) error {
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_MSG_ARGS][]byte{}
	args := a[:0]
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t', '\r', '\n':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	// Flush a trailing token not followed by whitespace.
	if start >= 0 {
		args = append(args, arg[start:])
	}

	c.pa.arg = arg
	switch len(args) {
	case 0, 1:
		return fmt.Errorf("processLeafMsgArgs Parse Error: '%s'", args)
	case 2:
		c.pa.reply = nil
		c.pa.queues = nil
		c.pa.szb = args[1]
		c.pa.size = parseSize(args[1])
	case 3:
		c.pa.reply = args[1]
		c.pa.queues = nil
		c.pa.szb = args[2]
		c.pa.size = parseSize(args[2])
	default:
		// args[1] is our reply indicator. Should be + or | normally.
		if len(args[1]) != 1 {
			return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
		}
		switch args[1][0] {
		case '+':
			// '+' means a reply subject follows the indicator.
			c.pa.reply = args[2]
		case '|':
			// '|' means no reply; queues start right after the indicator.
			c.pa.reply = nil
		default:
			return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
		}
		// Grab size.
		c.pa.szb = args[len(args)-1]
		c.pa.size = parseSize(c.pa.szb)

		// Grab queue names.
		if c.pa.reply != nil {
			c.pa.queues = args[3 : len(args)-1]
		} else {
			c.pa.queues = args[2 : len(args)-1]
		}
	}
	// A negative size means parseSize failed on a non-numeric token.
	if c.pa.size < 0 {
		return fmt.Errorf("processLeafMsgArgs Bad or Missing Size: '%s'", args)
	}

	// Common ones processed after check for arg length
	c.pa.subject = args[0]

	return nil
}
// processInboundLeafMsg is called to process an inbound msg from a leaf node.
// Assumes c.pa has already been populated by the message-args parser. Matches
// account interest (via a per-connection L1 result cache), fans the message
// out, and forwards to gateways when enabled.
func (c *client) processInboundLeafMsg(msg []byte) {
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	c.in.msgs++
	c.in.bytes += int32(len(msg) - LEN_CR_LF)

	// Check pub permissions
	if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
		c.leafPubPermViolation(c.pa.subject)
		return
	}

	srv := c.srv
	acc := c.acc

	// Mostly under testing scenarios.
	if srv == nil || acc == nil {
		return
	}

	// Match the subscriptions. We will use our own L1 map if
	// it's still valid, avoiding contention on the shared sublist.
	var r *SublistResult
	var ok bool

	// The sublist generation id tells us whether cached results are stale.
	genid := atomic.LoadUint64(&c.acc.sl.genid)
	if genid == c.in.genid && c.in.results != nil {
		r, ok = c.in.results[string(c.pa.subject)]
	} else {
		// Reset our L1 completely.
		c.in.results = make(map[string]*SublistResult)
		c.in.genid = genid
	}

	// Go back to the sublist data structure.
	if !ok {
		r = c.acc.sl.Match(string(c.pa.subject))
		c.in.results[string(c.pa.subject)] = r
		// Prune the results cache. Keeps us from unbounded growth. Random delete.
		if len(c.in.results) > maxResultCacheSize {
			n := 0
			for subject := range c.in.results {
				delete(c.in.results, subject)
				if n++; n > pruneSize {
					break
				}
			}
		}
	}

	// Collect queue names if needed.
	var qnames [][]byte

	// Check for no interest, short circuit if so.
	// This is the fanout scale.
	if len(r.psubs)+len(r.qsubs) > 0 {
		flag := pmrNoFlag
		// If we have queue subs in this cluster, then if we run in gateway
		// mode and the remote gateways have queue subs, then we need to
		// collect the queue groups this message was sent to so that we
		// exclude them when sending to gateways.
		if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
			atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
			flag |= pmrCollectQueueNames
		}
		_, qnames = c.processMsgResults(acc, r, msg, nil, c.pa.subject, c.pa.reply, flag)
	}

	// Now deal with gateways
	if c.srv.gateway.enabled {
		c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames)
	}
}
// Handles a publish permission violation.
// Thin wrapper that forwards to leafPermViolation with pub=true.
// See leafPermViolation() for details.
func (c *client) leafPubPermViolation(subj []byte) {
	c.leafPermViolation(true, subj)
}
// Handles a subscription permission violation.
// Thin wrapper that forwards to leafPermViolation with pub=false.
// See leafPermViolation() for details.
func (c *client) leafSubPermViolation(subj []byte) {
	c.leafPermViolation(false, subj)
}
// Common function to process publish or subscribe leafnode permission violation.
// Sends the permission violation error to the remote, logs it and closes the connection.
// If this is from a server soliciting, the reconnection will be delayed.
func (c *client) leafPermViolation(pub bool, subj []byte) {
	// Solicited side delays its reconnect attempts after a violation.
	c.setLeafConnectDelayIfSoliciting(leafNodeReconnectAfterPermViolation)

	var action string
	if pub {
		c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subj))
		action = "Publish"
	} else {
		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", subj))
		action = "Subscription"
	}
	c.Errorf("%s Violation on %q - Check other side configuration", action, subj)
	// TODO: add a new close reason that is more appropriate?
	c.closeConnection(ProtocolViolation)
}
// Invoked from generic processErr() for LEAF connections.
// Only loop-detection errors from the remote are of interest here; a
// soliciting side will then delay its reconnect attempts.
func (c *client) leafProcessErr(errStr string) {
	if strings.Contains(errStr, "Loop detected") {
		c.handleLeafNodeLoop(false)
	}
}
// If this leaf connection solicits, sets the connect delay to the given value,
// or the one from the server option's LeafNode.connDelay if one is set (for tests).
// Returns the connection's account name and delay.
// NOTE(review): reads c.acc.Name unconditionally — assumes c.acc is always
// bound by the time a violation/loop triggers this; confirm against callers.
func (c *client) setLeafConnectDelayIfSoliciting(delay time.Duration) (string, time.Duration) {
	c.mu.Lock()
	if c.isSolicitedLeafNode() {
		if s := c.srv; s != nil {
			// Test-only override of the reconnect delay.
			if srvdelay := s.getOpts().LeafNode.connDelay; srvdelay != 0 {
				delay = srvdelay
			}
		}
		c.leaf.remote.setConnectDelay(delay)
	}
	accName := c.acc.Name
	c.mu.Unlock()
	return accName, delay
}
| 1 | 10,945 | Originally the DenyExport was a subscribe permission because it meant that if on this LN connection, if we deny export of "foo" it means that it would reject a subscription (hence subscribe permission) on "foo" from the other side. Now you are changing to simply not allowing this server to publish on "foo". I am not saying this is wrong, but we completely flip the meaning. | nats-io-nats-server | go |
@@ -159,6 +159,12 @@ dsts_first(void)
return TESTANY(DR_DISASM_INTEL | DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask));
}
+static inline bool
+opmask_with_dsts(void)
+{
+ return TESTANY(DR_DISASM_INTEL | DR_DISASM_ATT, DYNAMO_OPTION(disasm_mask));
+}
+
static void
internal_instr_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT,
dcontext_t *dcontext, instr_t *instr); | 1 | /* **********************************************************
* Copyright (c) 2011-2019 Google, Inc. All rights reserved.
* Copyright (c) 2001-2009 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2001 Hewlett-Packard Company */
/* disassemble.c -- printing of instructions
*
* Note that when printing out instructions:
* Uses DR syntax of "srcs -> dsts" including implicit operands, unless
* a -syntax_* runtime option is specified or disassemble_set_syntax() is called.
*/
/*
* XXX disassembly discrepancies:
* 1) I print "%st(0),%st(1)", gdb prints "%st,%st(1)"
* 2) I print movzx, gdb prints movzw (with an 'l' suffix tacked on)
* 3) gdb says bound and leave are to be printed "Intel order", not AT&T ?!?
* From gdb: "The enter and bound instructions are printed with operands
* in the same order as the intel book; everything else is printed in
* reverse order."
*/
#include "../globals.h"
#include "arch.h"
#include "instr.h"
#include "decode.h"
#include "decode_fast.h"
#include "disassemble.h"
#include "../module_shared.h"
/* these are only needed for symbolic address lookup: */
#include "../fragment.h" /* for fragment_pclookup */
#include "../link.h" /* for linkstub lookup */
#include "../fcache.h" /* for in_fcache */
#if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE)
# ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# endif
/****************************************************************************
* Arch-specific routines
*/
int
print_bytes_to_buffer(char *buf, size_t bufsz, size_t *sofar INOUT, byte *pc,
byte *next_pc, instr_t *instr);
void
print_extra_bytes_to_buffer(char *buf, size_t bufsz, size_t *sofar INOUT, byte *pc,
byte *next_pc, int extra_sz, const char *extra_bytes_prefix);
void
opnd_base_disp_scale_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT,
opnd_t opnd);
bool
opnd_disassemble_arch(char *buf, size_t bufsz, size_t *sofar INOUT, opnd_t opnd);
bool
opnd_disassemble_noimplicit(char *buf, size_t bufsz, size_t *sofar INOUT,
dcontext_t *dcontext, instr_t *instr, byte optype,
opnd_t opnd, bool prev, bool multiple_encodings, bool dst,
int *idx INOUT);
void
print_instr_prefixes(dcontext_t *dcontext, instr_t *instr, char *buf, size_t bufsz,
size_t *sofar INOUT);
void
print_opcode_name(instr_t *instr, const char *name, char *buf, size_t bufsz,
size_t *sofar INOUT);
/****************************************************************************
* Printing of instructions
*/
/* Seeds the disasm_mask option from the legacy -syntax_* boolean options and
 * -decode_strict, so later style queries need only consult disasm_mask.
 * Note the ordering: -syntax_att is applied after -syntax_intel, so if both
 * are set, AT&T wins (the two flags are kept mutually exclusive).
 * Writes dynamo_options fields directly; call during option initialization.
 */
void
disassemble_options_init(void)
{
    dr_disasm_flags_t flags = DYNAMO_OPTION(disasm_mask);
    if (DYNAMO_OPTION(syntax_intel)) {
        flags |= DR_DISASM_INTEL;
        flags &= ~DR_DISASM_ATT; /* mutually exclusive */
    }
    if (DYNAMO_OPTION(syntax_att)) {
        flags |= DR_DISASM_ATT;
        flags &= ~DR_DISASM_INTEL; /* mutually exclusive */
    }
    if (DYNAMO_OPTION(syntax_arm)) {
        flags |= DR_DISASM_ARM;
    }
    /* This option is separate as it's not strictly a disasm style */
    dynamo_options.decode_strict = TEST(DR_DISASM_STRICT_INVALID, flags);
    if (DYNAMO_OPTION(decode_strict))
        flags |= DR_DISASM_STRICT_INVALID; /* for completeness */
    dynamo_options.disasm_mask = flags;
}
DR_API
/* Exported API: replaces the disassembly style flags at runtime.
 * The options struct is normally read-only in-process, so it is briefly
 * made writable around the update, except in the standalone decoder build
 * where no such protection exists.  Also mirrors DR_DISASM_STRICT_INVALID
 * into the separate decode_strict option (not strictly a disasm style).
 */
void
disassemble_set_syntax(dr_disasm_flags_t flags)
{
# ifndef STANDALONE_DECODER
    options_make_writable();
# endif
    dynamo_options.disasm_mask = flags;
    /* This option is separate as it's not strictly a disasm style */
    dynamo_options.decode_strict = TEST(DR_DISASM_STRICT_INVALID, flags);
# ifndef STANDALONE_DECODER
    options_restore_readonly();
# endif
}
static inline bool
dsts_first(void)
{
return TESTANY(DR_DISASM_INTEL | DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask));
}
static void
internal_instr_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT,
dcontext_t *dcontext, instr_t *instr);
/* Prefix printed before immediate operands: nothing for Intel syntax,
 * "#" for ARM syntax, "$" for AT&T/DR syntax.
 */
static inline const char *
immed_prefix(void)
{
    if (TEST(DR_DISASM_INTEL, DYNAMO_OPTION(disasm_mask)))
        return "";
    if (TEST(DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask)))
        return "#";
    return "$";
}
/* Prints a register name surrounded by the given prefix/suffix.  Intel and
 * ARM styles print the bare name; AT&T/DR styles prepend '%'.  A '-' is
 * inserted when the operand carries DR_OPND_NEGATED.
 */
void
reg_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT, reg_id_t reg,
                dr_opnd_flags_t flags, const char *prefix, const char *suffix)
{
    const char *fmt =
        TESTANY(DR_DISASM_INTEL | DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask))
            ? "%s%s%s%s"
            : "%s%s%%%s%s";
    const char *neg = TEST(DR_OPND_NEGATED, flags) ? "-" : "";
    print_to_buffer(buf, bufsz, sofar, fmt, prefix, neg, reg_names[reg], suffix);
}
/* Maps an operand's size in bytes to the DR-style "<N>byte" suffix text.
 * Returns "" for sizes with no defined suffix.
 */
static const char *
opnd_size_suffix_dr(opnd_t opnd)
{
    static const struct {
        int size;
        const char *suffix;
    } suffixes[] = {
        { 1, "1byte" },     { 2, "2byte" },     { 3, "3byte" },
        { 4, "4byte" },     { 6, "6byte" },     { 8, "8byte" },
        { 10, "10byte" },   { 12, "12byte" },   { 14, "14byte" },
        { 15, "15byte" },   { 16, "16byte" },   { 20, "20byte" },
        { 24, "24byte" },   { 28, "28byte" },   { 32, "32byte" },
        { 36, "36byte" },   { 40, "40byte" },   { 44, "44byte" },
        { 48, "48byte" },   { 52, "52byte" },   { 56, "56byte" },
        { 60, "60byte" },   { 64, "64byte" },   { 68, "68byte" },
        { 72, "72byte" },   { 76, "76byte" },   { 80, "80byte" },
        { 84, "84byte" },   { 88, "88byte" },   { 92, "92byte" },
        { 94, "94byte" },   { 96, "96byte" },   { 100, "100byte" },
        { 104, "104byte" }, { 108, "108byte" }, { 112, "112byte" },
        { 116, "116byte" }, { 120, "120byte" }, { 124, "124byte" },
        { 128, "128byte" }, { 512, "512byte" },
    };
    int sz = opnd_size_in_bytes(opnd_get_size(opnd));
    size_t i;
    for (i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); i++) {
        if (suffixes[i].size == sz)
            return suffixes[i].suffix;
    }
    return "";
}
/* Maps an operand's size in bytes to the Intel-style pointer-size keyword
 * (e.g. "dword" for 4 bytes).  Returns "" when no keyword applies and the
 * size is assumed to be implied by the opcode.
 */
static const char *
opnd_size_suffix_intel(opnd_t opnd)
{
    static const struct {
        int size;
        const char *suffix;
    } suffixes[] = {
        { 1, "byte" },  { 2, "word" },   { 4, "dword" }, { 6, "fword" },
        { 8, "qword" }, { 10, "tbyte" }, { 12, "" },     { 16, "oword" },
        { 32, "yword" },
    };
    int sz = opnd_size_in_bytes(opnd_get_size(opnd));
    size_t i;
    for (i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); i++) {
        if (suffixes[i].size == sz)
            return suffixes[i].suffix;
    }
    return "";
}
/* Emits the opening text of a memory operand for bracketed syntaxes:
 * "<size> ptr [" (or just "[" when the size is implied by the opcode) for
 * Intel style, "[" for ARM style, and nothing for AT&T/DR style.
 */
static void
opnd_mem_disassemble_prefix(char *buf, size_t bufsz, size_t *sofar INOUT, opnd_t opnd)
{
    dr_disasm_flags_t style = DYNAMO_OPTION(disasm_mask);
    if (TEST(DR_DISASM_INTEL, style)) {
        const char *size_str = opnd_size_suffix_intel(opnd);
        if (size_str[0] == '\0') {
            /* assume size implied by opcode */
            print_to_buffer(buf, bufsz, sofar, "[");
        } else
            print_to_buffer(buf, bufsz, sofar, "%s ptr [", size_str);
    } else if (TEST(DR_DISASM_ARM, style))
        print_to_buffer(buf, bufsz, sofar, "[");
}
/* Prints a base+index*scale+disp memory operand in the active syntax:
 * Intel/ARM style as "seg:[base+index*scale+disp]" (opening bracket comes
 * from opnd_mem_disassemble_prefix), AT&T/DR style as
 * "seg:disp(base,index,scale)".  Displacement sign and width handling
 * differs per style; see inline comments.
 */
static void
opnd_base_disp_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT, opnd_t opnd)
{
    reg_id_t seg = opnd_get_segment(opnd);
    reg_id_t base = opnd_get_base(opnd);
    int disp = opnd_get_disp(opnd);
    reg_id_t index = opnd_get_index(opnd);
    opnd_mem_disassemble_prefix(buf, bufsz, sofar, opnd);
    if (seg != REG_NULL)
        reg_disassemble(buf, bufsz, sofar, seg, 0, "", ":");
    /* Bracketed styles print base and index up front, inside the brackets. */
    if (TESTANY(DR_DISASM_INTEL | DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask))) {
        if (base != REG_NULL)
            reg_disassemble(buf, bufsz, sofar, base, 0, "", "");
        if (index != REG_NULL) {
            reg_disassemble(
                buf, bufsz, sofar, index, opnd_get_flags(opnd),
                (base != REG_NULL && !TEST(DR_OPND_NEGATED, opnd_get_flags(opnd))) ? "+"
                : "",
                "");
            opnd_base_disp_scale_disassemble(buf, bufsz, sofar, opnd);
        }
    }
    /* Print the displacement if non-zero, if there is nothing else to print,
     * or if a zero displacement was explicitly encoded.
     */
    if (disp != 0 || (base == REG_NULL && index == REG_NULL) ||
        opnd_is_disp_encode_zero(opnd)) {
        if (TEST(DR_DISASM_INTEL, DYNAMO_OPTION(disasm_mask))
            /* Always negating for ARM and AArch64. I would do the same for x86 but
             * I don't want to break any existing scripts.
             */
            IF_NOT_X86(|| true)) {
            /* windbg negates if top byte is 0xff
             * for x64 udis86 negates if at all negative
             */
            if (TEST(DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask)))
                print_to_buffer(buf, bufsz, sofar, ", #");
            if (IF_X64_ELSE(disp < 0, (disp & 0xff000000) == 0xff000000)) {
                disp = -disp;
                print_to_buffer(buf, bufsz, sofar, "-");
            } else if (base != REG_NULL || index != REG_NULL) {
                if (TEST(DR_OPND_NEGATED, opnd_get_flags(opnd)))
                    print_to_buffer(buf, bufsz, sofar, "-");
                else if (!TEST(DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask)))
                    print_to_buffer(buf, bufsz, sofar, "+");
            }
        } else if (TEST(DR_DISASM_ATT, DYNAMO_OPTION(disasm_mask))) {
            /* There seems to be a discrepancy between windbg and binutils. The latter
             * prints a '-' displacement for negative displacements both for att and
             * intel. We are doing the same for att syntax, while we're following windbg
             * for intel's syntax. XXX i#3574: should we do the same for intel's syntax?
             */
            if (disp < 0) {
                disp = -disp;
                print_to_buffer(buf, bufsz, sofar, "-");
            }
        }
        /* Width selection: ARM prints decimal; others print hex, narrowed
         * when the value fits and no full-width encoding was requested.
         */
        if (TEST(DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask)))
            print_to_buffer(buf, bufsz, sofar, "%d", disp);
        else if ((unsigned)disp <= 0xff && !opnd_is_disp_force_full(opnd))
            print_to_buffer(buf, bufsz, sofar, "0x%02x", disp);
        else if ((unsigned)disp <= 0xffff IF_X86(&&opnd_is_disp_short_addr(opnd)))
            print_to_buffer(buf, bufsz, sofar, "0x%04x", disp);
        else /* there are no 64-bit displacements */
            print_to_buffer(buf, bufsz, sofar, "0x%08x", disp);
    }
    /* AT&T/DR styles print "(base,index,scale)" after the displacement. */
    if (!TESTANY(DR_DISASM_INTEL | DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask))) {
        if (base != REG_NULL || index != REG_NULL) {
            print_to_buffer(buf, bufsz, sofar, "(");
            if (base != REG_NULL)
                reg_disassemble(buf, bufsz, sofar, base, 0, "", "");
            if (index != REG_NULL) {
                reg_disassemble(buf, bufsz, sofar, index, opnd_get_flags(opnd), ",", "");
                opnd_base_disp_scale_disassemble(buf, bufsz, sofar, opnd);
            }
            print_to_buffer(buf, bufsz, sofar, ")");
        }
    }
    if (TESTANY(DR_DISASM_INTEL | DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask)))
        print_to_buffer(buf, bufsz, sofar, "]");
}
/* Attempts to print a symbolic annotation ("$<addr> <name>") for target when
 * it is a known address: a DR hook, a gencode routine (fcache return,
 * shared syscall, IBL, coarse stubs), a fragment entry point, an exit stub,
 * or a coarse entrance stub.  Returns true if something was printed, false
 * if the caller should fall back to printing a raw address.  Does nothing
 * in the standalone decoder build.
 */
static bool
print_known_pc_target(char *buf, size_t bufsz, size_t *sofar INOUT, dcontext_t *dcontext,
                      byte *target)
{
    bool printed = false;
# ifndef STANDALONE_DECODER
    /* symbolic addresses */
    if (ENTER_DR_HOOK != NULL && target == (app_pc)ENTER_DR_HOOK) {
        print_to_buffer(buf, bufsz, sofar, "$" PFX " <enter_dynamorio_hook> ", target);
        printed = true;
    } else if (EXIT_DR_HOOK != NULL && target == (app_pc)EXIT_DR_HOOK) {
        print_to_buffer(buf, bufsz, sofar, "$" PFX " <exit_dynamorio_hook> ", target);
        printed = true;
    } else if (dcontext != NULL && dynamo_initialized && !standalone_library) {
        const char *gencode_routine = NULL;
        const char *ibl_brtype;
        const char *ibl_name = get_ibl_routine_name(dcontext, target, &ibl_brtype);
# ifdef X86
        /* An IBL routine may be reached via a jmp in a coarse stub prefix:
         * follow the jmp target to resolve the real routine name. */
        if (ibl_name == NULL && in_coarse_stub_prefixes(target) &&
            *target == JMP_OPCODE) {
            ibl_name = get_ibl_routine_name(dcontext, PC_RELATIVE_TARGET(target + 1),
                                            &ibl_brtype);
        }
# elif defined(ARM)
        if (ibl_name == NULL && in_coarse_stub_prefixes(target)) {
            /* FIXME i#1575: NYI on ARM */
            ASSERT_NOT_IMPLEMENTED(false);
        }
# endif
# ifdef WINDOWS
        /* must test first, as get_ibl_routine_name will think "bb_ibl_indjmp" */
        if (dcontext != GLOBAL_DCONTEXT) {
            if (target == shared_syscall_routine(dcontext))
                gencode_routine = "shared_syscall";
            else if (target == unlinked_shared_syscall_routine(dcontext))
                gencode_routine = "unlinked_shared_syscall";
        } else {
            if (target == shared_syscall_routine_ex(dcontext _IF_X64(GENCODE_X64)))
                gencode_routine = "shared_syscall";
            else if (target ==
                     unlinked_shared_syscall_routine_ex(dcontext _IF_X64(GENCODE_X64)))
                gencode_routine = "unlinked_shared_syscall";
# ifdef X64
            else if (target == shared_syscall_routine_ex(dcontext _IF_X64(GENCODE_X86)))
                gencode_routine = "x86_shared_syscall";
            else if (target ==
                     unlinked_shared_syscall_routine_ex(dcontext _IF_X64(GENCODE_X86)))
                gencode_routine = "x86_unlinked_shared_syscall";
# endif
        }
# endif
        if (ibl_name) {
            /* can't use gencode_routine since need two strings here */
            print_to_buffer(buf, bufsz, sofar, "$" PFX " <%s_%s>", target, ibl_name,
                            ibl_brtype);
            printed = true;
        } else if (SHARED_FRAGMENTS_ENABLED() &&
                   target == fcache_return_shared_routine(IF_X86_64(GENCODE_X64)))
            gencode_routine = "fcache_return";
# ifdef X64
        else if (SHARED_FRAGMENTS_ENABLED() &&
                 target == fcache_return_shared_routine(IF_X86_64(GENCODE_X86)))
            gencode_routine = "x86_fcache_return";
# endif
        else if (dcontext != GLOBAL_DCONTEXT && target == fcache_return_routine(dcontext))
            gencode_routine = "fcache_return";
        else if (DYNAMO_OPTION(coarse_units)) {
            if (target == fcache_return_coarse_prefix(target, NULL) ||
                target == fcache_return_coarse_routine(IF_X86_64(GENCODE_X64)))
                gencode_routine = "fcache_return_coarse";
            else if (target == trace_head_return_coarse_prefix(target, NULL) ||
                     target == trace_head_return_coarse_routine(IF_X86_64(GENCODE_X64)))
                gencode_routine = "trace_head_return_coarse";
# ifdef X64
            else if (target == fcache_return_coarse_prefix(target, NULL) ||
                     target == fcache_return_coarse_routine(IF_X86_64(GENCODE_X86)))
                gencode_routine = "x86_fcache_return_coarse";
            else if (target == trace_head_return_coarse_prefix(target, NULL) ||
                     target == trace_head_return_coarse_routine(IF_X86_64(GENCODE_X86)))
                gencode_routine = "x86_trace_head_return_coarse";
# endif
        }
# ifdef PROFILE_RDTSC
        else if ((void *)target == profile_fragment_enter)
            gencode_routine = "profile_fragment_enter";
# endif
# ifdef TRACE_HEAD_CACHE_INCR
        else if ((void *)target == trace_head_incr_routine(dcontext))
            gencode_routine = "trace_head_incr";
# endif
        if (gencode_routine != NULL) {
            print_to_buffer(buf, bufsz, sofar, "$" PFX " <%s> ", target, gencode_routine);
            printed = true;
        } else if (!printed && fragment_initialized(dcontext)) {
            /* see if target is in a fragment */
            bool alloc = false;
            fragment_t *fragment;
# ifdef DEBUG
            fragment_t wrapper;
            /* Unfortunately our fast lookup by fcache unit has lock
             * ordering issues which we get around by using the htable
             * method, though that won't find invisible fragments
             * (FIXME: for those could perhaps pass in a pointer).
             * For !DEADLOCK_AVOIDANCE, OWN_MUTEX's conservative imprecision
             * is fine.
             */
            if ((SHARED_FRAGMENTS_ENABLED() &&
                 self_owns_recursive_lock(&change_linking_lock))
                /* HACK to avoid recursion if the pclookup invokes
                 * decode_fragment() (for coarse target) and it then invokes
                 * disassembly
                 */
                IF_DEBUG(
                    || (dcontext != GLOBAL_DCONTEXT && dcontext->in_opnd_disassemble))) {
                fragment =
                    fragment_pclookup_by_htable(dcontext, (void *)target, &wrapper);
            } else {
                bool prev_flag = false;
                if (dcontext != GLOBAL_DCONTEXT) {
                    /* Set the recursion-guard flag around the pclookup. */
                    prev_flag = dcontext->in_opnd_disassemble;
                    dcontext->in_opnd_disassemble = true;
                }
# endif /* shouldn't be any logging so no disasm in the middle of sensitive ops \
        */
                fragment = fragment_pclookup_with_linkstubs(dcontext, target, &alloc);
# ifdef DEBUG
                if (dcontext != GLOBAL_DCONTEXT)
                    dcontext->in_opnd_disassemble = prev_flag;
            }
# endif
            if (fragment != NULL) {
                if (FCACHE_ENTRY_PC(fragment) == (cache_pc)target ||
                    FCACHE_PREFIX_ENTRY_PC(fragment) == (cache_pc)target ||
                    FCACHE_IBT_ENTRY_PC(fragment) == (cache_pc)target) {
# ifdef DEBUG
                    print_to_buffer(buf, bufsz, sofar, "$" PFX " <fragment %d> ", target,
                                    fragment->id);
# else
                    print_to_buffer(buf, bufsz, sofar, "$" PFX " <fragment " PFX "> ",
                                    target, fragment->tag);
# endif
                    printed = true;
                } else if (!TEST(FRAG_FAKE, fragment->flags)) {
                    /* check exit stubs */
                    linkstub_t *ls;
                    int ls_num = 0;
                    CLIENT_ASSERT(!TEST(FRAG_FAKE, fragment->flags),
                                  "opnd_disassemble: invalid target");
                    for (ls = FRAGMENT_EXIT_STUBS(fragment); ls;
                         ls = LINKSTUB_NEXT_EXIT(ls)) {
                        if (target == EXIT_STUB_PC(dcontext, fragment, ls)) {
                            print_to_buffer(buf, bufsz, sofar, "$" PFX " <exit stub %d> ",
                                            target, ls_num);
                            printed = true;
                            break;
                        }
                        ls_num++;
                    }
                }
                if (alloc)
                    fragment_free(dcontext, fragment);
            } else if (coarse_is_entrance_stub(target)) {
                print_to_buffer(buf, bufsz, sofar,
                                "$" PFX " <entrance stub for " PFX "> ", target,
                                entrance_stub_target_tag(target, NULL));
                printed = true;
            }
        }
    } else if (dynamo_initialized && !SHARED_FRAGMENTS_ENABLED() && !standalone_library) {
        print_to_buffer(buf, bufsz, sofar, "NULL DCONTEXT! ");
    }
# endif /* !STANDALONE_DECODER */
    return printed;
}
/* Core operand printer shared by the opnd_disassemble* entry points.
 * Dispatches on the operand kind and prints it in the style selected by
 * disasm_mask.  When use_size_sfx is true, appends a "[<N>byte]" suffix
 * for kinds whose size is meaningful (memory operands, partial registers).
 * dcontext is used only for symbolic PC-target lookup and may be
 * GLOBAL_DCONTEXT.
 */
void
internal_opnd_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT,
                          dcontext_t *dcontext, opnd_t opnd, bool use_size_sfx)
{
    /* Arch-specific kinds are handled entirely by this hook. */
    if (opnd_disassemble_arch(buf, bufsz, sofar, opnd))
        return;
    switch (opnd.kind) {
    case NULL_kind: return;
    case IMMED_INTEGER_kind: {
        int sz = opnd_size_in_bytes(opnd_get_size(opnd));
        ptr_int_t val = opnd_get_immed_int(opnd);
        const char *sign = "";
# ifdef ARM
        /* On ARM we have few pointer-sized immeds so let's always negate */
        if (val < 0 && opnd_size_in_bytes(opnd_get_size(opnd)) < sizeof(void *)) {
            sign = "-";
            val = -val;
        }
# endif
        /* PR 327775: when we don't know other operands we truncate.
         * We rely on instr_disassemble to temporarily change operand
         * size to sign-extend to match the size of adjacent operands.
         */
        if (TEST(DR_DISASM_ARM, DYNAMO_OPTION(disasm_mask))) {
            print_to_buffer(buf, bufsz, sofar, "%s%s%d", immed_prefix(), sign, (uint)val);
        } else if (sz <= 1) {
            print_to_buffer(buf, bufsz, sofar, "%s%s0x%02x", immed_prefix(), sign,
                            (uint)((byte)val));
        } else if (sz <= 2) {
            print_to_buffer(buf, bufsz, sofar, "%s%s0x%04x", immed_prefix(), sign,
                            (uint)((unsigned short)val));
        } else if (sz <= 4) {
            print_to_buffer(buf, bufsz, sofar, "%s%s0x%08x", immed_prefix(), sign,
                            (uint)val);
        } else {
            int64 val64 = val;
            IF_NOT_X64({
                /* 64-bit immed stored out-of-line on 32-bit builds */
                if (opnd_is_immed_int64(opnd))
                    val64 = opnd_get_immed_int64(opnd);
            });
            print_to_buffer(buf, bufsz, sofar, "%s%s0x" ZHEX64_FORMAT_STRING,
                            immed_prefix(), sign, val64);
        }
    } break;
    case IMMED_FLOAT_kind: {
        /* need to save floating state around float printing */
        PRESERVE_FLOATING_POINT_STATE({
            uint top;
            uint bottom;
            const char *sign;
            double_print(opnd_get_immed_float(opnd), 6, &top, &bottom, &sign);
            print_to_buffer(buf, bufsz, sofar, "%s%s%u.%.6u", immed_prefix(), sign, top,
                            bottom);
        });
        break;
    }
    case PC_kind: {
        app_pc target = opnd_get_pc(opnd);
        /* Prefer a symbolic name (hook/gencode/fragment); else raw addr. */
        if (!print_known_pc_target(buf, bufsz, sofar, dcontext, target)) {
            print_to_buffer(buf, bufsz, sofar, "%s" PFX, immed_prefix(), target);
        }
        break;
    }
    case FAR_PC_kind:
        /* constant is selector and not a SEG_ constant */
        print_to_buffer(buf, bufsz, sofar, "0x%04x:" PFX,
                        (ushort)opnd_get_segment_selector(opnd), opnd_get_pc(opnd));
        break;
    case INSTR_kind:
        print_to_buffer(buf, bufsz, sofar, "@" PFX, opnd_get_instr(opnd));
        break;
    case FAR_INSTR_kind:
        /* constant is selector and not a SEG_ constant */
        print_to_buffer(buf, bufsz, sofar, "0x%04x:@" PFX,
                        (ushort)opnd_get_segment_selector(opnd), opnd_get_instr(opnd));
        break;
    case MEM_INSTR_kind:
        print_to_buffer(buf, bufsz, sofar, IF_X64("<re> ") "@" PFX "+%d",
                        opnd_get_instr(opnd), opnd_get_mem_instr_disp(opnd));
        break;
    case REG_kind:
        reg_disassemble(buf, bufsz, sofar, opnd_get_reg(opnd), opnd_get_flags(opnd), "",
                        "");
        break;
    case BASE_DISP_kind: opnd_base_disp_disassemble(buf, bufsz, sofar, opnd); break;
# ifdef X64
    case REL_ADDR_kind:
        print_to_buffer(buf, bufsz, sofar, "<rel> ");
        /* fall-through */
    case ABS_ADDR_kind:
        opnd_mem_disassemble_prefix(buf, bufsz, sofar, opnd);
        if (opnd_get_segment(opnd) != REG_NULL)
            reg_disassemble(buf, bufsz, sofar, opnd_get_segment(opnd), 0, "", ":");
        print_to_buffer(buf, bufsz, sofar, PFX "%s", opnd_get_addr(opnd),
                        TEST(DR_DISASM_INTEL, DYNAMO_OPTION(disasm_mask)) ? "]" : "");
        break;
# endif
    default:
        print_to_buffer(buf, bufsz, sofar, "UNKNOWN OPERAND TYPE %d", opnd.kind);
        CLIENT_ASSERT(false, "opnd_disassemble: invalid opnd type");
    }
    /* Optional trailing size suffix for sized kinds. */
    if (use_size_sfx) {
        switch (opnd.kind) {
        case NULL_kind:
        case IMMED_INTEGER_kind:
        case IMMED_FLOAT_kind:
        case PC_kind:
        case FAR_PC_kind: break;
        case REG_kind:
            if (!opnd_is_reg_partial(opnd))
                break;
            /* fall-through */
        default: {
            const char *size_str = opnd_size_suffix_dr(opnd);
            if (size_str[0] != '\0')
                print_to_buffer(buf, bufsz, sofar, "[%s]", size_str);
        }
        }
    }
}
/* Prints a single operand to outfile.  Size suffixes are disabled since the
 * containing instruction (and thus operand context) is unknown here.
 */
void
opnd_disassemble(dcontext_t *dcontext, opnd_t opnd, file_t outfile)
{
    char dis[MAX_OPND_DIS_SZ];
    size_t len = 0;
    internal_opnd_disassemble(dis, BUFFER_SIZE_ELEMENTS(dis), &len, dcontext, opnd,
                              false /*don't know*/);
    /* print_to_buffer's bool results are not propagated; buffer should be ample */
    CLIENT_ASSERT(len < BUFFER_SIZE_ELEMENTS(dis) - 1, "internal buffer too small");
    os_write(outfile, dis, len);
}
/* Prints a single operand into the caller-supplied buf (no size suffix,
 * since operand context is unknown).  Returns the number of characters
 * written.
 */
size_t
opnd_disassemble_to_buffer(dcontext_t *dcontext, opnd_t opnd, char *buf, size_t bufsz)
{
    size_t len = 0;
    internal_opnd_disassemble(buf, bufsz, &len, dcontext, opnd, false /*don't know*/);
    return len;
}
/* Prints the raw bytes of the instruction in [pc, next_pc) to outfile.
 * Returns the count of bytes that did not fit on the line and must be
 * printed separately via print_extra_bytes_to_file().
 */
static int
print_bytes_to_file(file_t outfile, byte *pc, byte *next_pc, instr_t *inst)
{
    char dis[MAX_PC_DIS_SZ];
    size_t len = 0;
    int extra_sz =
        print_bytes_to_buffer(dis, BUFFER_SIZE_ELEMENTS(dis), &len, pc, next_pc, inst);
    CLIENT_ASSERT(len < BUFFER_SIZE_ELEMENTS(dis) - 1, "internal buffer too small");
    os_write(outfile, dis, len);
    return extra_sz;
}
/* Prints to outfile the extra_sz overflow bytes of the instruction in
 * [pc, next_pc) that print_bytes_to_file() could not fit, each line
 * preceded by extra_bytes_prefix.
 */
static void
print_extra_bytes_to_file(file_t outfile, byte *pc, byte *next_pc, int extra_sz,
                          const char *extra_bytes_prefix)
{
    char dis[MAX_PC_DIS_SZ];
    size_t len = 0;
    print_extra_bytes_to_buffer(dis, BUFFER_SIZE_ELEMENTS(dis), &len, pc, next_pc,
                                extra_sz, extra_bytes_prefix);
    CLIENT_ASSERT(len < BUFFER_SIZE_ELEMENTS(dis) - 1, "internal buffer too small");
    os_write(outfile, dis, len);
}
/* Disassembles the instruction at pc and prints the result to buf.
 * If orig_pc != pc, decodes the copy at pc as though it were located at
 * orig_pc.  Optionally prints the pc (with_pc) and the raw bytes
 * (with_bytes); overflow byte lines are prefixed with extra_bytes_prefix.
 * Returns a pointer to the pc of the next instruction.
 * Returns NULL if the instruction at pc is invalid; a best-effort line is
 * still printed in that case, guessing the instruction's length.
 */
static byte *
internal_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT, dcontext_t *dcontext,
                     byte *pc, byte *orig_pc, bool with_pc, bool with_bytes,
                     const char *extra_bytes_prefix)
{
    int extra_sz = 0;
    byte *next_pc;
    instr_t instr;
    bool valid = true;
    instr_init(dcontext, &instr);
    if (orig_pc != pc)
        next_pc = decode_from_copy(dcontext, pc, orig_pc, &instr);
    else
        next_pc = decode(dcontext, pc, &instr);
    if (next_pc == NULL) {
        valid = false;
        /* HACK: if decode_fast thinks it knows size use that */
        next_pc = decode_next_pc(dcontext, pc);
        if (next_pc == NULL) {
            /* last resort: arbitrarily pick 4 bytes */
            next_pc = pc + 4;
        }
    }
    if (with_pc) {
        print_to_buffer(buf, bufsz, sofar, "  " PFX " ",
                        PC_AS_LOAD_TGT(instr_get_isa_mode(&instr), orig_pc));
    }
    if (with_bytes) {
        extra_sz = print_bytes_to_buffer(buf, bufsz, sofar, pc, next_pc, &instr);
    }
    internal_instr_disassemble(buf, bufsz, sofar, dcontext, &instr);
    /* XXX: should we give caller control over whether \n or \r\n? */
    print_to_buffer(buf, bufsz, sofar, "\n");
    if (with_bytes && extra_sz > 0) {
        /* Align the overflow bytes under the first byte column. */
        if (with_pc)
            print_to_buffer(buf, bufsz, sofar, IF_X64_ELSE("%21s", "%13s"), " ");
        print_extra_bytes_to_buffer(buf, bufsz, sofar, pc, next_pc, extra_sz,
                                    extra_bytes_prefix);
    }
    instr_free(dcontext, &instr);
    return (valid ? next_pc : NULL);
}
/* Disassembles the instruction at pc into a stack buffer and writes the
 * result to outfile.  Returns a pointer to the pc of the next instruction,
 * or NULL if the instruction at pc is invalid.
 */
static byte *
internal_disassemble_to_file(dcontext_t *dcontext, byte *pc, byte *orig_pc,
                             file_t outfile, bool with_pc, bool with_bytes,
                             const char *extra_bytes_prefix)
{
    char dis[MAX_PC_DIS_SZ];
    size_t len = 0;
    byte *next_pc =
        internal_disassemble(dis, BUFFER_SIZE_ELEMENTS(dis), &len, dcontext, pc,
                             orig_pc, with_pc, with_bytes, extra_bytes_prefix);
    /* print_to_buffer's bool results are not propagated; buffer should be ample */
    CLIENT_ASSERT(len < BUFFER_SIZE_ELEMENTS(dis) - 1, "internal buffer too small");
    os_write(outfile, dis, len);
    return next_pc;
}
/****************************************************************************
* Exported routines
*/
/* Disassembles the instruction at pc and prints the result to outfile,
 * preceded by its pc but without its raw bytes.
 * Returns a pointer to the pc of the next instruction.
 * Returns NULL if the instruction at pc is invalid.
 */
byte *
disassemble(dcontext_t *dcontext, byte *pc, file_t outfile)
{
    return internal_disassemble_to_file(dcontext, pc, pc, outfile, true /*pc*/,
                                        false /*bytes*/, "");
}
/* Disassembles a single instruction, printing its pc and raw bytes before
 * the disassembly text.
 * Returns the pc of the next instruction.
 * Unlike the exported routines, an invalid instruction does NOT yield NULL:
 * a size is guessed instead.  Internal callers rely on always advancing,
 * and we don't want to export that quirk, so this unexported routine keeps
 * it rather than forcing NULL checks onto every call site.
 */
byte *
disassemble_with_bytes(dcontext_t *dcontext, byte *pc, file_t outfile)
{
    byte *next_pc =
        internal_disassemble_to_file(dcontext, pc, pc, outfile, true, true, "");
    if (next_pc != NULL)
        return next_pc;
    next_pc = decode_next_pc(dcontext, pc);
    if (next_pc == NULL)
        next_pc = pc + 4; /* guess size */
    return next_pc;
}
/* Disassembles a single instruction, optionally printing its pc (show_pc)
 * and its raw bytes (show_bytes) beforehand.
 * Returns the pc of the next instruction, or NULL if the instruction at pc
 * is invalid.
 * XXX: largely overlaps disassemble_with_bytes; kept separate to avoid
 * updating all callers, though it should probably be removed.
 */
byte *
disassemble_with_info(dcontext_t *dcontext, byte *pc, file_t outfile, bool show_pc,
                      bool show_bytes)
{
    return internal_disassemble_to_file(dcontext, pc, pc, outfile, show_pc, show_bytes,
                                        "");
}
/* Decodes the instruction at address copy_pc as though it were located at
 * address orig_pc, then prints it to outfile.  The address orig_pc is
 * printed first if show_pc, and the raw bytes if show_bytes.
 * Returns the address of the instruction following the copy at copy_pc,
 * or NULL if the instruction at copy_pc is invalid.
 */
byte *
disassemble_from_copy(dcontext_t *dcontext, byte *copy_pc, byte *orig_pc, file_t outfile,
                      bool show_pc, bool show_bytes)
{
    return internal_disassemble_to_file(dcontext, copy_pc, orig_pc, outfile, show_pc,
                                        show_bytes, "");
}
/* Disassembles the instruction at pc (decoded as if at orig_pc) into the
 * caller-supplied buf, optionally with its pc and raw bytes.  If printed is
 * non-NULL, the number of characters written is stored there.
 * Returns the pc of the next instruction, or NULL if the instruction at pc
 * is invalid.
 */
byte *
disassemble_to_buffer(dcontext_t *dcontext, byte *pc, byte *orig_pc, bool show_pc,
                      bool show_bytes, char *buf, size_t bufsz, int *printed OUT)
{
    size_t len = 0;
    byte *next_pc = internal_disassemble(buf, bufsz, &len, dcontext, pc, orig_pc,
                                         show_pc, show_bytes, "");
    if (printed != NULL)
        *printed = (int)len;
    return next_pc;
}
/* Prints instr's operands while suppressing implicit ones, to match native
 * Intel/AT&T-style tools.  Operands are printed in two passes: first the
 * leading set (dsts for Intel/ARM, srcs otherwise), then the trailing set,
 * with duplicate-suppression of ALU src==dst repeats via optype_already.
 * Opmask registers appearing as the k-reg of a masked op are wrapped in
 * "{...}".
 */
static void
instr_disassemble_opnds_noimplicit(char *buf, size_t bufsz, size_t *sofar INOUT,
                                   dcontext_t *dcontext, instr_t *instr)
{
    /* We need to find the non-implicit operands */
    const instr_info_t *info;
    int i, num;
    byte optype;
    /* avoid duplicate on ALU: only happens w/ 2dst, 3srcs */
    byte optype_already[3] = { 0, 0, 0 /*0 == TYPE_NONE*/ };
    opnd_t opnd;
    bool prev = false, multiple_encodings = false;
    info = instr_get_instr_info(instr);
    if (info != NULL && get_next_instr_info(info) != NULL &&
        instr_info_extra_opnds(info) == NULL)
        multiple_encodings = true;
    IF_X86({ /* XXX i#1683: not using yet on ARM so avoiding the cost */
             /* XXX: avoid the cost of encoding unless at L4 */
             info = get_encoding_info(instr);
             if (info == NULL) {
                 print_to_buffer(buf, bufsz, sofar, "<INVALID>");
                 return;
             }
    });
    /* First pass: dsts for Intel/ARM ordering, srcs otherwise. */
    num = dsts_first() ? instr_num_dsts(instr) : instr_num_srcs(instr);
    for (i = 0; i < num; i++) {
        bool printing;
        opnd = dsts_first() ? instr_get_dst(instr, i) : instr_get_src(instr, i);
        IF_X86_ELSE({ optype = instr_info_opnd_type(info, !dsts_first(), i); },
                    {
                        /* XXX i#1683: -syntax_arm currently fails here on register lists
                         * and will trigger the assert in instr_info_opnd_type(). We
                         * don't use the optype on ARM yet though.
                         */
                        optype = 0;
                    });
        /* Opmask source of a masked (non-opmask) instruction prints as {k}. */
        bool is_mask = !dsts_first() && !instr_is_opmask(instr) && opnd_is_reg(opnd) &&
            reg_is_opmask(opnd_get_reg(opnd));
        print_to_buffer(buf, bufsz, sofar, is_mask ? "{" : "");
        printing =
            opnd_disassemble_noimplicit(buf, bufsz, sofar, dcontext, instr, optype, opnd,
                                        prev, multiple_encodings, dsts_first(), &i);
        print_to_buffer(buf, bufsz, sofar, is_mask ? "}" : "");
        /* w/o the "printing" check we suppress "push esp" => "push" */
        if (printing && i < 3)
            optype_already[i] = optype;
        prev = printing || prev;
    }
    /* Second pass: the complementary operand set. */
    num = dsts_first() ? instr_num_srcs(instr) : instr_num_dsts(instr);
    for (i = 0; i < num; i++) {
        bool print = true;
        opnd = dsts_first() ? instr_get_src(instr, i) : instr_get_dst(instr, i);
        IF_X86_ELSE({ optype = instr_info_opnd_type(info, dsts_first(), i); },
                    {
                        /* XXX i#1683: see comment above */
                        optype = 0;
                    });
        IF_X86({
            /* PR 312458: still not matching Intel-style tools like windbg or udis86:
             * we need to suppress certain implicit operands, such as:
             * - div dx, ax
             * - imul ax
             * - idiv edx, eax
             * - in al
             */
            /* Don't re-do src==dst of ALU ops */
            print = ((optype != optype_already[0] && optype != optype_already[1] &&
                      optype != optype_already[2]) ||
                     /* Don't suppress 2nd of st* if FP ALU */
                     (i == 0 && opnd_is_reg(opnd) && reg_is_fp(opnd_get_reg(opnd))));
        });
        if (print) {
            bool is_mask = dsts_first() && !instr_is_opmask(instr) && opnd_is_reg(opnd) &&
                reg_is_opmask(opnd_get_reg(opnd));
            print_to_buffer(buf, bufsz, sofar, is_mask ? "{" : "");
            prev = opnd_disassemble_noimplicit(buf, bufsz, sofar, dcontext, instr, optype,
                                               opnd, prev, multiple_encodings,
                                               !dsts_first(), &i) ||
                prev;
            print_to_buffer(buf, bufsz, sofar, is_mask ? "}" : "");
        }
    }
}
/* Whether "[<N>byte]" size suffixes should be appended to instr's operands.
 * Globally disabled by DR_DISASM_NO_OPND_SIZE; otherwise always on (the
 * mismatch-only heuristic below is compiled out).
 */
static bool
instr_needs_opnd_size_sfx(instr_t *instr)
{
# ifdef DISASM_SUFFIX_ONLY_ON_MISMATCH /* disabled: see below */
    opnd_t src, dst;
    if (TEST(DR_DISASM_NO_OPND_SIZE, DYNAMO_OPTION(disasm_mask)))
        return false;
    /* We really only care about the primary src and primary dst */
    if (instr_num_srcs(instr) == 0 || instr_num_dsts(instr) == 0)
        return false;
    src = instr_get_src(instr, 0);
    /* Avoid opcodes that have a 1-byte immed but all other operands
     * the same size from triggering suffixes
     */
    if (opnd_is_immed(src) && instr_num_srcs(instr) > 1)
        return false;
    dst = instr_get_dst(instr, 0);
    return (opnd_get_size(src) != opnd_get_size(dst) ||
            /* We haven't sign-extended yet -- if we did maybe we wouldn't
             * need this. Good to show size on mov of immed into memory.
             */
            opnd_is_immed_int(src) || opnd_is_reg_partial(src) ||
            opnd_is_reg_partial(dst));
# else
    /* Originally sizes were only shown when they mismatch or can't be
     * inferred (code above), but that gets a little tricky, and it's nice
     * to see the size of all memory operands.  Suffixes are never printed
     * for immeds or non-partial regs, so they are simply enabled for all
     * instructions unless globally turned off.
     */
    return !TEST(DR_DISASM_NO_OPND_SIZE, DYNAMO_OPTION(disasm_mask));
# endif
}
/* If *src (source operand number srcnum of instr) is an integer
 * immediate, widens its recorded size to the common size of the other
 * operands so it disassembles sign-extended (PR 327775).  The resize is
 * skipped when the remaining operands disagree on size or include
 * non-GPR registers (immediates there are indices, not data), and for
 * interrupt instructions.  No-op on architectures other than X86/ARM.
 */
static void
sign_extend_immed(instr_t *instr, int srcnum, opnd_t *src)
{
    opnd_size_t opsz = OPSZ_NA;
    bool resize = true;
# if !defined(X86) && !defined(ARM)
    /* Automatic sign extension is probably only useful on Intel but
     * is left enabled on ARM (AArch32) as it is what some tests expect.
     */
    return;
# endif
    if (opnd_is_immed_int(*src)) {
        /* PR 327775: force operand to sign-extend if all other operands
         * are of a larger and identical-to-each-other size (since we
         * don't want to extend immeds used in stores) and are not
         * multimedia registers (since immeds there are always indices).
         */
        int j;
        /* Scan the other sources for a single common operand size. */
        for (j = 0; j < instr_num_srcs(instr); j++) {
            if (j != srcnum) {
                if (opnd_is_reg(instr_get_src(instr, j)) &&
                    !reg_is_gpr(opnd_get_reg(instr_get_src(instr, j)))) {
                    resize = false;
                    break;
                }
                if (opsz == OPSZ_NA)
                    opsz = opnd_get_size(instr_get_src(instr, j));
                else if (opsz != opnd_get_size(instr_get_src(instr, j))) {
                    resize = false;
                    break;
                }
            }
        }
        /* The destinations must agree on that same size as well. */
        for (j = 0; j < instr_num_dsts(instr); j++) {
            if (opnd_is_reg(instr_get_dst(instr, j)) &&
                !reg_is_gpr(opnd_get_reg(instr_get_dst(instr, j)))) {
                resize = false;
                break;
            }
            if (opsz == OPSZ_NA)
                opsz = opnd_get_size(instr_get_dst(instr, j));
            else if (opsz != opnd_get_size(instr_get_dst(instr, j))) {
                resize = false;
                break;
            }
        }
        /* Interrupt immediates (e.g., the vector number) keep their size. */
        if (resize && opsz != OPSZ_NA && !instr_is_interrupt(instr))
            opnd_set_size(src, opsz);
    }
}
/*
 * Prints the instruction instr to file outfile.
 * Does not print addr16 or data16 prefixes for other than just-decoded instrs,
 * and does not check that the instruction has a valid encoding.
 * Prints each operand with leading zeros indicating the size.
 */
static void
internal_instr_disassemble(char *buf, size_t bufsz, size_t *sofar INOUT,
                           dcontext_t *dcontext, instr_t *instr)
{
    int i;
    const instr_info_t *info;
    const char *name;
    int name_width = 6; /* pad the opcode name to a fixed-width column */
    bool use_size_sfx = false;
    size_t offs_pre_name, offs_post_name, offs_pre_opnds;
    /* Special-case instrs that have no disassemblable operands. */
    if (!instr_valid(instr)) {
        print_to_buffer(buf, bufsz, sofar, "<INVALID>");
        return;
    } else if (instr_is_label(instr)) {
        print_to_buffer(buf, bufsz, sofar, "<label>");
        return;
    } else if (instr_opcode_valid(instr)) {
# ifdef AARCH64
        /* We do not use instr_info_t encoding info on AArch64. */
        info = NULL;
        name = get_opcode_name(instr_get_opcode(instr));
# else
        info = instr_get_instr_info(instr);
        name = info->name;
# endif
    } else
        name = "<RAW>";
    print_instr_prefixes(dcontext, instr, buf, bufsz, sofar);
    offs_pre_name = *sofar;
    print_opcode_name(instr, name, buf, bufsz, sofar);
    offs_post_name = *sofar;
    /* Pad out to the operand column based on how long the name was. */
    name_width -= (int)(offs_post_name - offs_pre_name);
    print_to_buffer(buf, bufsz, sofar, " ");
    for (i = 0; i < name_width; i++)
        print_to_buffer(buf, bufsz, sofar, " ");
    offs_pre_opnds = *sofar;
    /* operands */
    if (!instr_operands_valid(instr)) {
        /* we could decode the raw bits, but caller should if they want that */
        byte *raw = instr_get_raw_bits(instr);
        uint len = instr_length(dcontext, instr);
        byte *b;
        print_to_buffer(buf, bufsz, sofar, "<raw " PFX "-" PFX " ==", raw, raw + len);
        /* Print at most 9 raw bytes, eliding the rest. */
        for (b = raw; b < raw + len && b < raw + 9; b++)
            print_to_buffer(buf, bufsz, sofar, " %02x", *b);
        if (len > 9)
            print_to_buffer(buf, bufsz, sofar, " ...");
        print_to_buffer(buf, bufsz, sofar, ">");
        return;
    }
    /* Non-DR syntaxes suppress implicit operands and use their own layout. */
    if (TESTANY(DR_DISASM_INTEL | DR_DISASM_ATT | DR_DISASM_ARM,
                DYNAMO_OPTION(disasm_mask))) {
        instr_disassemble_opnds_noimplicit(buf, bufsz, sofar, dcontext, instr);
        /* we avoid trailing spaces if no operands */
        if (*sofar == offs_pre_opnds) {
            *sofar = offs_post_name;
            buf[offs_post_name] = '\0';
        }
        return;
    }
    /* DR syntax: print all srcs, then " ->" and all dsts. */
    use_size_sfx = instr_needs_opnd_size_sfx(instr);
    for (i = 0; i < instr_num_srcs(instr); i++) {
        opnd_t src = instr_get_src(instr, i);
        if (i > 0)
            print_to_buffer(buf, bufsz, sofar, " ");
        sign_extend_immed(instr, i, &src);
        /* XXX i#1312: we may want to more closely resemble ATT and Intel syntax w.r.t.
         * EVEX mask operand. Tools tend to print the mask in conjunction with the
         * destination in {} brackets.
         */
        bool is_mask = !instr_is_opmask(instr) && opnd_is_reg(src) &&
            reg_is_opmask(opnd_get_reg(src));
        print_to_buffer(buf, bufsz, sofar, is_mask ? "{" : "");
        internal_opnd_disassemble(buf, bufsz, sofar, dcontext, src, use_size_sfx);
        print_to_buffer(buf, bufsz, sofar, is_mask ? "}" : "");
    }
    if (instr_num_dsts(instr) > 0) {
        print_to_buffer(buf, bufsz, sofar, " ->");
        for (i = 0; i < instr_num_dsts(instr); i++) {
            print_to_buffer(buf, bufsz, sofar, " ");
            internal_opnd_disassemble(buf, bufsz, sofar, dcontext,
                                      instr_get_dst(instr, i), use_size_sfx);
        }
    }
    /* we avoid trailing spaces if no operands */
    if (*sofar == offs_pre_opnds) {
        *sofar = offs_post_name;
        buf[offs_post_name] = '\0';
    }
}
/* Disassembles instr and writes the resulting text to outfile.
 * Formatting follows internal_instr_disassemble(): no addr16/data16
 * prefixes for other than just-decoded instrs, operands printed with
 * leading zeros indicating size, and encoding validity is not checked.
 */
void
instr_disassemble(dcontext_t *dcontext, instr_t *instr, file_t outfile)
{
    char dis_buf[MAX_INSTR_DIS_SZ];
    size_t len = 0;
    internal_instr_disassemble(dis_buf, BUFFER_SIZE_ELEMENTS(dis_buf), &len, dcontext,
                               instr);
    /* The bool results from print_to_buffer are not propagated; the local
     * buffer is expected to be large enough for any single instruction.
     */
    CLIENT_ASSERT(len < BUFFER_SIZE_ELEMENTS(dis_buf) - 1, "internal buffer too small");
    os_write(outfile, dis_buf, len);
}
/*
 * Prints the instruction \p instr to the buffer \p buf.
 * Always null-terminates, and will not print more than \p bufsz characters,
 * which includes the final null character.
 * Returns the number of characters printed, not including the final null.
 *
 * Does not print address-size or data-size prefixes for other than
 * just-decoded instrs, and does not check that the instruction has a
 * valid encoding. Prints each operand with leading zeros indicating
 * the size.
 * Uses DR syntax unless otherwise specified (see disassemble_set_syntax()).
 */
size_t
instr_disassemble_to_buffer(dcontext_t *dcontext, instr_t *instr, char *buf, size_t bufsz)
{
    size_t sofar = 0; /* running count of characters printed */
    internal_instr_disassemble(buf, bufsz, &sofar, dcontext, instr);
    return sofar;
}
# ifndef STANDALONE_DECODER
/* Returns a short human-readable description of the type of exit stub l
 * of fragment f, e.g. "call", "jmp/jcc", "ret", "indcall", "indjmp".
 */
static inline const char *
exit_stub_type_desc(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
    if (LINKSTUB_DIRECT(l->flags)) {
        if (EXIT_IS_CALL(l->flags))
            return "call";
        else if (EXIT_IS_JMP(l->flags))
            return "jmp/jcc";
        else {
            /* FIXME: mark these appropriately */
            return "fall-through/speculated/IAT";
        }
    }
    /* A non-direct stub must be indirect. */
    CLIENT_ASSERT(LINKSTUB_INDIRECT(l->flags), "invalid exit stub");
    if (TEST(LINK_RETURN, l->flags))
        return "ret";
    else if (EXIT_IS_CALL(l->flags))
        return "indcall";
    else if (TEST(LINK_JMP, l->flags)) /* JMP or IND_JMP_PLT */
        return "indjmp";
# ifdef WINDOWS
    if (is_shared_syscall_routine(dcontext, EXIT_TARGET_TAG(dcontext, f, l)))
        return "shared_syscall";
# endif
    CLIENT_ASSERT(false, "unknown exit stub type");
    return "<unknown>";
}
/* Disassemble and pretty-print the generated code for fragment f.
 * Header and body control whether header info and code itself are printed
 */
static void
common_disassemble_fragment(dcontext_t *dcontext, fragment_t *f_in, file_t outfile,
                            bool header, bool body)
{
    cache_pc entry_pc, prefix_pc;
    cache_pc pc;
    cache_pc body_end_pc;
    cache_pc end_pc;
    linkstub_t *l;
    int exit_num = 0;
# ifdef PROFILE_RDTSC
    cache_pc profile_end = 0;
# endif
    bool alloc; /* whether f was recreated here and must be freed below */
    fragment_t *f = f_in;
    if (header) {
        /* Summary line: tag, flags, and textual decodings of key flag bits. */
# ifdef DEBUG
        print_file(
            outfile, "Fragment %d, tag " PFX ", flags 0x%x, %s%s%s%ssize %d%s%s:\n",
            f->id,
# else
        print_file(
            outfile, "Fragment tag " PFX ", flags 0x%x, %s%s%s%ssize %d%s%s:\n",
# endif
            f->tag, f->flags, IF_X64_ELSE(FRAG_IS_32(f->flags) ? "32-bit, " : "", ""),
            TEST(FRAG_COARSE_GRAIN, f->flags) ? "coarse, " : "",
            (TEST(FRAG_SHARED, f->flags)
                 ? "shared, "
                 : (SHARED_FRAGMENTS_ENABLED()
                        ? (TEST(FRAG_TEMP_PRIVATE, f->flags) ? "private temp, "
                                                             : "private, ")
                        : "")),
            (TEST(FRAG_IS_TRACE, f->flags))
                ? "trace, "
                : (TEST(FRAG_IS_TRACE_HEAD, f->flags)) ? "tracehead, " : "",
            f->size, (TEST(FRAG_CANNOT_BE_TRACE, f->flags)) ? ", cannot be trace" : "",
            (TEST(FRAG_MUST_END_TRACE, f->flags)) ? ", must end trace" : "",
            (TEST(FRAG_CANNOT_DELETE, f->flags)) ? ", cannot delete" : "");
        DOLOG(2, LOG_SYMBOLS, { /* FIXME: affects non-logging uses... dump_traces, etc. */
            char symbolbuf[MAXIMUM_SYMBOL_LENGTH];
            print_symbolic_address(f->tag, symbolbuf,
                                   sizeof(symbolbuf), false);
            print_file(outfile, "\t%s\n", symbolbuf);
        });
    }
    if (!body)
        return;
    /* Fake fragments lack linkstubs: recreate a full copy to walk them. */
    if (body && TEST(FRAG_FAKE, f->flags)) {
        alloc = true;
        f = fragment_recreate_with_linkstubs(dcontext, f_in);
    } else {
        alloc = false;
    }
    end_pc = f->start_pc + f->size;
    body_end_pc = fragment_body_end_pc(dcontext, f);
    entry_pc = FCACHE_ENTRY_PC(f);
    prefix_pc = FCACHE_PREFIX_ENTRY_PC(f);
    pc = FCACHE_IBT_ENTRY_PC(f);
    /* Disassemble any ibt/prefix code that precedes the normal entry. */
    if (pc != entry_pc) {
        if (pc != prefix_pc) {
            /* indirect branch target prefix exists */
            print_file(outfile, "  -------- indirect branch target entry: --------\n");
        }
        while (pc < entry_pc) {
            if (pc == prefix_pc) {
                print_file(outfile, "  -------- prefix entry: --------\n");
            }
            pc = (cache_pc)disassemble_with_bytes(dcontext, (byte *)pc, outfile);
        }
        print_file(outfile, "  -------- normal entry: --------\n");
    }
    CLIENT_ASSERT(pc == entry_pc, "disassemble_fragment: invalid prefix");
# ifdef PROFILE_RDTSC
    if (dynamo_options.profile_times && (f->flags & FRAG_IS_TRACE) != 0) {
        int sz = profile_call_size();
        profile_end = pc + sz;
        if (d_r_stats->loglevel < 3) {
            /* don't print profile stuff to save space */
            print_file(outfile, "  " PFX "..." PFX " = profile code\n", pc,
                       (pc + sz - 1));
            pc += sz;
        } else {
            /* print profile stuff, but delineate it: */
            print_file(outfile, "  -------- profile call: --------\n");
        }
    }
# endif
    /* The fragment body proper. */
    while (pc < body_end_pc) {
        pc = (cache_pc)disassemble_with_bytes(dcontext, (byte *)pc, outfile);
# ifdef PROFILE_RDTSC
        if (dynamo_options.profile_times && (f->flags & FRAG_IS_TRACE) != 0 &&
            pc == profile_end) {
            print_file(outfile, "  -------- end profile call -----\n");
        }
# endif
    }
    /* Walk each exit stub, locating where its code lives (inline, separate,
     * coarse entrance stub, or nowhere if linked) and disassembling it.
     */
    for (l = FRAGMENT_EXIT_STUBS(f); l; l = LINKSTUB_NEXT_EXIT(l)) {
        cache_pc next_stop_pc;
        linkstub_t *nxt;
        /* store fragment pc since we don't want to walk forward in fragment */
        cache_pc frag_pc = pc;
        print_file(outfile,
                   "  -------- exit stub %d: -------- <target: " PFX "> type: %s\n",
                   exit_num, EXIT_TARGET_TAG(dcontext, f, l),
                   exit_stub_type_desc(dcontext, f, l));
        if (!EXIT_HAS_LOCAL_STUB(l->flags, f->flags)) {
            if (EXIT_STUB_PC(dcontext, f, l) != NULL) {
                pc = EXIT_STUB_PC(dcontext, f, l);
                next_stop_pc = pc + linkstub_size(dcontext, f, l);
            } else if (TEST(FRAG_COARSE_GRAIN, f->flags)) {
                cache_pc cti_pc = EXIT_CTI_PC(f, l);
                if (cti_pc == end_pc) {
                    /* must be elided final jmp */
                    print_file(outfile, "  <no final jmp since elided>\n");
                    print_file(outfile, "  <no stub since linked and frozen>\n");
                    CLIENT_ASSERT(pc == end_pc, "disassemble_fragment: invalid end");
                    next_stop_pc = end_pc;
                } else {
                    pc = entrance_stub_from_cti(cti_pc);
                    if (coarse_is_entrance_stub(pc)) {
                        next_stop_pc = pc + linkstub_size(dcontext, f, l);
                    } else {
                        CLIENT_ASSERT(in_fcache(pc),
                                      "disassemble_fragment: invalid exit stub");
                        print_file(outfile, "  <no stub since linked and frozen>\n");
                        next_stop_pc = pc;
                    }
                }
            } else {
                if (TEST(LINK_SEPARATE_STUB, l->flags))
                    print_file(outfile, "  <no stub created since linked>\n");
                else if (!EXIT_HAS_STUB(l->flags, f->flags))
                    print_file(outfile, "  <no stub needed: -no_indirect_stubs>\n");
                else
                    CLIENT_ASSERT(false, "disassemble_fragment: invalid exit stub");
                next_stop_pc = pc;
            }
        } else {
            /* Inline stub: it ends where the next local stub begins. */
            for (nxt = LINKSTUB_NEXT_EXIT(l); nxt != NULL;
                 nxt = LINKSTUB_NEXT_EXIT(nxt)) {
                if (EXIT_HAS_LOCAL_STUB(nxt->flags, f->flags))
                    break;
            }
            if (nxt != NULL)
                next_stop_pc = EXIT_STUB_PC(dcontext, f, nxt);
            else
                next_stop_pc = pc + linkstub_size(dcontext, f, l);
            if (LINKSTUB_DIRECT(l->flags))
                next_stop_pc -= DIRECT_EXIT_STUB_DATA_SZ;
            CLIENT_ASSERT(next_stop_pc != NULL, "disassemble_fragment: invalid stubs");
        }
        while (pc < next_stop_pc) {
            pc = (cache_pc)disassemble_with_bytes(dcontext, (byte *)pc, outfile);
        }
        /* Direct stubs may carry trailing data holding the stored target. */
        if (LINKSTUB_DIRECT(l->flags) && DIRECT_EXIT_STUB_DATA_SZ > 0) {
            ASSERT(DIRECT_EXIT_STUB_DATA_SZ == sizeof(cache_pc));
            if (stub_is_patched(f, EXIT_STUB_PC(dcontext, f, l))) {
                print_file(outfile, "  <stored target: " PFX ">\n",
                           *(cache_pc *)next_stop_pc);
            }
            pc += DIRECT_EXIT_STUB_DATA_SZ;
        }
        /* point pc back at tail of fragment code if it was off in separate stub land */
        if (TEST(LINK_SEPARATE_STUB, l->flags))
            pc = frag_pc;
        exit_num++;
    }
    /* Selfmod fragments keep a copy of the original app code at the end. */
    if (TEST(FRAG_SELFMOD_SANDBOXED, f->flags)) {
        DOSTATS({ /* skip stored sz */ end_pc -= sizeof(uint); });
        print_file(outfile, " -------- original code (from " PFX "-" PFX ") -------- \n",
                   f->tag, (f->tag + (end_pc - pc)));
        while (pc < end_pc) {
            pc = (cache_pc)disassemble_with_bytes(dcontext, (byte *)pc, outfile);
        }
    }
    if (alloc)
        fragment_free(dcontext, f);
}
# ifdef DEBUG
/* Logs fragment f (header plus, unless just_header, its full body) to the
 * per-thread log file when LOG_EMIT logging is enabled.
 */
void
disassemble_fragment(dcontext_t *dcontext, fragment_t *f, bool just_header)
{
    if ((d_r_stats->logmask & LOG_EMIT) != 0) {
        common_disassemble_fragment(dcontext, f, THREAD, true, !just_header);
        if (!just_header)
            LOG(THREAD, LOG_EMIT, 1, "\n");
    }
}
# endif /* DEBUG */
/* Prints only the header info (tag, flags, size) of fragment f. */
void
disassemble_fragment_header(dcontext_t *dcontext, fragment_t *f, file_t outfile)
{
    common_disassemble_fragment(dcontext, f, outfile, true, false);
}
/* Prints only the code body of fragment f, without the header line. */
void
disassemble_fragment_body(dcontext_t *dcontext, fragment_t *f, file_t outfile)
{
    common_disassemble_fragment(dcontext, f, outfile, false, true);
}
/* Builds the application basic block starting at tag, prints it to
 * outfile, and frees the temporary instruction list.
 */
void
disassemble_app_bb(dcontext_t *dcontext, app_pc tag, file_t outfile)
{
    instrlist_t *ilist = build_app_bb_ilist(dcontext, tag, outfile);
    instrlist_clear_and_destroy(dcontext, ilist);
}
# endif /* !STANDALONE_DECODER */
/***************************************************************************/
/* Two entry points to the disassembly routines: */
/* Prints every instruction of ilist to outfile, annotating each line with
 * its offset, app/meta marker ('L'/'m'), and decode level (0-4).  Level 4
 * instrs are re-encoded into a scratch buffer to obtain raw bytes.
 */
void
instrlist_disassemble(dcontext_t *dcontext, app_pc tag, instrlist_t *ilist,
                      file_t outfile)
{
    int len, sz;
    instr_t *instr;
    byte *addr;
    byte *next_addr;
    byte bytes[64]; /* scratch array for encoding instrs */
    int level;
    int offs = 0;
    /* we want to print out the decode level each instr is at, so we have to
     * do a little work
     */
    print_file(outfile, "TAG  " PFX "\n", tag);
    for (instr = instrlist_first(ilist); instr; instr = instr_get_next(instr)) {
        DOLOG(5, LOG_ALL, {
            if (instr_raw_bits_valid(instr)) {
                print_file(outfile, " <raw " PFX "-" PFX ">::\n",
                           instr_get_raw_bits(instr),
                           instr_get_raw_bits(instr) + instr_length(dcontext, instr));
            }
            if (instr_get_translation(instr) != NULL) {
                print_file(outfile, " <translation " PFX ">::\n",
                           instr_get_translation(instr));
            }
        });
        if (instr_needs_encoding(instr)) {
            byte *nxt_pc;
            level = 4;
            /* encode instr and then output as BINARY */
            nxt_pc = instr_encode_ignore_reachability(dcontext, instr, bytes);
            ASSERT(nxt_pc != NULL);
            len = (int)(nxt_pc - bytes);
            addr = bytes;
            CLIENT_ASSERT(len < 64, "instrlist_disassemble: too-long instr");
        } else {
            /* Already-encoded instr: classify its decode level. */
            addr = instr_get_raw_bits(instr);
            len = instr_length(dcontext, instr);
            if (instr_operands_valid(instr))
                level = 3;
            else if (instr_opcode_valid(instr))
                level = 2;
            else if (decode_sizeof(dcontext, addr, NULL _IF_X86_64(NULL)) == len)
                level = 1;
            else
                level = 0;
        }
        /* Print out individual instructions.  Remember that multiple
         * instructions may be packed into a single instr.
         */
        if (level > 3 ||
            /* Print as an instr for L3 to get IT predicates */
            (level == 3 && !instr_is_cti_short_rewrite(instr, addr))) {
            /* for L4 we want to see instr targets and don't care
             * as much about raw bytes
             */
            int extra_sz;
            if (level == 3) {
                print_file(outfile, " +%-4d %c%d " IF_X64_ELSE("%20s", "%12s"), offs,
                           instr_is_app(instr) ? 'L' : 'm', level, " ");
            } else {
                print_file(outfile, " +%-4d %c%d @" PFX " ", offs,
                           instr_is_app(instr) ? 'L' : 'm', level, instr);
            }
            extra_sz = print_bytes_to_file(outfile, addr, addr + len, instr);
            instr_disassemble(dcontext, instr, outfile);
            print_file(outfile, "\n");
            if (extra_sz > 0) {
                /* Overflow bytes that did not fit on the first line. */
                print_file(outfile, IF_X64_ELSE("%30s", "%22s"), " ");
                print_extra_bytes_to_file(outfile, addr, addr + len, extra_sz, "");
            }
            offs += len;
            len = 0; /* skip loop */
        }
        /* Lower-level instrs: decode and print directly from raw bytes,
         * one contained instruction at a time.
         */
        while (len) {
            print_file(outfile, " +%-4d %c%d " IF_X64_ELSE("%20s", "%12s"), offs,
                       instr_is_app(instr) ? 'L' : 'm', level, " ");
            next_addr = internal_disassemble_to_file(
                dcontext, addr, addr, outfile, false, true,
                IF_X64_ELSE("                               ",
                            "                       "));
            if (next_addr == NULL)
                break;
            sz = (int)(next_addr - addr);
            CLIENT_ASSERT(sz <= len, "instrlist_disassemble: invalid length");
            len -= sz;
            addr += sz;
            offs += sz;
        }
        DOLOG(5, LOG_ALL, { print_file(outfile, "---- multi-instr boundary ----\n"); });
# ifdef CUSTOM_EXIT_STUBS
        /* custom exit stub? */
        if (instr_is_exit_cti(instr) && instr_is_app(instr)) {
            instrlist_t *custom = instr_exit_stub_code(instr);
            if (custom != NULL) {
                print_file(outfile, "\t=> custom exit stub code:\n");
                instrlist_disassemble(dcontext, instr_get_branch_target_pc(instr), custom,
                                      outfile);
            }
        }
# endif
    }
    print_file(outfile, "END " PFX "\n\n", tag);
}
#endif /* INTERNAL || CLIENT_INTERFACE */
/***************************************************************************/
#ifndef STANDALONE_DECODER
/* If CALLSTACK_MODULE_INFO is set in flags, appends the module (name or,
 * with CALLSTACK_MODULE_PATH, full path) and offset of pc to the buffer,
 * in XML or plain-text form per CALLSTACK_USE_XML.  No-op if pc is not
 * inside a known module.
 */
static void
callstack_dump_module_info(char *buf, size_t bufsz, size_t *sofar, app_pc pc, uint flags)
{
    if (TEST(CALLSTACK_MODULE_INFO, flags)) {
        module_area_t *ma;
        os_get_module_info_lock();
        ma = module_pc_lookup(pc);
        if (ma != NULL) {
            print_to_buffer(
                buf, bufsz, sofar,
                TEST(CALLSTACK_USE_XML, flags) ? "mod=\"" PFX "\" offs=\"" PFX "\" "
                                               : " <%s+" PIFX ">",
                TEST(CALLSTACK_MODULE_PATH, flags) ? ma->full_path
                                                   : GET_MODULE_NAME(&ma->names),
                pc - ma->start);
        }
        os_get_module_info_unlock();
    }
}
/* Walks the frame-pointer chain starting at ebp and appends a formatted
 * call stack (XML or plain text per flags) to the buffer.  cur_pc, if
 * non-NULL, is printed as the current pc before the frames.  The walk
 * stops on an unreadable frame, a self-referential frame pointer, or
 * after 100 frames.
 */
static void
internal_dump_callstack_to_buffer(char *buf, size_t bufsz, size_t *sofar, app_pc cur_pc,
                                  app_pc ebp, uint flags)
{
    ptr_uint_t *pc = (ptr_uint_t *)ebp;
    int num = 0;
    LOG_DECLARE(char symbolbuf[MAXIMUM_SYMBOL_LENGTH];)
    const char *symbol_name = "";
    if (TEST(CALLSTACK_ADD_HEADER, flags)) {
        print_to_buffer(buf, bufsz, sofar,
                        TEST(CALLSTACK_USE_XML, flags) ? "\t<call-stack tid=" TIDFMT ">\n"
                                                       : "Thread " TIDFMT
                                                         " call stack:\n",
                        /* We avoid TLS tid to work on crashes */
                        IF_WINDOWS_ELSE(d_r_get_thread_id(), get_sys_thread_id()));
    }
    if (cur_pc != NULL) {
        DOLOG(1, LOG_SYMBOLS, {
            print_symbolic_address(cur_pc, symbolbuf, sizeof(symbolbuf), false);
            symbol_name = symbolbuf;
        });
        print_to_buffer(buf, bufsz, sofar,
                        TEST(CALLSTACK_USE_XML, flags) ? "\t<current_pc=\"" PFX
                                                         "\" name=\"%s\" "
                                                       : "\t" PFX " %s ",
                        cur_pc, symbol_name);
        callstack_dump_module_info(buf, bufsz, sofar, cur_pc, flags);
        print_to_buffer(buf, bufsz, sofar,
                        TEST(CALLSTACK_USE_XML, flags) ? "/>\n" : "\n");
    }
    /* Each frame is [saved ebp][return addr]; stop once unreadable. */
    while (pc != NULL && is_readable_without_exception_query_os((byte *)pc, 8)) {
        DOLOG(1, LOG_SYMBOLS, {
            print_symbolic_address((app_pc) * (pc + 1), symbolbuf, sizeof(symbolbuf),
                                   false);
            symbol_name = symbolbuf;
        });
        print_to_buffer(buf, bufsz, sofar,
                        TEST(CALLSTACK_USE_XML, flags) ? "\t\t" : "\t");
        if (TEST(CALLSTACK_FRAME_PTR, flags)) {
            print_to_buffer(buf, bufsz, sofar,
                            TEST(CALLSTACK_USE_XML, flags)
                                ? "<frame ptr=\"" PFX "\" parent=\"" PFX "\" "
                                : "frame ptr " PFX " => parent " PFX ", ",
                            pc, *pc);
        }
        print_to_buffer(buf, bufsz, sofar,
                        TEST(CALLSTACK_USE_XML, flags) ? "ret=\"" PFX "\" name=\"%s\" "
                                                       : PFX " %s ",
                        *(pc + 1), symbol_name);
        callstack_dump_module_info(buf, bufsz, sofar, (app_pc) * (pc + 1), flags);
        print_to_buffer(buf, bufsz, sofar,
                        TEST(CALLSTACK_USE_XML, flags) ? "/>\n" : "\n");
        num++;
        /* yes I've seen weird recursive cases before */
        if (pc == (ptr_uint_t *)*pc || num > 100)
            break;
        pc = (ptr_uint_t *)*pc;
    }
    if (TESTALL(CALLSTACK_USE_XML | CALLSTACK_ADD_HEADER, flags))
        print_to_buffer(buf, bufsz, sofar, "\t</call-stack>\n");
}
/* Formats the call stack rooted at ebp into a local buffer and prints it
 * to outfile, with header and frame pointers; XML form if dump_xml.
 */
static void
internal_dump_callstack(app_pc cur_pc, app_pc ebp, file_t outfile, bool dump_xml,
                        bool header)
{
    char buf[MAX_LOG_LENGTH];
    size_t sofar = 0;
    internal_dump_callstack_to_buffer(buf, BUFFER_SIZE_ELEMENTS(buf), &sofar, cur_pc, ebp,
                                      CALLSTACK_ADD_HEADER | CALLSTACK_FRAME_PTR |
                                          (dump_xml ? CALLSTACK_USE_XML : 0));
    print_file(outfile, "%s", buf);
}
/* Public entry: dumps the call stack at pc/ebp to outfile. */
void
dump_callstack(app_pc pc, app_pc ebp, file_t outfile, bool dump_xml)
{
    internal_dump_callstack(pc, ebp, outfile, dump_xml, true /*header*/);
}
/* Public entry: appends the call stack at pc/ebp to a caller buffer,
 * formatted per the CALLSTACK_* flags.
 */
void
dump_callstack_to_buffer(char *buf, size_t bufsz, size_t *sofar, app_pc pc, app_pc ebp,
                         uint flags)
{
    internal_dump_callstack_to_buffer(buf, bufsz, sofar, pc, ebp, flags);
}
# ifdef DEBUG
/* Logs the app call stack from dcontext's saved machine context. */
void
dump_mcontext_callstack(dcontext_t *dcontext)
{
    priv_mcontext_t *mc = get_mcontext(dcontext);
    LOG(THREAD, LOG_ALL, 1, "Call stack:\n");
    internal_dump_callstack((app_pc)mc->pc, (app_pc)get_mcontext_frame_ptr(dcontext, mc),
                            THREAD, DUMP_NOT_XML, false /*!header*/);
}
# endif
/* Dumps DynamoRIO's own call stack (from our current frame pointer). */
void
dump_dr_callstack(file_t outfile)
{
    /* Since we're in DR we can't just clobber the saved app fields --
     * so we save them first
     */
    app_pc our_ebp = 0;
    GET_FRAME_PTR(our_ebp);
    LOG(outfile, LOG_ALL, 1, "DynamoRIO call stack:\n");
    internal_dump_callstack(NULL /* don't care about cur pc */, our_ebp, outfile,
                            DUMP_NOT_XML, false /*!header*/);
}
#endif /* !STANDALONE_DECODER */
/***************************************************************************/
| 1 | 17,034 | > k1 src0 src2 -> dst s/src2/src1/ nit: k1 is actually src0 according to instr_get_src(instr, 0) which makes this a little confusing | DynamoRIO-dynamorio | c |
@@ -192,7 +192,7 @@ func (kc *KMDController) StartKMD(args KMDStartArgs) (alreadyRunning bool, err e
return false, errors.New("bad kmd data dir")
}
if (dataDirStat.Mode() & 0077) != 0 {
- logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777)
+ logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777, dataDirStat.Mode()&0077)
return false, errors.New("kmd data dir not secure")
}
} else { | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package nodecontrol
import (
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"time"
"github.com/algorand/go-algorand/cmd/kmd/codes"
"github.com/algorand/go-algorand/daemon/kmd/client"
"github.com/algorand/go-algorand/daemon/kmd/lib/kmdapi"
"github.com/algorand/go-algorand/daemon/kmd/server"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/tokens"
)
const (
	// DefaultKMDDataDir is exported so tests can initialize it with config info
	DefaultKMDDataDir = "kmd-v0.5"
	// DefaultKMDDataDirPerms is exported so tests can initialize the default kmd data dir
	DefaultKMDDataDirPerms = 0700
	// kmdStdErrFilename is the name of the file in <kmddatadir> where stderr will be captured
	kmdStdErrFilename = "kmd-err.log"
	// kmdStdOutFilename is the name of the file in <kmddatadir> where stdout will be captured
	kmdStdOutFilename = "kmd-out.log"
)

// KMDController wraps directories and processes involved in running kmd
type KMDController struct {
	kmd        string // path to binary
	kmdDataDir string // kmd data directory passed via -d
	kmdPIDPath string // path to kmd's PID file within kmdDataDir
}
// MakeKMDController initializes a KMDController for the given data and
// binary directories.
func MakeKMDController(kmdDataDir, binDir string) *KMDController {
	kc := &KMDController{}
	kc.SetKMDBinDir(binDir)
	kc.SetKMDDataDir(kmdDataDir)
	return kc
}

// SetKMDBinDir updates the KMDController for a binDir that contains `kmd`
func (kc *KMDController) SetKMDBinDir(binDir string) {
	kc.kmd = filepath.Join(binDir, "kmd")
}

// SetKMDDataDir updates the KMDController for a kmd data directory.
// It also recomputes the derived PID file path.
func (kc *KMDController) SetKMDDataDir(kmdDataDir string) {
	kc.kmdDataDir = kmdDataDir
	kc.kmdPIDPath = filepath.Join(kmdDataDir, server.PIDFilename)
}
// KMDClient reads an APIToken and netFile from the kmd dataDir, and then
// builds a KMDClient for the running kmd process
func (kc KMDController) KMDClient() (kmdClient client.KMDClient, err error) {
	// Grab the KMD API token
	apiToken, err := tokens.GetAndValidateAPIToken(kc.kmdDataDir, tokens.KmdTokenFilename)
	if err != nil {
		return
	}

	// Grab the socket file location; the net file's first line holds the
	// unix socket path the running kmd is listening on.
	netFile := filepath.Join(kc.kmdDataDir, server.NetFilename)
	sockPath, err := util.GetFirstLineFromFile(netFile)
	if err != nil {
		return
	}

	// Build the client
	kmdClient, err = client.MakeKMDClient(sockPath, apiToken)
	return
}
// buildKMDCommand constructs (but does not start) an exec.Cmd that launches
// kmd against this controller's data directory with the requested timeout.
func (kc KMDController) buildKMDCommand(args KMDStartArgs) *exec.Cmd {
	startArgs := []string{
		"-d", kc.kmdDataDir,
		"-t", fmt.Sprintf("%d", args.TimeoutSecs),
	}
	return exec.Command(kc.kmd, startArgs...)
}
// GetKMDPID returns the PID from the kmd.pid file in the kmd data directory, or an error
func (kc KMDController) GetKMDPID() (pid int64, err error) {
	// Pull out the PID, ignoring newlines
	pidStr, err := util.GetFirstLineFromFile(kc.kmdPIDPath)
	if err != nil {
		return -1, err
	}

	// Parse as an integer (32-bit range, returned as int64)
	pid, err = strconv.ParseInt(pidStr, 10, 32)
	return
}
// StopKMD looks up the running kmd process via its PID file and kills it.
// If no PID can be read, kmd is treated as already stopped and no error
// is returned.
func (kc *KMDController) StopKMD() (alreadyStopped bool, err error) {
	kmdPID, pidErr := kc.GetKMDPID()
	if pidErr != nil {
		// No PID file / unreadable PID: nothing to kill.
		return true, nil
	}
	if killErr := killPID(int(kmdPID)); killErr != nil {
		return false, killErr
	}
	return false, nil
}
// cleanUpZombieKMD removes files that a kmd node that's not actually running
// might have left behind (currently just the stale PID file).
func (kc KMDController) cleanUpZombieKMD() {
	if kc.kmdPIDPath != "" {
		os.Remove(kc.kmdPIDPath)
	}
}
// setKmdCmdLogFiles redirects the kmd process's stderr and stdout into
// append-mode log files inside the kmd data directory, returning any files
// it successfully opened so the caller can close them once the descriptors
// have been dup'd by exec. Failure to open either file is non-fatal: an
// error is reported on our own stderr and that stream is left unredirected.
func (kc *KMDController) setKmdCmdLogFiles(cmd *exec.Cmd) (files []*os.File) {
	// openLog opens (creating / appending as needed) a 0600 log file in the
	// kmd data dir, reporting failures to our own stderr.
	openLog := func(filename, label string) *os.File {
		f, err := os.OpenFile(filepath.Join(kc.kmdDataDir, filename), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error creating file for capturing %s: %v\n", label, err)
			return nil
		}
		return f
	}

	if errFile := openLog(kmdStdErrFilename, "stderr"); errFile != nil {
		cmd.Stderr = errFile
		files = append(files, errFile)
	}
	if outFile := openLog(kmdStdOutFilename, "stdout"); outFile != nil {
		cmd.Stdout = outFile
		files = append(files, outFile)
	}
	return
}
// StartKMD spins up a kmd process and waits for it to begin
// servicing API requests. It returns alreadyRunning=true if a healthy kmd
// was already serving from this data directory.
func (kc *KMDController) StartKMD(args KMDStartArgs) (alreadyRunning bool, err error) {
	// Optimistically check if kmd is already running
	pid, err := kc.GetKMDPID()
	if err == nil {
		// Got a PID. Is there actually a process running there?
		// "If sig is 0, then no signal is sent, but existence and permission
		// checks are still performed"
		err = util.KillProcess(int(pid), syscall.Signal(0))
		if err == nil {
			// Yup, return alreadyRunning = true
			return true, nil
		}
		// Nope, clean up the files the zombie may have left behind
		kc.cleanUpZombieKMD()
	}

	// Refuse to proceed with a data dir that is relative, not a directory,
	// or readable by group/other (kmd stores key material there).
	if !filepath.IsAbs(kc.kmdDataDir) {
		logging.Base().Errorf("%s: kmd data dir is not an absolute path, which is unsafe", kc.kmdDataDir)
		return false, errKMDDataDirNotAbs
	}
	dataDirStat, err := os.Stat(kc.kmdDataDir)
	if err == nil {
		if !dataDirStat.IsDir() {
			logging.Base().Errorf("%s: kmd data dir exists but is not a directory", kc.kmdDataDir)
			return false, errors.New("bad kmd data dir")
		}
		if (dataDirStat.Mode() & 0077) != 0 {
			logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777)
			return false, errors.New("kmd data dir not secure")
		}
	} else {
		err = os.MkdirAll(kc.kmdDataDir, DefaultKMDDataDirPerms)
		if err != nil {
			logging.Base().Errorf("%s: kmd data dir err: %s", kc.kmdDataDir, err)
			return false, err
		}
	}

	// Try to start the kmd process
	kmdCmd := kc.buildKMDCommand(args)

	// Capture stderr and stdout to files
	files := kc.setKmdCmdLogFiles(kmdCmd)
	// Descriptors will get dup'd after exec, so OK to close when we return
	for _, file := range files {
		defer file.Close()
	}

	err = kmdCmd.Start()
	if err != nil {
		return
	}

	// Call kmdCmd.Wait() to clean up the process when it exits and report
	// why it exited
	c := make(chan error)
	go func() {
		c <- kmdCmd.Wait()
	}()

	// Wait for kmd to start: either the child exits (possibly because
	// another kmd already owns the data dir) or the API becomes reachable.
	success := false
	for !success {
		select {
		case err = <-c:
			// Try to extract an exit code
			exitError, ok := err.(*exec.ExitError)
			if !ok {
				return false, errKMDExitedEarly
			}
			ws := exitError.Sys().(syscall.WaitStatus)
			exitCode := ws.ExitStatus()

			// Check if we exited because kmd is already running
			if exitCode == codes.ExitCodeKMDAlreadyRunning {
				kmdClient, err := kc.KMDClient()
				if err != nil {
					// kmd told us it's running, but we couldn't construct a client.
					// we want to keep waiting until the kmd would write out the
					// file.
					continue
				}

				// See if the server is up by requesting the versions endpoint
				req := kmdapi.VersionsRequest{}
				resp := kmdapi.VersionsResponse{}
				err = kmdClient.DoV1Request(req, &resp)
				if err != nil {
					return false, err
				}
				// cool; kmd is up and running, and responding to version queries.
				return true, nil
			}

			// Fail on any other errors
			return false, errKMDExitedEarly
		case <-time.After(time.Millisecond * 100):
			// If we can't talk to the API yet, spin
			kmdClient, err := kc.KMDClient()
			if err != nil {
				continue
			}

			// See if the server is up by requesting the versions endpoint
			req := kmdapi.VersionsRequest{}
			resp := kmdapi.VersionsResponse{}
			err = kmdClient.DoV1Request(req, &resp)
			if err == nil {
				success = true
				continue
			}
		}
	}

	return
}
| 1 | 41,285 | I believe that you meant to have the following on the second one, right ? `dataDirStat.Mode()^0077` | algorand-go-algorand | go |
@@ -386,8 +386,8 @@ func (s *stream) SetDeadline(t time.Time) error {
}
// CloseRemote makes the stream receive a "virtual" FIN stream frame at a given offset
-func (s *stream) CloseRemote(offset protocol.ByteCount) {
- s.AddStreamFrame(&wire.StreamFrame{FinBit: true, Offset: offset})
+func (s *stream) CloseRemote(offset uint64) {
+ s.AddStreamFrame(&wire.StreamFrame{FinBit: true, Offset: protocol.ByteCount(offset)})
}
// Cancel is called by session to indicate that an error occurred | 1 | package quic
import (
"context"
"fmt"
"io"
"net"
"sync"
"time"
"github.com/lucas-clemente/quic-go/internal/flowcontrol"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/internal/wire"
)
// streamI is the session-facing interface of a stream: the public Stream
// API plus hooks for frame delivery, remote resets, retransmission data
// retrieval, and flow control.
type streamI interface {
	Stream

	AddStreamFrame(*wire.StreamFrame) error
	RegisterRemoteError(error, protocol.ByteCount) error
	HasDataForWriting() bool
	GetDataForWriting(maxBytes protocol.ByteCount) (data []byte, shouldSendFin bool)
	GetWriteOffset() protocol.ByteCount
	Finished() bool
	Cancel(error)
	// methods needed for flow control
	GetWindowUpdate() protocol.ByteCount
	UpdateSendWindow(protocol.ByteCount)
	IsFlowControlBlocked() bool
}

// cryptoStream extends streamI for the crypto stream, whose read offset
// can be rewound (e.g. for stateless retries / version negotiation).
type cryptoStream interface {
	streamI
	SetReadOffset(protocol.ByteCount)
}
// A Stream assembles the data from StreamFrames and provides a super-convenient Read-Interface
//
// Read() and Write() may be called concurrently, but multiple calls to Read() or Write() individually must be synchronized manually.
type stream struct {
	mutex sync.Mutex

	ctx       context.Context
	ctxCancel context.CancelFunc

	streamID protocol.StreamID
	// onData is called whenever new data is available for sending
	onData func()
	// onReset is a callback that should send a RST_STREAM
	onReset func(protocol.StreamID, protocol.ByteCount)

	readPosInFrame int // read position within the current head frame
	writeOffset    protocol.ByteCount
	readOffset     protocol.ByteCount

	// Once set, the errors must not be changed!
	err error

	// cancelled is set when Cancel() is called
	cancelled utils.AtomicBool
	// finishedReading is set once we read a frame with a FinBit
	finishedReading utils.AtomicBool
	// finisedWriting is set once Close() is called
	finishedWriting utils.AtomicBool
	// resetLocally is set if Reset() is called
	resetLocally utils.AtomicBool
	// resetRemotely is set if RegisterRemoteError() is called
	resetRemotely utils.AtomicBool

	frameQueue   *streamFrameSorter
	readChan     chan struct{} // signals readers that new data may be available
	readDeadline time.Time

	dataForWriting []byte
	finSent        utils.AtomicBool
	rstSent        utils.AtomicBool
	writeChan      chan struct{} // signals writers that buffered data was consumed
	writeDeadline  time.Time

	flowController flowcontrol.StreamFlowController

	version protocol.VersionNumber
}
var _ Stream = &stream{}
var _ streamI = &stream{}
type deadlineError struct{}
func (deadlineError) Error() string { return "deadline exceeded" }
func (deadlineError) Temporary() bool { return true }
func (deadlineError) Timeout() bool { return true }
var errDeadline net.Error = &deadlineError{}
// newStream creates a new Stream
func newStream(StreamID protocol.StreamID,
onData func(),
onReset func(protocol.StreamID, protocol.ByteCount),
flowController flowcontrol.StreamFlowController,
version protocol.VersionNumber,
) *stream {
s := &stream{
onData: onData,
onReset: onReset,
streamID: StreamID,
flowController: flowController,
frameQueue: newStreamFrameSorter(),
readChan: make(chan struct{}, 1),
writeChan: make(chan struct{}, 1),
version: version,
}
s.ctx, s.ctxCancel = context.WithCancel(context.Background())
return s
}
// Read implements io.Reader. It is not thread safe!
func (s *stream) Read(p []byte) (int, error) {
s.mutex.Lock()
err := s.err
s.mutex.Unlock()
if s.cancelled.Get() || s.resetLocally.Get() {
return 0, err
}
if s.finishedReading.Get() {
return 0, io.EOF
}
bytesRead := 0
for bytesRead < len(p) {
s.mutex.Lock()
frame := s.frameQueue.Head()
if frame == nil && bytesRead > 0 {
err = s.err
s.mutex.Unlock()
return bytesRead, err
}
var err error
for {
// Stop waiting on errors
if s.resetLocally.Get() || s.cancelled.Get() {
err = s.err
break
}
deadline := s.readDeadline
if !deadline.IsZero() && !time.Now().Before(deadline) {
err = errDeadline
break
}
if frame != nil {
s.readPosInFrame = int(s.readOffset - frame.Offset)
break
}
s.mutex.Unlock()
if deadline.IsZero() {
<-s.readChan
} else {
select {
case <-s.readChan:
case <-time.After(deadline.Sub(time.Now())):
}
}
s.mutex.Lock()
frame = s.frameQueue.Head()
}
s.mutex.Unlock()
if err != nil {
return bytesRead, err
}
m := utils.Min(len(p)-bytesRead, int(frame.DataLen())-s.readPosInFrame)
if bytesRead > len(p) {
return bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p))
}
if s.readPosInFrame > int(frame.DataLen()) {
return bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, frame.DataLen())
}
copy(p[bytesRead:], frame.Data[s.readPosInFrame:])
s.readPosInFrame += m
bytesRead += m
s.readOffset += protocol.ByteCount(m)
// when a RST_STREAM was received, the was already informed about the final byteOffset for this stream
if !s.resetRemotely.Get() {
s.flowController.AddBytesRead(protocol.ByteCount(m))
}
s.onData() // so that a possible WINDOW_UPDATE is sent
if s.readPosInFrame >= int(frame.DataLen()) {
fin := frame.FinBit
s.mutex.Lock()
s.frameQueue.Pop()
s.mutex.Unlock()
if fin {
s.finishedReading.Set(true)
return bytesRead, io.EOF
}
}
}
return bytesRead, nil
}
func (s *stream) Write(p []byte) (int, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.resetLocally.Get() || s.err != nil {
return 0, s.err
}
if s.finishedWriting.Get() {
return 0, fmt.Errorf("write on closed stream %d", s.streamID)
}
if len(p) == 0 {
return 0, nil
}
s.dataForWriting = make([]byte, len(p))
copy(s.dataForWriting, p)
s.onData()
var err error
for {
deadline := s.writeDeadline
if !deadline.IsZero() && !time.Now().Before(deadline) {
err = errDeadline
break
}
if s.dataForWriting == nil || s.err != nil {
break
}
s.mutex.Unlock()
if deadline.IsZero() {
<-s.writeChan
} else {
select {
case <-s.writeChan:
case <-time.After(deadline.Sub(time.Now())):
}
}
s.mutex.Lock()
}
if err != nil {
return 0, err
}
if s.err != nil {
return len(p) - len(s.dataForWriting), s.err
}
return len(p), nil
}
func (s *stream) GetWriteOffset() protocol.ByteCount {
return s.writeOffset
}
// HasDataForWriting says if there's stream available to be dequeued for writing
func (s *stream) HasDataForWriting() bool {
s.mutex.Lock()
hasData := s.err == nil && // nothing should be sent if an error occurred
(len(s.dataForWriting) > 0 || // there is data queued for sending
s.finishedWriting.Get() && !s.finSent.Get()) // if there is no data, but writing finished and the FIN hasn't been sent yet
s.mutex.Unlock()
return hasData
}
func (s *stream) GetDataForWriting(maxBytes protocol.ByteCount) ([]byte, bool /* should send FIN */) {
data, shouldSendFin := s.getDataForWritingImpl(maxBytes)
if shouldSendFin {
s.finSent.Set(true)
}
return data, shouldSendFin
}
func (s *stream) getDataForWritingImpl(maxBytes protocol.ByteCount) ([]byte, bool /* should send FIN */) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.err != nil || s.dataForWriting == nil {
return nil, s.finishedWriting.Get() && !s.finSent.Get()
}
// TODO(#657): Flow control for the crypto stream
if s.streamID != s.version.CryptoStreamID() {
maxBytes = utils.MinByteCount(maxBytes, s.flowController.SendWindowSize())
}
if maxBytes == 0 {
return nil, false
}
var ret []byte
if protocol.ByteCount(len(s.dataForWriting)) > maxBytes {
ret = s.dataForWriting[:maxBytes]
s.dataForWriting = s.dataForWriting[maxBytes:]
} else {
ret = s.dataForWriting
s.dataForWriting = nil
s.signalWrite()
}
s.writeOffset += protocol.ByteCount(len(ret))
s.flowController.AddBytesSent(protocol.ByteCount(len(ret)))
return ret, s.finishedWriting.Get() && s.dataForWriting == nil && !s.finSent.Get()
}
// Close implements io.Closer
func (s *stream) Close() error {
s.finishedWriting.Set(true)
s.ctxCancel()
s.onData()
return nil
}
func (s *stream) shouldSendReset() bool {
if s.rstSent.Get() {
return false
}
return (s.resetLocally.Get() || s.resetRemotely.Get()) && !s.finishedWriteAndSentFin()
}
// AddStreamFrame adds a new stream frame
func (s *stream) AddStreamFrame(frame *wire.StreamFrame) error {
maxOffset := frame.Offset + frame.DataLen()
if err := s.flowController.UpdateHighestReceived(maxOffset, frame.FinBit); err != nil {
return err
}
s.mutex.Lock()
defer s.mutex.Unlock()
if err := s.frameQueue.Push(frame); err != nil && err != errDuplicateStreamData {
return err
}
s.signalRead()
return nil
}
// signalRead performs a non-blocking send on the readChan
func (s *stream) signalRead() {
select {
case s.readChan <- struct{}{}:
default:
}
}
// signalRead performs a non-blocking send on the writeChan
func (s *stream) signalWrite() {
select {
case s.writeChan <- struct{}{}:
default:
}
}
func (s *stream) SetReadDeadline(t time.Time) error {
s.mutex.Lock()
oldDeadline := s.readDeadline
s.readDeadline = t
s.mutex.Unlock()
// if the new deadline is before the currently set deadline, wake up Read()
if t.Before(oldDeadline) {
s.signalRead()
}
return nil
}
func (s *stream) SetWriteDeadline(t time.Time) error {
s.mutex.Lock()
oldDeadline := s.writeDeadline
s.writeDeadline = t
s.mutex.Unlock()
if t.Before(oldDeadline) {
s.signalWrite()
}
return nil
}
func (s *stream) SetDeadline(t time.Time) error {
_ = s.SetReadDeadline(t) // SetReadDeadline never errors
_ = s.SetWriteDeadline(t) // SetWriteDeadline never errors
return nil
}
// CloseRemote makes the stream receive a "virtual" FIN stream frame at a given offset
func (s *stream) CloseRemote(offset protocol.ByteCount) {
s.AddStreamFrame(&wire.StreamFrame{FinBit: true, Offset: offset})
}
// Cancel is called by session to indicate that an error occurred
// The stream should will be closed immediately
func (s *stream) Cancel(err error) {
s.mutex.Lock()
s.cancelled.Set(true)
s.ctxCancel()
// errors must not be changed!
if s.err == nil {
s.err = err
s.signalRead()
s.signalWrite()
}
s.mutex.Unlock()
}
// resets the stream locally
func (s *stream) Reset(err error) {
if s.resetLocally.Get() {
return
}
s.mutex.Lock()
s.resetLocally.Set(true)
s.ctxCancel()
// errors must not be changed!
if s.err == nil {
s.err = err
s.signalRead()
s.signalWrite()
}
if s.shouldSendReset() {
s.onReset(s.streamID, s.writeOffset)
s.rstSent.Set(true)
}
s.mutex.Unlock()
}
// resets the stream remotely
func (s *stream) RegisterRemoteError(err error, offset protocol.ByteCount) error {
if s.resetRemotely.Get() {
return nil
}
s.mutex.Lock()
s.resetRemotely.Set(true)
s.ctxCancel()
// errors must not be changed!
if s.err == nil {
s.err = err
s.signalWrite()
}
if err := s.flowController.UpdateHighestReceived(offset, true); err != nil {
return err
}
if s.shouldSendReset() {
s.onReset(s.streamID, s.writeOffset)
s.rstSent.Set(true)
}
s.mutex.Unlock()
return nil
}
func (s *stream) finishedWriteAndSentFin() bool {
return s.finishedWriting.Get() && s.finSent.Get()
}
func (s *stream) Finished() bool {
return s.cancelled.Get() ||
(s.finishedReading.Get() && s.finishedWriteAndSentFin()) ||
(s.resetRemotely.Get() && s.rstSent.Get()) ||
(s.finishedReading.Get() && s.rstSent.Get()) ||
(s.finishedWriteAndSentFin() && s.resetRemotely.Get())
}
func (s *stream) Context() context.Context {
return s.ctx
}
func (s *stream) StreamID() protocol.StreamID {
return s.streamID
}
func (s *stream) UpdateSendWindow(n protocol.ByteCount) {
s.flowController.UpdateSendWindow(n)
}
func (s *stream) IsFlowControlBlocked() bool {
return s.flowController.IsBlocked()
}
func (s *stream) GetWindowUpdate() protocol.ByteCount {
return s.flowController.GetWindowUpdate()
}
// SetReadOffset sets the read offset.
// It is only needed for the crypto stream.
// It must not be called concurrently with any other stream methods, especially Read and Write.
func (s *stream) SetReadOffset(offset protocol.ByteCount) {
s.readOffset = offset
s.frameQueue.readPosition = offset
}
| 1 | 7,067 | The reason a `ByteCount` is used here, is that the H2 mapping in gQUIC requires this layer violation, which is why `CloseRemote` is not part of the public API. This layer violation will be resolved in IETF QUIC | lucas-clemente-quic-go | go |
@@ -260,6 +260,17 @@ bool IsDetrimentalSpell(uint16 spell_id)
return !IsBeneficialSpell(spell_id);
}
+bool IsInvisSpell(uint16 spell_id)
+{
+ if (IsEffectInSpell(spell_id, SE_Invisibility) ||
+ IsEffectInSpell(spell_id, SE_Invisibility2) ||
+ IsEffectInSpell(spell_id, SE_InvisVsUndead) ||
+ IsEffectInSpell(spell_id, SE_InvisVsUndead2) ||
+ IsEffectInSpell(spell_id, SE_InvisVsAnimals))
+ return true;
+ return false;
+}
+
bool IsInvulnerabilitySpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_DivineAura); | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2002 EQEMu Development Team (http://eqemu.org)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
General outline of spell casting process
1.
a) Client clicks a spell bar gem, ability, or item. client_process.cpp
gets the op and calls CastSpell() with all the relevant info including
cast time.
b) NPC does CastSpell() from AI
2.
a) CastSpell() determines there is a cast time and sets some state keeping
flags to be used to check the progress of casting and finish it later.
b) CastSpell() sees there's no cast time, and calls CastedSpellFinished()
Go to step 4.
3.
SpellProcess() notices that the spell casting timer which was set by
CastSpell() is expired, and calls CastedSpellFinished()
4.
CastedSpellFinished() checks some timed spell specific things, like
wether to interrupt or not, due to movement or melee. If successful
SpellFinished() is called.
5.
SpellFinished() checks some things like LoS, reagents, target and
figures out what's going to get hit by this spell based on its type.
6.
a) Single target spell, SpellOnTarget() is called.
b) AE spell, Entity::AESpell() is called.
c) Group spell, Group::CastGroupSpell()/SpellOnTarget() is called as
needed.
7.
SpellOnTarget() may or may not call SpellEffect() to cause effects to
the target
8.
If this was timed, CastedSpellFinished() will restore the client's
spell bar gems.
Most user code should call CastSpell(), with a 0 casting time if needed,
and not SpellFinished().
*/
#include "../common/eqemu_logsys.h"
#include "classes.h"
#include "spdat.h"
#ifndef WIN32
#include <stdlib.h>
#include "unix.h"
#endif
///////////////////////////////////////////////////////////////////////////////
// spell property testing functions
bool IsTargetableAESpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) && spells[spell_id].targettype == ST_AETarget) {
return true;
}
return false;
}
bool IsSacrificeSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_Sacrifice);
}
bool IsLifetapSpell(uint16 spell_id)
{
// Ancient Lifebane: 2115
if (IsValidSpell(spell_id) &&
(spells[spell_id].targettype == ST_Tap ||
spells[spell_id].targettype == ST_TargetAETap ||
spell_id == 2115))
return true;
return false;
}
bool IsMezSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_Mez);
}
bool IsStunSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_Stun);
}
bool IsSummonSpell(uint16 spellid)
{
for (int o = 0; o < EFFECT_COUNT; o++) {
uint32 tid = spells[spellid].effectid[o];
if (tid == SE_SummonPet || tid == SE_SummonItem || tid == SE_SummonPC)
return true;
}
return false;
}
bool IsEvacSpell(uint16 spellid)
{
return IsEffectInSpell(spellid, SE_Succor);
}
bool IsDamageSpell(uint16 spellid)
{
for (int o = 0; o < EFFECT_COUNT; o++) {
uint32 tid = spells[spellid].effectid[o];
if ((tid == SE_CurrentHPOnce || tid == SE_CurrentHP) &&
spells[spellid].targettype != ST_Tap && spells[spellid].buffduration < 1 &&
spells[spellid].base[o] < 0)
return true;
}
return false;
}
bool IsFearSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_Fear);
}
bool IsCureSpell(uint16 spell_id)
{
const SPDat_Spell_Struct &sp = spells[spell_id];
bool CureEffect = false;
for(int i = 0; i < EFFECT_COUNT; i++){
if (sp.effectid[i] == SE_DiseaseCounter || sp.effectid[i] == SE_PoisonCounter
|| sp.effectid[i] == SE_CurseCounter || sp.effectid[i] == SE_CorruptionCounter)
CureEffect = true;
}
if (CureEffect && IsBeneficialSpell(spell_id))
return true;
return false;
}
bool IsSlowSpell(uint16 spell_id)
{
const SPDat_Spell_Struct &sp = spells[spell_id];
for(int i = 0; i < EFFECT_COUNT; i++)
if ((sp.effectid[i] == SE_AttackSpeed && sp.base[i] < 100) ||
(sp.effectid[i] == SE_AttackSpeed4))
return true;
return false;
}
bool IsHasteSpell(uint16 spell_id)
{
const SPDat_Spell_Struct &sp = spells[spell_id];
for(int i = 0; i < EFFECT_COUNT; i++)
if(sp.effectid[i] == SE_AttackSpeed)
return (sp.base[i] < 100);
return false;
}
bool IsHarmonySpell(uint16 spell_id)
{
// IsEffectInSpell(spell_id, SE_Lull) - Lull is not calculated anywhere atm
return (IsEffectInSpell(spell_id, SE_Harmony) || IsEffectInSpell(spell_id, SE_ChangeFrenzyRad));
}
bool IsPercentalHealSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_PercentalHeal);
}
bool IsGroupOnlySpell(uint16 spell_id)
{
return IsValidSpell(spell_id) && spells[spell_id].goodEffect == 2;
}
bool IsBeneficialSpell(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return false;
// You'd think just checking goodEffect flag would be enough?
if (spells[spell_id].goodEffect == 1) {
// If the target type is ST_Self or ST_Pet and is a SE_CancleMagic spell
// it is not Beneficial
SpellTargetType tt = spells[spell_id].targettype;
if (tt != ST_Self && tt != ST_Pet &&
IsEffectInSpell(spell_id, SE_CancelMagic))
return false;
// When our targettype is ST_Target, ST_AETarget, ST_Aniaml, ST_Undead, or ST_Pet
// We need to check more things!
if (tt == ST_Target || tt == ST_AETarget || tt == ST_Animal ||
tt == ST_Undead || tt == ST_Pet) {
uint16 sai = spells[spell_id].SpellAffectIndex;
// If the resisttype is magic and SpellAffectIndex is Calm/memblur/dispell sight
// it's not beneficial
if (spells[spell_id].resisttype == RESIST_MAGIC) {
// checking these SAI cause issues with the rng defensive proc line
// So I guess instead of fixing it for real, just a quick hack :P
if (spells[spell_id].effectid[0] != SE_DefensiveProc &&
(sai == SAI_Calm || sai == SAI_Dispell_Sight || sai == SAI_Memory_Blur ||
sai == SAI_Calm_Song))
return false;
} else {
// If the resisttype is not magic and spell is Bind Sight or Cast Sight
// It's not beneficial
if ((sai == SAI_Calm && IsEffectInSpell(spell_id, SE_Harmony)) || (sai == SAI_Calm_Song && IsEffectInSpell(spell_id, SE_BindSight)) || (sai == SAI_Dispell_Sight && spells[spell_id].skill == 18 && !IsEffectInSpell(spell_id, SE_VoiceGraft)))
return false;
}
}
}
// And finally, if goodEffect is not 0 or if it's a group spell it's beneficial
return spells[spell_id].goodEffect != 0 || IsGroupSpell(spell_id);
}
bool IsDetrimentalSpell(uint16 spell_id)
{
return !IsBeneficialSpell(spell_id);
}
bool IsInvulnerabilitySpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_DivineAura);
}
bool IsCHDurationSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_CompleteHeal);
}
bool IsPoisonCounterSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_PoisonCounter);
}
bool IsDiseaseCounterSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_DiseaseCounter);
}
bool IsSummonItemSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_SummonItem);
}
bool IsSummonSkeletonSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_NecPet);
}
bool IsSummonPetSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_SummonPet) ||
IsEffectInSpell(spell_id, SE_SummonBSTPet))
return true;
return false;
}
bool IsSummonPCSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_SummonPC);
}
bool IsCharmSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_Charm);
}
bool IsBlindSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_Blind);
}
bool IsEffectHitpointsSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_CurrentHP);
}
bool IsReduceCastTimeSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_IncreaseSpellHaste);
}
bool IsIncreaseDurationSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_IncreaseSpellDuration);
}
bool IsReduceManaSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_ReduceManaCost);
}
bool IsExtRangeSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_IncreaseRange);
}
bool IsImprovedHealingSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_ImprovedHeal);
}
bool IsImprovedDamageSpell(uint16 spell_id)
{
return IsEffectInSpell(spell_id, SE_ImprovedDamage);
}
bool IsAEDurationSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) &&
(spells[spell_id].targettype == ST_AETarget || spells[spell_id].targettype == ST_UndeadAE) &&
spells[spell_id].AEDuration != 0)
return true;
return false;
}
bool IsPureNukeSpell(uint16 spell_id)
{
int i, effect_count = 0;
if (!IsValidSpell(spell_id))
return false;
for (i = 0; i < EFFECT_COUNT; i++)
if (!IsBlankSpellEffect(spell_id, i))
effect_count++;
if (effect_count == 1 && IsEffectInSpell(spell_id, SE_CurrentHP) &&
spells[spell_id].buffduration == 0 && IsDamageSpell(spell_id))
return true;
return false;
}
bool IsAENukeSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) && IsPureNukeSpell(spell_id) &&
spells[spell_id].aoerange > 0)
return true;
return false;
}
bool IsPBAENukeSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) && IsPureNukeSpell(spell_id) &&
spells[spell_id].aoerange > 0 && spells[spell_id].targettype == ST_AECaster)
return true;
return false;
}
bool IsAERainNukeSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) && IsPureNukeSpell(spell_id) &&
spells[spell_id].aoerange > 0 && spells[spell_id].AEDuration > 1000)
return true;
return false;
}
bool IsPartialCapableSpell(uint16 spell_id)
{
if (spells[spell_id].no_partial_resist)
return false;
// spell uses 600 (partial) scale if first effect is damage, else it uses 200 scale.
// this includes DoTs. no_partial_resist excludes spells like necro snares
for (int o = 0; o < EFFECT_COUNT; o++) {
auto tid = spells[spell_id].effectid[o];
if (IsBlankSpellEffect(spell_id, o))
continue;
if ((tid == SE_CurrentHPOnce || tid == SE_CurrentHP) && spells[spell_id].base[o] < 0)
return true;
return false;
}
return false;
}
bool IsResistableSpell(uint16 spell_id)
{
// for now only detrimental spells are resistable. later on i will
// add specific exceptions for the beneficial spells that are resistable
if (IsDetrimentalSpell(spell_id))
return true;
return false;
}
// checks if this spell affects your group
bool IsGroupSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) &&
(spells[spell_id].targettype == ST_AEBard ||
spells[spell_id].targettype == ST_Group ||
spells[spell_id].targettype == ST_GroupTeleport))
return true;
return false;
}
// checks if this spell can be targeted
bool IsTGBCompatibleSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) &&
(!IsDetrimentalSpell(spell_id) && spells[spell_id].buffduration != 0 &&
!IsBardSong(spell_id) && !IsEffectInSpell(spell_id, SE_Illusion)))
return true;
return false;
}
bool IsBardSong(uint16 spell_id)
{
if (IsValidSpell(spell_id) && spells[spell_id].classes[BARD - 1] < 255 && !spells[spell_id].IsDisciplineBuff)
return true;
return false;
}
bool IsEffectInSpell(uint16 spellid, int effect)
{
int j;
if (!IsValidSpell(spellid))
return false;
for (j = 0; j < EFFECT_COUNT; j++)
if (spells[spellid].effectid[j] == effect)
return true;
return false;
}
// arguments are spell id and the index of the effect to check.
// this is used in loops that process effects inside a spell to skip
// the blanks
bool IsBlankSpellEffect(uint16 spellid, int effect_index)
{
int effect, base, formula;
effect = spells[spellid].effectid[effect_index];
base = spells[spellid].base[effect_index];
formula = spells[spellid].formula[effect_index];
// SE_CHA is "spacer"
// SE_Stacking* are also considered blank where this is used
if (effect == SE_Blank || (effect == SE_CHA && base == 0 && formula == 100) ||
effect == SE_StackingCommand_Block || effect == SE_StackingCommand_Overwrite)
return true;
return false;
}
// checks some things about a spell id, to see if we can proceed
bool IsValidSpell(uint32 spellid)
{
if (SPDAT_RECORDS > 0 && spellid != 0 && spellid != 1 &&
spellid != 0xFFFFFFFF && spellid < SPDAT_RECORDS && spells[spellid].player_1[0])
return true;
return false;
}
// returns the lowest level of any caster which can use the spell
int GetMinLevel(uint16 spell_id)
{
int r, min = 255;
const SPDat_Spell_Struct &spell = spells[spell_id];
for (r = 0; r < PLAYER_CLASS_COUNT; r++)
if (spell.classes[r] < min)
min = spell.classes[r];
// if we can't cast the spell return 0
// just so it wont screw up calculations used in other areas of the code
// seen 127, 254, 255
if (min >= 127)
return 0;
else
return min;
}
int GetSpellLevel(uint16 spell_id, int classa)
{
if (classa >= PLAYER_CLASS_COUNT)
return 255;
const SPDat_Spell_Struct &spell = spells[spell_id];
return spell.classes[classa - 1];
}
// this will find the first occurrence of effect. this is handy
// for spells like mez and charm, but if the effect appears more than once
// in a spell this will just give back the first one.
int GetSpellEffectIndex(uint16 spell_id, int effect)
{
int i;
if (!IsValidSpell(spell_id))
return -1;
for (i = 0; i < EFFECT_COUNT; i++)
if (spells[spell_id].effectid[i] == effect)
return i;
return -1;
}
// returns the level required to use the spell if that class/level
// can use it, 0 otherwise
// note: this isn't used by anything right now
int CanUseSpell(uint16 spellid, int classa, int level)
{
int level_to_use;
if (!IsValidSpell(spellid) || classa >= PLAYER_CLASS_COUNT)
return 0;
level_to_use = spells[spellid].classes[classa - 1];
if (level_to_use && level_to_use != 255 && level >= level_to_use)
return level_to_use;
return 0;
}
bool BeneficialSpell(uint16 spell_id)
{
if (spell_id <= 0 || spell_id >= SPDAT_RECORDS
/*|| spells[spell_id].stacking == 27*/ )
return true;
switch (spells[spell_id].goodEffect) {
case 1:
case 3:
return true;
}
return false;
}
bool GroupOnlySpell(uint16 spell_id)
{
switch (spells[spell_id].goodEffect) {
case 2:
case 3:
return true;
}
switch (spell_id) {
case 1771:
return true;
}
return false;
}
int32 CalculatePoisonCounters(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return 0;
int32 Counters = 0;
for (int i = 0; i < EFFECT_COUNT; i++)
if (spells[spell_id].effectid[i] == SE_PoisonCounter &&
spells[spell_id].base[i] > 0)
Counters += spells[spell_id].base[i];
return Counters;
}
int32 CalculateDiseaseCounters(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return 0;
int32 Counters = 0;
for (int i = 0; i < EFFECT_COUNT; i++)
if(spells[spell_id].effectid[i] == SE_DiseaseCounter &&
spells[spell_id].base[i] > 0)
Counters += spells[spell_id].base[i];
return Counters;
}
int32 CalculateCurseCounters(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return 0;
int32 Counters = 0;
for (int i = 0; i < EFFECT_COUNT; i++)
if(spells[spell_id].effectid[i] == SE_CurseCounter &&
spells[spell_id].base[i] > 0)
Counters += spells[spell_id].base[i];
return Counters;
}
int32 CalculateCorruptionCounters(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return 0;
int32 Counters = 0;
for (int i = 0; i < EFFECT_COUNT; i++)
if (spells[spell_id].effectid[i] == SE_CorruptionCounter &&
spells[spell_id].base[i] > 0)
Counters += spells[spell_id].base[i];
return Counters;
}
int32 CalculateCounters(uint16 spell_id)
{
int32 counter = CalculatePoisonCounters(spell_id);
if (counter != 0)
return counter;
counter = CalculateDiseaseCounters(spell_id);
if (counter != 0)
return counter;
counter = CalculateCurseCounters(spell_id);
if (counter != 0)
return counter;
counter = CalculateCorruptionCounters(spell_id);
return counter;
}
bool IsDisciplineBuff(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return false;
if (spells[spell_id].IsDisciplineBuff && spells[spell_id].targettype == ST_Self)
return true;
return false;
}
bool IsDiscipline(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return false;
if (spells[spell_id].mana == 0 &&
(spells[spell_id].EndurCost || spells[spell_id].EndurUpkeep))
return true;
return false;
}
bool IsCombatSkill(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return false;
//Check if Discipline
if ((spells[spell_id].mana == 0 && (spells[spell_id].EndurCost || spells[spell_id].EndurUpkeep)))
return true;
return false;
}
bool IsResurrectionEffects(uint16 spell_id)
{
// spell id 756 is Resurrection Effects spell
if(IsValidSpell(spell_id) && (spell_id == 756 || spell_id == 757))
return true;
return false;
}
bool IsRuneSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id))
for (int i = 0; i < EFFECT_COUNT; i++)
if (spells[spell_id].effectid[i] == SE_Rune)
return true;
return false;
}
bool IsMagicRuneSpell(uint16 spell_id)
{
if(IsValidSpell(spell_id))
for(int i = 0; i < EFFECT_COUNT; i++)
if(spells[spell_id].effectid[i] == SE_AbsorbMagicAtt)
return true;
return false;
}
bool IsManaTapSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id))
for (int i = 0; i < EFFECT_COUNT; i++)
if (spells[spell_id].effectid[i] == SE_CurrentMana &&
spells[spell_id].targettype == ST_Tap)
return true;
return false;
}
bool IsAllianceSpellLine(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_AddFaction))
return true;
return false;
}
bool IsDeathSaveSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_DeathSave))
return true;
return false;
}
// Deathsave spells with base of 1 are partial
bool IsPartialDeathSaveSpell(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return false;
for (int i = 0; i < EFFECT_COUNT; i++)
if (spells[spell_id].effectid[i] == SE_DeathSave &&
spells[spell_id].base[i] == 1)
return true;
return false;
}
// Deathsave spells with base 2 are "full"
bool IsFullDeathSaveSpell(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return false;
for (int i = 0; i < EFFECT_COUNT; i++)
if (spells[spell_id].effectid[i] == SE_DeathSave &&
spells[spell_id].base[i] == 2)
return true;
return false;
}
bool IsShadowStepSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_ShadowStep))
return true;
return false;
}
bool IsSuccorSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_Succor))
return true;
return false;
}
bool IsTeleportSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_Teleport))
return true;
return false;
}
bool IsGateSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_Gate))
return true;
return false;
}
bool IsPlayerIllusionSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_Illusion) &&
spells[spell_id].targettype == ST_Self)
return true;
return false;
}
int GetSpellEffectDescNum(uint16 spell_id)
{
if (IsValidSpell(spell_id))
return spells[spell_id].effectdescnum;
return -1;
}
DmgShieldType GetDamageShieldType(uint16 spell_id, int32 DSType)
{
// If we have a DamageShieldType for this spell from the damageshieldtypes table, return that,
// else, make a guess, based on the resist type. Default return value is DS_THORNS
if (IsValidSpell(spell_id)) {
LogSpells("DamageShieldType for spell [{}] ([{}]) is [{}]", spell_id,
spells[spell_id].name, spells[spell_id].DamageShieldType);
if (spells[spell_id].DamageShieldType)
return (DmgShieldType) spells[spell_id].DamageShieldType;
switch (spells[spell_id].resisttype) {
case RESIST_COLD:
return DS_TORMENT;
case RESIST_FIRE:
return DS_BURN;
case RESIST_DISEASE:
return DS_DECAY;
default:
return DS_THORNS;
}
}
else if (DSType)
return (DmgShieldType) DSType;
return DS_THORNS;
}
bool IsLDoNObjectSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_AppraiseLDonChest) ||
IsEffectInSpell(spell_id, SE_DisarmLDoNTrap) ||
IsEffectInSpell(spell_id, SE_UnlockLDoNChest))
return true;
return false;
}
int32 GetSpellResistType(uint16 spell_id)
{
return spells[spell_id].resisttype;
}
int32 GetSpellTargetType(uint16 spell_id)
{
return (int32)spells[spell_id].targettype;
}
bool IsHealOverTimeSpell(uint16 spell_id)
{
if (IsEffectInSpell(spell_id, SE_HealOverTime) && !IsGroupSpell(spell_id))
return true;
return false;
}
bool IsCompleteHealSpell(uint16 spell_id)
{
if (spell_id == 13 || IsEffectInSpell(spell_id, SE_CompleteHeal) ||
(IsPercentalHealSpell(spell_id) && !IsGroupSpell(spell_id)))
return true;
return false;
}
bool IsFastHealSpell(uint16 spell_id)
{
const int MaxFastHealCastingTime = 2000;
if (spells[spell_id].cast_time <= MaxFastHealCastingTime &&
spells[spell_id].effectid[0] == 0 && spells[spell_id].base[0] > 0 &&
!IsGroupSpell(spell_id))
return true;
return false;
}
bool IsVeryFastHealSpell(uint16 spell_id)
{
const int MaxFastHealCastingTime = 1000;
if (spells[spell_id].cast_time <= MaxFastHealCastingTime &&
spells[spell_id].effectid[0] == 0 && spells[spell_id].base[0] > 0 &&
!IsGroupSpell(spell_id))
return true;
return false;
}
bool IsRegularSingleTargetHealSpell(uint16 spell_id)
{
if(spells[spell_id].effectid[0] == 0 && spells[spell_id].base[0] > 0 &&
spells[spell_id].targettype == ST_Target && spells[spell_id].buffduration == 0 &&
!IsCompleteHealSpell(spell_id) &&
!IsHealOverTimeSpell(spell_id) && !IsGroupSpell(spell_id))
return true;
return false;
}
bool IsRegularGroupHealSpell(uint16 spell_id)
{
if (IsGroupSpell(spell_id) && !IsCompleteHealSpell(spell_id) && !IsHealOverTimeSpell(spell_id))
return true;
return false;
}
bool IsGroupCompleteHealSpell(uint16 spell_id)
{
if (IsGroupSpell(spell_id) && IsCompleteHealSpell(spell_id))
return true;
return false;
}
bool IsGroupHealOverTimeSpell(uint16 spell_id)
{
if( IsGroupSpell(spell_id) && IsHealOverTimeSpell(spell_id) && spells[spell_id].buffduration < 10)
return true;
return false;
}
bool IsDebuffSpell(uint16 spell_id)
{
if (IsBeneficialSpell(spell_id) || IsEffectHitpointsSpell(spell_id) || IsStunSpell(spell_id) ||
IsMezSpell(spell_id) || IsCharmSpell(spell_id) || IsSlowSpell(spell_id) ||
IsEffectInSpell(spell_id, SE_Root) || IsEffectInSpell(spell_id, SE_CancelMagic) ||
IsEffectInSpell(spell_id, SE_MovementSpeed) || IsFearSpell(spell_id) || IsEffectInSpell(spell_id, SE_InstantHate))
return false;
else
return true;
}
bool IsResistDebuffSpell(uint16 spell_id)
{
if ((IsEffectInSpell(spell_id, SE_ResistFire) || IsEffectInSpell(spell_id, SE_ResistCold) ||
IsEffectInSpell(spell_id, SE_ResistPoison) || IsEffectInSpell(spell_id, SE_ResistDisease) ||
IsEffectInSpell(spell_id, SE_ResistMagic) || IsEffectInSpell(spell_id, SE_ResistAll) ||
IsEffectInSpell(spell_id, SE_ResistCorruption)) && !IsBeneficialSpell(spell_id))
return true;
else
return false;
}
bool IsSelfConversionSpell(uint16 spell_id)
{
if (GetSpellTargetType(spell_id) == ST_Self && IsEffectInSpell(spell_id, SE_CurrentMana) &&
IsEffectInSpell(spell_id, SE_CurrentHP) && spells[spell_id].base[GetSpellEffectIndex(spell_id, SE_CurrentMana)] > 0 &&
spells[spell_id].base[GetSpellEffectIndex(spell_id, SE_CurrentHP)] < 0)
return true;
else
return false;
}
// returns true for both detrimental and beneficial buffs
bool IsBuffSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) && (spells[spell_id].buffduration || spells[spell_id].buffdurationformula))
return true;
return false;
}
bool IsPersistDeathSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) && spells[spell_id].persistdeath)
return true;
return false;
}
bool IsSuspendableSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) && spells[spell_id].suspendable)
return true;
return false;
}
uint32 GetMorphTrigger(uint32 spell_id)
{
for (int i = 0; i < EFFECT_COUNT; ++i)
if (spells[spell_id].effectid[i] == SE_CastOnFadeEffect)
return spells[spell_id].base[i];
return 0;
}
bool IsCastonFadeDurationSpell(uint16 spell_id)
{
for (int i = 0; i < EFFECT_COUNT; ++i) {
if (spells[spell_id].effectid[i] == SE_CastOnFadeEffect
|| spells[spell_id].effectid[i] == SE_CastOnFadeEffectNPC
|| spells[spell_id].effectid[i] == SE_CastOnFadeEffectAlways){
return true;
}
}
return false;
}
bool IsPowerDistModSpell(uint16 spell_id)
{
if (IsValidSpell(spell_id) &&
(spells[spell_id].max_dist_mod || spells[spell_id].min_dist_mod) && spells[spell_id].max_dist > spells[spell_id].min_dist)
return true;
return false;
}
uint32 GetPartialMeleeRuneReduction(uint32 spell_id)
{
for (int i = 0; i < EFFECT_COUNT; ++i)
if (spells[spell_id].effectid[i] == SE_MitigateMeleeDamage)
return spells[spell_id].base[i];
return 0;
}
uint32 GetPartialMagicRuneReduction(uint32 spell_id)
{
for (int i = 0; i < EFFECT_COUNT; ++i)
if (spells[spell_id].effectid[i] == SE_MitigateSpellDamage)
return spells[spell_id].base[i];
return 0;
}
uint32 GetPartialMeleeRuneAmount(uint32 spell_id)
{
for (int i = 0; i < EFFECT_COUNT; ++i)
if (spells[spell_id].effectid[i] == SE_MitigateMeleeDamage)
return spells[spell_id].max[i];
return 0;
}
uint32 GetPartialMagicRuneAmount(uint32 spell_id)
{
for (int i = 0; i < EFFECT_COUNT; ++i)
if (spells[spell_id].effectid[i] == SE_MitigateSpellDamage)
return spells[spell_id].max[i];
return 0;
}
bool DetrimentalSpellAllowsRest(uint16 spell_id)
{
if (IsValidSpell(spell_id))
return spells[spell_id].AllowRest;
return false;
}
bool NoDetrimentalSpellAggro(uint16 spell_id)
{
if (IsValidSpell(spell_id))
return spells[spell_id].no_detrimental_spell_aggro;
return false;
}
bool IsStackableDot(uint16 spell_id)
{
// rules according to client
if (!IsValidSpell(spell_id))
return false;
const auto &spell = spells[spell_id];
if (spell.dot_stacking_exempt || spell.goodEffect || !spell.buffdurationformula)
return false;
return IsEffectInSpell(spell_id, SE_CurrentHP) || IsEffectInSpell(spell_id, SE_GravityEffect);
}
bool IsBardOnlyStackEffect(int effect)
{
switch(effect) {
/*case SE_CurrentMana:
case SE_ManaRegen_v2:
case SE_CurrentHP:
case SE_HealOverTime:*/
case SE_BardAEDot:
return true;
default:
return false;
}
}
bool IsCastWhileInvis(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return false;
const auto &spell = spells[spell_id];
if (spell.sneak || spell.cast_not_standing)
return true;
return false;
}
bool IsEffectIgnoredInStacking(int spa)
{
// this should match RoF2
switch (spa) {
case SE_SeeInvis:
case SE_DiseaseCounter:
case SE_PoisonCounter:
case SE_Levitate:
case SE_InfraVision:
case SE_UltraVision:
case SE_CurrentHPOnce:
case SE_CurseCounter:
case SE_ImprovedDamage:
case SE_ImprovedHeal:
case SE_SpellResistReduction:
case SE_IncreaseSpellHaste:
case SE_IncreaseSpellDuration:
case SE_IncreaseRange:
case SE_SpellHateMod:
case SE_ReduceReagentCost:
case SE_ReduceManaCost:
case SE_FcStunTimeMod:
case SE_LimitMaxLevel:
case SE_LimitResist:
case SE_LimitTarget:
case SE_LimitEffect:
case SE_LimitSpellType:
case SE_LimitSpell:
case SE_LimitMinDur:
case SE_LimitInstant:
case SE_LimitMinLevel:
case SE_LimitCastTimeMin:
case SE_LimitCastTimeMax:
case SE_StackingCommand_Block:
case SE_StackingCommand_Overwrite:
case SE_PetPowerIncrease:
case SE_SkillDamageAmount:
case SE_ChannelChanceSpells:
case SE_Blank:
case SE_FcDamageAmt:
case SE_SpellDurationIncByTic:
case SE_FcSpellVulnerability:
case SE_FcDamageAmtIncoming:
case SE_FcDamagePctCrit:
case SE_FcDamageAmtCrit:
case SE_ReduceReuseTimer:
case SE_LimitCombatSkills:
case SE_BlockNextSpellFocus:
case SE_SpellTrigger:
case SE_LimitManaMin:
case SE_CorruptionCounter:
case SE_ApplyEffect:
case SE_NegateSpellEffect:
case SE_LimitSpellGroup:
case SE_LimitManaMax:
case SE_FcHealAmt:
case SE_FcHealPctIncoming:
case SE_FcHealAmtIncoming:
case SE_FcHealPctCritIncoming:
case SE_FcHealAmtCrit:
case SE_LimitClass:
case SE_LimitRace:
case SE_FcBaseEffects:
case 415:
case SE_SkillDamageAmount2:
case SE_FcLimitUse:
case SE_FcIncreaseNumHits:
case SE_LimitUseMin:
case SE_LimitUseType:
case SE_GravityEffect:
case 425:
return true;
default:
return false;
}
}
uint32 GetNimbusEffect(uint16 spell_id)
{
if (IsValidSpell(spell_id))
return (int32)spells[spell_id].NimbusEffect;
return 0;
}
int32 GetFuriousBash(uint16 spell_id)
{
if (!IsValidSpell(spell_id))
return 0;
bool found_effect_limit = false;
int32 mod = 0;
for (int i = 0; i < EFFECT_COUNT; ++i)
if (spells[spell_id].effectid[i] == SE_SpellHateMod)
mod = spells[spell_id].base[i];
else if (spells[spell_id].effectid[i] == SE_LimitEffect && spells[spell_id].base[i] == 999)
found_effect_limit = true;
if (found_effect_limit)
return mod;
else
return 0;
}
bool IsShortDurationBuff(uint16 spell_id)
{
if (IsValidSpell(spell_id) && spells[spell_id].short_buff_box != 0)
return true;
return false;
}
bool IsSpellUsableThisZoneType(uint16 spell_id, uint8 zone_type)
{
//check if spell can be cast in any zone (-1 or 255), then if spell zonetype matches zone's zonetype
// || spells[spell_id].zonetype == 255 comparing signed 8 bit int to 255 is always false
if (IsValidSpell(spell_id) && (spells[spell_id].zonetype == -1 ||
spells[spell_id].zonetype == zone_type))
return true;
return false;
}
const char* GetSpellName(uint16 spell_id)
{
return spells[spell_id].name;
}
| 1 | 10,604 | I dig the helper function! | EQEmu-Server | cpp |
@@ -0,0 +1,2 @@
+// package samples contains sample programs using the pubsub API.
+package samples | 1 | 1 | 13,279 | Please add the Copyright header. | google-go-cloud | go |
|
@@ -37,6 +37,7 @@ import sys
import tempfile
import luigi
+from ..target import FileSystemTarget
logger = logging.getLogger('luigi-interface')
| 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2016 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Template tasks for running external programs as luigi tasks.
This module is primarily intended for when you need to call a single external
program or shell script, and it's enough to specify program arguments and
environment variables.
If you need to run multiple commands, chain them together or pipe output
from one command to the next, you're probably better off using something like
`plumbum`_, and wrapping plumbum commands in normal luigi
:py:class:`~luigi.task.Task` s.
.. _plumbum: https://plumbum.readthedocs.io/
"""
import logging
import os
import signal
import subprocess
import sys
import tempfile
import luigi
logger = logging.getLogger('luigi-interface')
class ExternalProgramTask(luigi.Task):
"""
Template task for running an external program in a subprocess
The program is run using :py:class:`subprocess.Popen`, with ``args`` passed
as a list, generated by :py:meth:`program_args` (where the first element should
be the executable). See :py:class:`subprocess.Popen` for details.
Your must override :py:meth:`program_args` to specify the arguments you want,
and you can optionally override :py:meth:`program_environment` if you want to
control the environment variables (see :py:class:`ExternalPythonProgramTask`
for an example).
"""
def program_args(self):
"""
Override this method to map your task parameters to the program arguments
:return: list to pass as ``args`` to :py:class:`subprocess.Popen`
"""
raise NotImplementedError
def program_environment(self):
"""
Override this method to control environment variables for the program
:return: dict mapping environment variable names to values
"""
env = os.environ.copy()
return env
@property
def always_log_stderr(self):
"""
When True, stderr will be logged even if program execution succeeded
Override to False to log stderr only when program execution fails.
"""
return True
def _clean_output_file(self, file_object):
file_object.seek(0)
return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))
def run(self):
args = list(map(str, self.program_args()))
logger.info('Running command: %s', ' '.join(args))
tmp_stdout, tmp_stderr = tempfile.TemporaryFile(), tempfile.TemporaryFile()
env = self.program_environment()
proc = subprocess.Popen(
args,
env=env,
stdout=tmp_stdout,
stderr=tmp_stderr
)
try:
with ExternalProgramRunContext(proc):
proc.wait()
success = proc.returncode == 0
stdout = self._clean_output_file(tmp_stdout)
stderr = self._clean_output_file(tmp_stderr)
if stdout:
logger.info('Program stdout:\n{}'.format(stdout))
if stderr:
if self.always_log_stderr or not success:
logger.info('Program stderr:\n{}'.format(stderr))
if not success:
raise ExternalProgramRunError(
'Program failed with return code={}:'.format(proc.returncode),
args, env=env, stdout=stdout, stderr=stderr)
finally:
tmp_stderr.close()
tmp_stdout.close()
class ExternalProgramRunContext(object):
def __init__(self, proc):
self.proc = proc
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
def kill_job(self, captured_signal=None, stack_frame=None):
self.proc.kill()
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
class ExternalProgramRunError(RuntimeError):
def __init__(self, message, args, env=None, stdout=None, stderr=None):
super(ExternalProgramRunError, self).__init__(message, args, env, stdout, stderr)
self.message = message
self.args = args
self.env = env
self.out = stdout
self.err = stderr
def __str__(self):
info = self.message
info += '\nCOMMAND: {}'.format(' '.join(self.args))
info += '\nSTDOUT: {}'.format(self.out or '[empty]')
info += '\nSTDERR: {}'.format(self.err or '[empty]')
env_string = None
if self.env:
env_string = ' '.join(['='.join([k, '\'{}\''.format(v)]) for k, v in self.env.items()])
info += '\nENVIRONMENT: {}'.format(env_string or '[empty]')
# reset terminal color in case the ENVIRONMENT changes colors
info += '\033[m'
return info
class ExternalPythonProgramTask(ExternalProgramTask):
"""
Template task for running an external Python program in a subprocess
Simple extension of :py:class:`ExternalProgramTask`, adding two
:py:class:`luigi.parameter.Parameter` s for setting a virtualenv and for
extending the ``PYTHONPATH``.
"""
virtualenv = luigi.Parameter(
default=None,
positional=False,
description='path to the virtualenv directory to use. It should point to '
'the directory containing the ``bin/activate`` file used for '
'enabling the virtualenv.')
extra_pythonpath = luigi.Parameter(
default=None,
positional=False,
description='extend the search path for modules by prepending this '
'value to the ``PYTHONPATH`` environment variable.')
def program_environment(self):
env = super(ExternalPythonProgramTask, self).program_environment()
if self.extra_pythonpath:
pythonpath = ':'.join([self.extra_pythonpath, env.get('PYTHONPATH', '')])
env.update({'PYTHONPATH': pythonpath})
if self.virtualenv:
# Make the same changes to the env that a normal venv/bin/activate script would
path = ':'.join(['{}/bin'.format(self.virtualenv), env.get('PATH', '')])
env.update({
'PATH': path,
'VIRTUAL_ENV': self.virtualenv
})
# remove PYTHONHOME env variable, if it exists
env.pop('PYTHONHOME', None)
return env
| 1 | 17,389 | Is this the best way to import `FileSystemTarget`? | spotify-luigi | py |
@@ -2751,10 +2751,10 @@ static void
replace_thread_id(thread_id_t old, thread_id_t new)
{
#ifdef HAVE_TLS
- thread_id_t new_tid = new;
+ int32_t new_tid = (int32_t) new; /* can't use thread_id_t since it's 64-bits on x64 */
ASSERT(is_thread_tls_initialized());
DOCHECK(1, {
- thread_id_t old_tid;
+ int32_t old_tid; /* can't use thread_id_t since it's 64-bits on x64 */
READ_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid);
ASSERT(old_tid == old);
}); | 1 | /* *******************************************************************************
* Copyright (c) 2010-2017 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* os.c - Linux specific routines
*/
/* Easiest to match kernel stat struct by using 64-bit.
* This limits us to 2.4+ kernel but that's ok.
* I don't really want to get into requiring kernel headers to build
* general release packages, though that would be fine for targeted builds.
* There are 3 different stat syscalls (SYS_oldstat, SYS_stat, and SYS_stat64)
* and using _LARGEFILE64_SOURCE with SYS_stat64 is the best match.
*/
#define _LARGEFILE64_SOURCE
/* for mmap-related #defines */
#include <sys/types.h>
#include <sys/mman.h>
/* in case MAP_32BIT is missing */
#ifndef MAP_32BIT
# define MAP_32BIT 0x40
#endif
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON /* MAP_ANON on Mac */
#endif
/* for open */
#include <sys/stat.h>
#include <fcntl.h>
#include "../globals.h"
#include "../hashtable.h"
#include <string.h>
#include <unistd.h> /* for write and usleep and _exit */
#include <limits.h>
#ifdef MACOS
# include <sys/sysctl.h> /* for sysctl */
# ifndef SYS___sysctl
/* The name was changed on Yosemite */
# define SYS___sysctl SYS_sysctl
# endif
# include <mach/mach_traps.h> /* for swtch_pri */
# include "include/syscall_mach.h"
#endif
#ifdef LINUX
# include <sys/vfs.h> /* for statfs */
#elif defined(MACOS)
# include <sys/mount.h> /* for statfs */
# include <mach/mach.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <mach/sync_policy.h>
#endif
#include <dirent.h>
/* for getrlimit */
#include <sys/time.h>
#include <sys/resource.h>
#ifndef X64
struct compat_rlimit {
uint rlim_cur;
uint rlim_max;
};
#endif
#ifdef LINUX
/* For clone and its flags, the manpage says to include sched.h with _GNU_SOURCE
* defined. _GNU_SOURCE brings in unwanted extensions and causes name
* conflicts. Instead, we include unix/sched.h which comes from the Linux
* kernel headers.
*/
# include <linux/sched.h>
#endif
#include "module.h" /* elf */
#include "tls.h"
#ifdef LINUX
# include "module_private.h" /* for ELF_AUXV_TYPE and AT_PAGESZ */
#endif
#if defined(X86) && defined(DEBUG)
# include "os_asm_defines.asm" /* for TLS_SELF_OFFSET_ASM */
#endif
#ifndef F_DUPFD_CLOEXEC /* in linux 2.6.24+ */
# define F_DUPFD_CLOEXEC 1030
#endif
/* This is not always sufficient to identify a syscall return value.
* For example, MacOS has some 32-bit syscalls that return 64-bit
* values in xdx:xax.
*/
#define MCXT_SYSCALL_RES(mc) ((mc)->IF_X86_ELSE(xax, r0))
#if defined(AARCH64)
# define ASM_R2 "x2"
# define ASM_R3 "x3"
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrs "ASM_R3", tpidr_el0\n\t" \
"ldr "ASM_R3", ["ASM_R3", "ASM_R2"] \n\t"
#elif defined(ARM)
# define ASM_R2 "r2"
# define ASM_R3 "r3"
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrc p15, 0, "ASM_R3", c13, c0, "STRINGIFY(USR_TLS_REG_OPCODE)" \n\t" \
"ldr "ASM_R3", ["ASM_R3", "ASM_R2"] \n\t"
#endif /* ARM */
/* Prototype for all functions in .init_array. */
typedef int (*init_fn_t)(int argc, char **argv, char **envp);
/* For STATIC_LIBRARY we do not cache environ so the app can change it. */
#ifndef STATIC_LIBRARY
/* i#46: Private __environ pointer. Points at the environment variable array
* on the stack, which is different from what libc __environ may point at. We
* use the environment for following children and setting options, so its OK
* that we don't see what libc says.
*/
char **our_environ;
#endif
#include <errno.h>
/* avoid problems with use of errno as var name in rest of file */
#if !defined(STANDALONE_UNIT_TEST) && !defined(MACOS)
# undef errno
#endif
/* we define __set_errno below */
/* must be prior to <link.h> => <elf.h> => INT*_{MIN,MAX} */
# include "instr.h" /* for get_app_segment_base() */
#include "decode_fast.h" /* decode_cti: maybe os_handle_mov_seg should be ifdef X86? */
#include <dlfcn.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <syslog.h> /* vsyslog */
#include "../vmareas.h"
#ifdef RCT_IND_BRANCH
# include "../rct.h"
#endif
#ifdef LINUX
# include "include/syscall.h" /* our own local copy */
#else
# include <sys/syscall.h>
#endif
#include "../module_shared.h"
#include "os_private.h"
#include "../synch.h"
#include "memquery.h"
#include "ksynch.h"
#ifndef HAVE_MEMINFO_QUERY
# include "memcache.h"
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
/* Cross arch syscall nums for use with struct stat64. */
#ifdef X64
# ifdef SYS_stat
# define SYSNUM_STAT SYS_stat
# endif
# define SYSNUM_FSTAT SYS_fstat
#else
# define SYSNUM_STAT SYS_stat64
# define SYSNUM_FSTAT SYS_fstat64
#endif
#ifdef MACOS
# define SYSNUM_EXIT_PROCESS SYS_exit
# define SYSNUM_EXIT_THREAD SYS_bsdthread_terminate
#else
# define SYSNUM_EXIT_PROCESS SYS_exit_group
# define SYSNUM_EXIT_THREAD SYS_exit
#endif
#ifdef ANDROID
/* Custom prctl flags specific to Android (xref i#1861) */
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
#endif
#ifdef NOT_DYNAMORIO_CORE_PROPER
# undef ASSERT
# undef ASSERT_NOT_IMPLEMENTED
# undef ASSERT_NOT_TESTED
# undef ASSERT_CURIOSITY
# define ASSERT(x) /* nothing */
# define ASSERT_NOT_IMPLEMENTED(x) /* nothing */
# define ASSERT_NOT_TESTED(x) /* nothing */
# define ASSERT_CURIOSITY(x) /* nothing */
# undef LOG
# undef DOSTATS
# define LOG(...) /* nothing */
# define DOSTATS(...) /* nothing */
#else /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */
/* Guards data written by os_set_app_thread_area(). */
DECLARE_CXTSWPROT_VAR(static mutex_t set_thread_area_lock,
INIT_LOCK_FREE(set_thread_area_lock));
static bool first_thread_tls_initialized;
static bool last_thread_tls_exited;
tls_type_t tls_global_type;
#ifndef HAVE_TLS
/* We use a table lookup to find a thread's dcontext */
/* Our only current no-TLS target, VMKernel (VMX86_SERVER), doesn't have apps with
* tons of threads anyway
*/
#define MAX_THREADS 512
typedef struct _tls_slot_t {
thread_id_t tid;
dcontext_t *dcontext;
} tls_slot_t;
/* Stored in heap for self-prot */
static tls_slot_t *tls_table;
/* not static so deadlock_avoidance_unlock() can look for it */
DECLARE_CXTSWPROT_VAR(mutex_t tls_lock, INIT_LOCK_FREE(tls_lock));
#endif
#ifdef CLIENT_INTERFACE
/* Should we place this in a client header? Currently mentioned in
* dr_raw_tls_calloc() docs.
*/
static bool client_tls_allocated[MAX_NUM_CLIENT_TLS];
DECLARE_CXTSWPROT_VAR(static mutex_t client_tls_lock, INIT_LOCK_FREE(client_tls_lock));
#endif
#include <stddef.h> /* for offsetof */
#include <sys/utsname.h> /* for struct utsname */
/* forward decl */
static void handle_execve_post(dcontext_t *dcontext);
static bool os_switch_lib_tls(dcontext_t *dcontext, bool to_app);
static bool os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app);
#ifdef X86
static bool os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base);
#endif
#ifdef LINUX
static bool handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size,
byte *old_base, size_t old_size,
uint old_prot, uint old_type);
static void handle_app_brk(dcontext_t *dcontext, byte *lowest_brk/*if known*/,
byte *old_brk, byte *new_brk);
#endif
/* full path to our own library, used for execve */
static char dynamorio_library_path[MAXIMUM_PATH]; /* just dir */
static char dynamorio_library_filepath[MAXIMUM_PATH];
/* Issue 20: path to other architecture */
static char dynamorio_alt_arch_path[MAXIMUM_PATH];
static char dynamorio_alt_arch_filepath[MAXIMUM_PATH]; /* just dir */
/* Makefile passes us LIBDIR_X{86,64} defines */
#define DR_LIBDIR_X86 STRINGIFY(LIBDIR_X86)
#define DR_LIBDIR_X64 STRINGIFY(LIBDIR_X64)
/* pc values delimiting dynamo dll image */
static app_pc dynamo_dll_start = NULL;
static app_pc dynamo_dll_end = NULL; /* open-ended */
static app_pc executable_start = NULL;
static app_pc executable_end = NULL;
/* Used by get_application_name(). */
static char executable_path[MAXIMUM_PATH];
static char *executable_basename;
/* does the kernel provide tids that must be used to distinguish threads in a group? */
static bool kernel_thread_groups;
static bool kernel_64bit;
pid_t pid_cached;
static bool fault_handling_initialized;
#ifdef PROFILE_RDTSC
uint kilo_hertz; /* cpu clock speed */
#endif
/* Xref PR 258731, dup of STDOUT/STDERR in case app wants to close them. */
DR_API file_t our_stdout = STDOUT_FILENO;
DR_API file_t our_stderr = STDERR_FILENO;
DR_API file_t our_stdin = STDIN_FILENO;
/* we steal fds from the app */
static struct rlimit app_rlimit_nofile; /* cur rlimit set by app */
static int min_dr_fd;
/* we store all DR files so we can prevent the app from changing them,
* and so we can close them in a child of fork.
* the table key is the fd and the payload is the set of DR_FILE_* flags.
*/
static generic_table_t *fd_table;
#define INIT_HTABLE_SIZE_FD 6 /* should remain small */
#ifdef DEBUG
static int num_fd_add_pre_heap;
#endif
#ifdef LINUX
/* i#1004: brk emulation */
static byte *app_brk_map;
static byte *app_brk_cur;
static byte *app_brk_end;
#endif
#ifdef MACOS
/* xref i#1404: we should expose these via the dr_get_os_version() API */
static int macos_version;
# define MACOS_VERSION_SIERRA 16
# define MACOS_VERSION_EL_CAPITAN 15
# define MACOS_VERSION_YOSEMITE 14
# define MACOS_VERSION_MAVERICKS 13
# define MACOS_VERSION_MOUNTAIN_LION 12
# define MACOS_VERSION_LION 11
#endif
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os);
static void
process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot,
uint flags _IF_DEBUG(const char *map_type));
#ifdef LINUX
static char *
read_proc_self_exe(bool ignore_cache);
#endif
/* Libc independent directory iterator, similar to readdir. If we ever need
* this on Windows we should generalize it and export it to clients.
*/
typedef struct _dir_iterator_t {
file_t fd;
int off;
int end;
const char *name; /* Name of the current entry. */
char buf[4 * MAXIMUM_PATH]; /* Expect stack alloc, so not too big. */
} dir_iterator_t;
static void os_dir_iterator_start(dir_iterator_t *iter, file_t fd);
static bool os_dir_iterator_next(dir_iterator_t *iter);
/* XXX: If we generalize to Windows, will we need os_dir_iterator_stop()? */
/* vsyscall page. hardcoded at 0xffffe000 in earlier kernels, but
* randomly placed since fedora2.
* marked rx then: FIXME: should disallow this guy when that's the case!
* random vsyscall page is identified in maps files as "[vdso]"
* (kernel-provided fake shared library or Virt Dyn Shared Object).
*/
/* i#1583: vdso is now 2 pages, yet we assume vsyscall is on 1st page. */
app_pc vsyscall_page_start = NULL;
/* pc of the end of the syscall instr itself */
app_pc vsyscall_syscall_end_pc = NULL;
/* pc where kernel returns control after sysenter vsyscall */
app_pc vsyscall_sysenter_return_pc = NULL;
/* pc where our hook-displaced code was copied */
app_pc vsyscall_sysenter_displaced_pc = NULL;
#define VSYSCALL_PAGE_START_HARDCODED ((app_pc)(ptr_uint_t) 0xffffe000)
#ifdef X64
/* i#430, in Red Hat Enterprise Server 5.6, vsyscall region is marked
* not executable
* ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall]
*/
# define VSYSCALL_REGION_MAPS_NAME "[vsyscall]"
#endif
/* i#1908: vdso and vsyscall are now split */
app_pc vdso_page_start = NULL;
#if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* The pthreads library keeps errno in its pthread_descr data structure,
* which it looks up by dispatching on the stack pointer. This doesn't work
* when within dynamo. Thus, we define our own __errno_location() for use both
* by us and the app, to prevent pthreads looking at the stack pointer when
* out of the code cache.
*/
/* FIXME: maybe we should create 1st dcontext earlier so we don't need init_errno?
* any problems with init_errno being set and then dcontext->errno being read?
* FIXME: if a thread issues a dr_app_stop, then we don't want to use
* this errno slot? But it may later do a start...probably ok to keep using
* the slot. But, when threads die, they'll all use the same init_errno!
*/
static int init_errno; /* errno until 1st dcontext created */
int *
__errno_location(void) {
/* Each dynamo thread should have a separate errno */
dcontext_t *dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return &init_errno;
else {
/* WARNING: init_errno is in data segment so can be RO! */
return &(dcontext->upcontext_ptr->dr_errno);
}
}
#endif /* !STANDALONE_UNIT_TEST && !STATIC_LIBRARY */
#if defined(HAVE_TLS) && defined(CLIENT_INTERFACE)
/* i#598
* (gdb) x/20i (*(errno_loc_t)0xf721e413)
* 0xf721e413 <__errno_location>: push %ebp
* 0xf721e414 <__errno_location+1>: mov %esp,%ebp
* 0xf721e416 <__errno_location+3>: call <__x86.get_pc_thunk.cx>
* 0xf721e41b <__errno_location+8>: add $0x166bd9,%ecx
* 0xf721e421 <__errno_location+14>: mov -0x1c(%ecx),%eax
* 0xf721e427 <__errno_location+20>: add %gs:0x0,%eax
* 0xf721e42e <__errno_location+27>: pop %ebp
* 0xf721e42f <__errno_location+28>: ret
*
* __errno_location calcuates the errno location by adding
* TLS's base with errno's offset in TLS.
* However, because the TLS has been switched in os_tls_init,
* the calculated address is wrong.
* We first get the errno offset in TLS at init time and
* calculate correct address by adding the app's tls base.
*/
/* __errno_location on ARM:
* 0xb6f0b290 <__errno_location>: ldr r3, [pc, #12]
* 0xb6f0b292 <__errno_location+2>: mrc 15, 0, r0, cr13, cr0, {3}
* 0xb6f0b296 <__errno_location+6>: add r3, pc
* 0xb6f0b298 <__errno_location+8>: ldr r3, [r3, #0]
* 0xb6f0b29a <__errno_location+10>: adds r0, r0, r3
* 0xb6f0b29c <__errno_location+12>: bx lr
* It uses the predefined offset to get errno location in TLS,
* and we should be able to reuse the code here.
*/
static int libc_errno_tls_offs;
static int *
our_libc_errno_loc(void)
{
void *app_tls = os_get_app_tls_base(NULL, TLS_REG_LIB);
if (app_tls == NULL)
return NULL;
return (int *)(app_tls + libc_errno_tls_offs);
}
#endif
/* i#238/PR 499179: libc errno preservation
*
* Errno location is per-thread so we store the
* function globally and call it each time. Note that pthreads seems
* to be the one who provides per-thread errno: using raw syscalls to
* create threads, we end up with a global errno:
*
* > for i in linux.thread.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007f153de26698
* libc errno loc: 0x00007f153de26698
* > for i in pthreads.pthreads.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007fc24d1ce698
* libc errno loc: 0x00007fc24d1cd8b8
* libc errno loc: 0x00007fc24c7cc8b8
*/
/* Signature of libc's __errno_location(): returns the per-thread errno slot. */
typedef int *(*errno_loc_t)(void);
/* Finds and caches (in a function-local static) the app libc's
 * __errno_location.  When do_init is true, walks the loaded modules looking
 * for libc and resolves the symbol; must be called during init while .data
 * is still writable.  Returns NULL if libc has not been found yet.
 */
static errno_loc_t
get_libc_errno_location(bool do_init)
{
    static errno_loc_t libc_errno_loc;
    if (do_init) {
        module_iterator_t *mi = module_iterator_start();
        while (module_iterator_hasnext(mi)) {
            module_area_t *area = module_iterator_next(mi);
            const char *modname = GET_MODULE_NAME(&area->names);
            /* We ensure matches start to avoid matching "libgolibc.so".
             * GET_MODULE_NAME never includes the path: i#138 will add path.
             */
            if (modname != NULL && strstr(modname, "libc.so") == modname) {
                bool found = true;
                /* called during init when .data is writable */
                libc_errno_loc = (errno_loc_t)
                    get_proc_address(area->start, "__errno_location");
                ASSERT(libc_errno_loc != NULL);
                LOG(GLOBAL, LOG_THREADS, 2, "libc errno loc func: "PFX"\n",
                    libc_errno_loc);
#ifdef CLIENT_INTERFACE
                /* Currently, the DR is loaded by system loader and hooked up
                 * to app's libc.  So right now, we still need this routine.
                 * we can remove this after libc independency and/or
                 * early injection
                 */
                if (INTERNAL_OPTION(private_loader)) {
                    acquire_recursive_lock(&privload_lock);
                    /* Skip privately-loaded copies of libc: we want the
                     * app's libc, not our own.
                     */
                    if (privload_lookup_by_base(area->start) != NULL)
                        found = false;
                    release_recursive_lock(&privload_lock);
                }
#endif
                if (found)
                    break;
            }
        }
        module_iterator_stop(mi);
#if defined(HAVE_TLS) && defined(CLIENT_INTERFACE)
        /* i#598: init the libc errno's offset.  If we didn't find libc above,
         * then we don't need to do this.
         */
        if (INTERNAL_OPTION(private_loader) && libc_errno_loc != NULL) {
            void *priv_lib_tls_base = os_get_priv_tls_base(NULL, TLS_REG_LIB);
            ASSERT(priv_lib_tls_base != NULL);
            /* Record errno's offset within the TLS block, then redirect
             * future lookups through our own resolver.
             */
            libc_errno_tls_offs = (void *)libc_errno_loc() - priv_lib_tls_base;
            libc_errno_loc = &our_libc_errno_loc;
        }
#endif
    }
    return libc_errno_loc;
}
/* i#238/PR 499179: our __errno_location isn't affecting libc so until
 * we have libc independence or our own private isolated libc we need
 * to preserve the app's libc's errno.
 * Returns the current value of the app libc's per-thread errno, or 0 if
 * __errno_location has not been resolved yet (e.g., early injection).
 */
int
get_libc_errno(void)
{
#if defined(STANDALONE_UNIT_TEST) && (defined(MACOS) || defined(ANDROID))
    return errno;
#else
# ifdef STANDALONE_UNIT_TEST
    errno_loc_t func = __errno_location;
# else
    errno_loc_t func = get_libc_errno_location(false);
# endif
    if (func == NULL) {
        /* libc hasn't been loaded yet or we're doing early injection. */
        return 0;
    } else {
        int *loc = (*func)();
        ASSERT(loc != NULL);
        LOG(THREAD_GET, LOG_THREADS, 5, "libc errno loc: "PFX"\n", loc);
        /* Defensive re-check for release builds where ASSERT is a no-op. */
        if (loc != NULL)
            return *loc;
    }
    return 0;
#endif
}
/* N.B.: pthreads has two other locations it keeps on a per-thread basis:
* h_errno and res_state. See glibc-2.2.4/linuxthreads/errno.c.
* If dynamo ever modifies those we'll need to do to them what we now do to
* errno.
*/
/* The environment vars exhibit totally messed up behavior when someone
* does an execve of /bin/sh -- not sure what's going on, but using our
* own implementation of unsetenv fixes all our problems. If we use
* libc's, unsetenv either does nothing or ends up having getenv return
* NULL for other vars that are obviously set (by iterating through environ).
* FIXME: find out the real story here.
*/
/* Removes every environment entry whose name matches "name", shifting the
 * remaining entries down.  Returns 0 on success, -1 on bad input or when
 * our_environ has not been set up.
 * FIXME: really we should have some kind of synchronization.
 */
int
our_unsetenv(const char *name)
{
    size_t len;
    char **scan = our_environ;
    /* Reject NULL, empty, or '='-containing names, per unsetenv(3). */
    if (name == NULL || *name == '\0' || strchr(name, '=') != NULL)
        return -1;
    ASSERT(our_environ != NULL);
    if (our_environ == NULL)
        return -1;
    len = strlen(name);
    while (*scan != NULL) {
        if (strncmp(*scan, name, len) != 0 || (*scan)[len] != '=') {
            scan++;
            continue;
        }
        /* Match: shift all subsequent entries down one slot.  We do not
         * advance scan, so consecutive matches are removed as well.
         */
        {
            char **dst;
            for (dst = scan; *dst != NULL; dst++)
                *dst = *(dst + 1);
        }
    }
    return 0;
}
/* Clobbers the name rather than shifting, to preserve auxv (xref i#909). */
/* Disables every environment entry named "name" by overwriting the name text
 * in place with "__disabled__".  Returns false on bad input or missing
 * environ, true otherwise (even if no entry matched).
 * NOTE(review): this relies on snprintf truncating at name_len so that the
 * '=' at (*env)[name_len] stays intact; confirm DR's snprintf semantics for
 * names shorter than "__disabled__".
 */
bool
disable_env(const char *name)
{
    size_t name_len;
    char **env = our_environ;
    if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) {
        return false;
    }
    ASSERT(our_environ != NULL);
    if (our_environ == NULL)
        return false;
    name_len = strlen(name);
    while (*env != NULL) {
        if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
            /* We have a match.  If we shift subsequent entries we'll mess
             * up access to auxv, which is after the env block, so we instead
             * disable the env var by changing its name.
             * We keep going to handle later matches.
             */
            snprintf(*env, name_len, "__disabled__");
        }
        env++;
    }
    return true;
}
/* i#46: Private getenv.
*/
/* i#46: private getenv.  Looks "name" up in our cached environ and returns a
 * pointer to the value portion of the matching "name=value" entry, or NULL
 * if the name is invalid or not present.
 */
char *
our_getenv(const char *name)
{
    char **scan;
    size_t len;
    /* Reject NULL, empty, or '='-containing names, per getenv(3). */
    if (name == NULL || name[0] == '\0' || strchr(name, '=') != NULL)
        return NULL;
    ASSERT_MESSAGE(CHKLVL_ASSERTS, "our_environ is missing. _init() or "
                   "dynamorio_set_envp() were not called", our_environ != NULL);
    if (our_environ == NULL)
        return NULL;
    len = strlen(name);
    for (scan = our_environ; *scan != NULL; scan++) {
        if (strncmp(*scan, name, len) == 0 && (*scan)[len] == '=')
            return *scan + len + 1;
    }
    return NULL;
}
/* Work around drpreload's _init going first.  We can get envp in our own _init
 * routine down below, but drpreload.so comes first and calls
 * dynamorio_app_init before our own _init routine gets called.  Apps using the
 * app API are unaffected because our _init routine will have run by then.  For
 * STATIC_LIBRARY, we used to set our_environ in our_init(), but to support
 * the app setting DYNAMORIO_OPTIONS after our_init() runs, we now just use environ.
 */
/* Caches the caller-supplied environment pointer for later use by
 * our_getenv() and friends.  envp must remain valid for the process lifetime.
 */
DYNAMORIO_EXPORT
void
dynamorio_set_envp(char **envp)
{
    our_environ = envp;
}
/* Shared library init: caches envp and optionally takes over the app
 * (INIT_TAKE_OVER build, vmkernel classic, or the DYNAMORIO_TAKEOVER_IN_INIT
 * env var for static-library builds).  Always returns 0.
 */
int
our_init(int argc, char **argv, char **envp)
{
    /* If we do not want to use drpreload.so, we can take over here: but when using
     * drpreload, this is called *after* we have already taken over.
     */
    extern void dynamorio_app_take_over(void);
    bool takeover = false;
#ifdef INIT_TAKE_OVER
    takeover = true;
#endif
#ifdef VMX86_SERVER
    /* PR 391765: take over here instead of using preload */
    takeover = os_in_vmkernel_classic();
#endif
#ifndef STATIC_LIBRARY
    if (our_environ != NULL) {
        /* Set by dynamorio_set_envp above.  These should agree. */
        ASSERT(our_environ == envp);
    } else {
        our_environ = envp;
    }
#endif
    /* if using preload, no -early_inject */
#ifdef STATIC_LIBRARY
    if (!takeover) {
        /* Static-library apps opt in to init-time takeover via env var. */
        const char *takeover_env = getenv("DYNAMORIO_TAKEOVER_IN_INIT");
        if (takeover_env != NULL && strcmp(takeover_env, "1") == 0) {
            takeover = true;
        }
    }
#endif
    if (takeover) {
        if (dynamorio_app_init() == 0 /* success */) {
            dynamorio_app_take_over();
        }
    }
    return 0;
}
#if defined(STATIC_LIBRARY) || defined(STANDALONE_UNIT_TEST)
/* If we're getting linked into a binary that already has an _init definition
 * like the app's exe or unit_tests, we add a pointer to our_init() to the
 * .init_array section.  We can't use the constructor attribute because not all
 * toolchains pass the args and environment to the constructor.
 */
static init_fn_t
# ifdef MACOS
__attribute__ ((section ("__DATA,__mod_init_func"), aligned (sizeof (void *)), used))
# else
__attribute__ ((section (".init_array"), aligned (sizeof (void *)), used))
# endif
init_array[] = {
    our_init
};
#else
/* If we're a normal shared object, then we override _init.
 */
int
_init(int argc, char **argv, char **envp)
{
# ifdef ANDROID
    /* i#1862: the Android loader passes *nothing* to lib init routines.  We
     * rely on DR being listed before libc so we can read the TLS slot the
     * kernel set up.
     */
    if (!get_kernel_args(&argc, &argv, &envp)) {
        /* XXX: scan the stack and look for known auxv patterns or sthg. */
        argc = 0;
        argv = NULL;
        envp = NULL;
    }
    ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to find envp", envp != NULL);
# endif
    /* Delegate to the shared init path used by all build configs. */
    return our_init(argc, argv, envp);
}
#endif
/* Returns whether the running kernel is 64-bit, as cached by get_uname(). */
bool
kernel_is_64bit(void)
{
    return kernel_64bit;
}
#ifdef MACOS
/* XXX: if we get enough of these, move to os_macos.c or sthg */
/* Thin wrapper around the two-level SYS___sysctl syscall.  Fills buf (of
 * size bufsz) with the value for {level0, level1}; returns true on success.
 */
static bool
sysctl_query(int level0, int level1, void *buf, size_t bufsz)
{
    int mib[2];
    size_t outlen = bufsz;
    int rc;
    mib[0] = level0;
    mib[1] = level1;
    rc = dynamorio_syscall(SYS___sysctl, 6, &mib, 2, buf, &outlen, NULL, 0);
    return (rc >= 0);
}
#endif
/* Queries the kernel for utsname info (sysname, release, machine, ...), logs
 * it, and sets kernel_64bit and (on MacOS) macos_version.
 */
static void
get_uname(void)
{
    /* assumption: only called at init, so we don't need any synch
     * or .data unprot
     */
    static struct utsname uinfo; /* can be large, avoid stack overflow */
#ifdef MACOS
    if (!sysctl_query(CTL_KERN, KERN_OSTYPE, &uinfo.sysname, sizeof(uinfo.sysname)) ||
        !sysctl_query(CTL_KERN, KERN_HOSTNAME, &uinfo.nodename,
                      sizeof(uinfo.nodename)) ||
        !sysctl_query(CTL_KERN, KERN_OSRELEASE, &uinfo.release, sizeof(uinfo.release)) ||
        !sysctl_query(CTL_KERN, KERN_VERSION, &uinfo.version, sizeof(uinfo.version)) ||
        !sysctl_query(CTL_HW, HW_MACHINE, &uinfo.machine, sizeof(uinfo.machine))) {
        ASSERT(false && "sysctl queries failed");
        return;
    }
#else
    DEBUG_DECLARE(int res =)
        dynamorio_syscall(SYS_uname, 1, (ptr_uint_t)&uinfo);
    ASSERT(res >= 0);
#endif
    LOG(GLOBAL, LOG_TOP, 1, "uname:\n\tsysname: %s\n", uinfo.sysname);
    LOG(GLOBAL, LOG_TOP, 1, "\tnodename: %s\n", uinfo.nodename);
    LOG(GLOBAL, LOG_TOP, 1, "\trelease: %s\n", uinfo.release);
    LOG(GLOBAL, LOG_TOP, 1, "\tversion: %s\n", uinfo.version);
    LOG(GLOBAL, LOG_TOP, 1, "\tmachine: %s\n", uinfo.machine);
    if (strncmp(uinfo.machine, "x86_64", sizeof("x86_64")) == 0)
        kernel_64bit = true;
#ifdef MACOS
    /* XXX: I would skip these checks for standalone so we don't have to set env
     * vars for frontends to see the options but I'm still afraid of some syscall
     * crash with no output: I'd rather have two messages than silent crashing.
     */
    if (DYNAMO_OPTION(max_supported_os_version) != 0) { /* 0 disables */
        /* We only support OSX 10.7.5 - 10.9.1.  That means kernels 11.x-13.x. */
# define MIN_DARWIN_VERSION_SUPPORTED 11
        int kernel_major;
        if (sscanf(uinfo.release, "%d", &kernel_major) != 1 ||
            kernel_major > DYNAMO_OPTION(max_supported_os_version) ||
            kernel_major < MIN_DARWIN_VERSION_SUPPORTED) {
            /* We make this non-fatal as it's likely DR will work */
            SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(),
                   get_application_pid(), uinfo.release);
        }
        /* NOTE(review): macos_version is only set when the version check is
         * enabled (max_supported_os_version != 0) -- confirm readers tolerate
         * its default when the check is disabled.
         */
        macos_version = kernel_major;
    }
#endif
}
/* os-specific initializations: uname/name caches, thread-id mechanism
 * detection, signal and memory-query init, the global fd table, and
 * platform extras (emulated brk, Android version).  Ordering here matters:
 * see the inline comments.
 */
void
os_init(void)
{
    ksynch_init();
    get_uname();
    /* Populate global data caches. */
    get_application_name();
    get_application_base();
    /* determine whether gettid is provided and needed for threads,
     * or whether getpid suffices.  even 2.4 kernels have gettid
     * (maps to getpid), don't have an old enough target to test this.
     */
#ifdef MACOS
    kernel_thread_groups = (dynamorio_syscall(SYS_thread_selfid, 0) >= 0);
#else
    kernel_thread_groups = (dynamorio_syscall(SYS_gettid, 0) >= 0);
#endif
    LOG(GLOBAL, LOG_TOP|LOG_STATS, 1, "thread id is from %s\n",
        kernel_thread_groups ? "gettid" : "getpid");
#ifdef MACOS
    /* SYS_thread_selfid was added in 10.6.  We have no simple way to get the
     * thread id on 10.5, so we don't support it.
     */
    if (!kernel_thread_groups) {
        SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3,
               get_application_name(), get_application_pid(), "Mac OSX 10.5 or earlier");
    }
#else
    ASSERT_CURIOSITY(kernel_thread_groups);
#endif
    pid_cached = get_process_id();
#ifdef VMX86_SERVER
    vmk_init();
#endif
    signal_init();
    /* We now set up an early fault handler for safe_read() (i#350) */
    fault_handling_initialized = true;
    memquery_init();
#ifdef PROFILE_RDTSC
    if (dynamo_options.profile_times) {
        ASSERT_NOT_TESTED();
        kilo_hertz = get_timer_frequency();
        LOG(GLOBAL, LOG_TOP|LOG_STATS, 1, "CPU MHz is %d\n", kilo_hertz/1000);
    }
#endif /* PROFILE_RDTSC */
    /* Needs to be after heap_init */
    IF_NO_MEMQUERY(memcache_init());
    /* we didn't have heap in os_file_init() so create and add global logfile now */
    fd_table = generic_hash_create(GLOBAL_DCONTEXT, INIT_HTABLE_SIZE_FD,
                                   80 /* load factor: not perf-critical */,
                                   HASHTABLE_SHARED | HASHTABLE_PERSISTENT,
                                   NULL _IF_DEBUG("fd table"));
#ifdef DEBUG
    if (GLOBAL != INVALID_FILE)
        fd_table_add(GLOBAL, OS_OPEN_CLOSE_ON_FORK);
#endif
    /* Ensure initialization */
    get_dynamorio_dll_start();
#ifdef LINUX
    if (DYNAMO_OPTION(emulate_brk))
        init_emulated_brk(NULL);
#endif
#ifdef ANDROID
    /* This must be set up earlier than privload_tls_init, and must be set up
     * for non-client-interface as well, as this initializes DR_TLS_BASE_OFFSET
     * (i#1931).
     */
    init_android_version();
#endif
}
/* called before any logfiles are opened */
/* Reserves the top -steal_fds file descriptors for DR's own use by lowering
 * the soft/hard RLIMIT_NOFILE seen by the app and raising our own soft limit
 * to the hard limit.  Heap is not yet available, so fd_table is created later
 * in os_init().
 */
void
os_file_init(void)
{
    /* We steal fds from the app for better transparency.  We lower the max file
     * descriptor limit as viewed by the app, and block SYS_dup{2,3} and
     * SYS_fcntl(F_DUPFD*) from creating a file explicitly in our space.  We do
     * not try to stop incremental file opening from extending into our space:
     * if the app really is running out of fds, we'll give it some of ours:
     * after all we probably don't need all -steal_fds, and if we really need fds
     * we typically open them at startup.  We also don't bother watching all
     * syscalls that take in fds from affecting our fds.
     */
    if (DYNAMO_OPTION(steal_fds) > 0) {
        struct rlimit rlimit_nofile;
        /* SYS_getrlimit uses an old 32-bit-field struct so we want SYS_ugetrlimit */
        if (dynamorio_syscall(IF_MACOS_ELSE(SYS_getrlimit,
                                            IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)),
                              2, RLIMIT_NOFILE, &rlimit_nofile) != 0) {
            /* linux default is 1024 */
            SYSLOG_INTERNAL_WARNING("getrlimit RLIMIT_NOFILE failed"); /* can't LOG yet */
            rlimit_nofile.rlim_cur = 1024;
            rlimit_nofile.rlim_max = 1024;
        }
        /* pretend the limit is lower and reserve the top spots for us.
         * for simplicity and to give as much room as possible to app,
         * raise soft limit to equal hard limit.
         * if an app really depends on a low soft limit, they can run
         * with -steal_fds 0.
         */
        if (rlimit_nofile.rlim_max > DYNAMO_OPTION(steal_fds)) {
            int res;
            min_dr_fd = rlimit_nofile.rlim_max - DYNAMO_OPTION(steal_fds);
            app_rlimit_nofile.rlim_max = min_dr_fd;
            app_rlimit_nofile.rlim_cur = app_rlimit_nofile.rlim_max;
            rlimit_nofile.rlim_cur = rlimit_nofile.rlim_max;
            res = dynamorio_syscall(SYS_setrlimit, 2, RLIMIT_NOFILE, &rlimit_nofile);
            if (res != 0) {
                SYSLOG_INTERNAL_WARNING("unable to raise RLIMIT_NOFILE soft limit: %d",
                                        res);
            }
        } else /* not fatal: we'll just end up using fds in app space */
            SYSLOG_INTERNAL_WARNING("unable to reserve fds");
    }
    /* we don't have heap set up yet so we init fd_table in os_init */
}
/* we need to re-cache after a fork */
/* Returns the process id formatted as a cached static string.  Pass
 * ignore_cache=true to force a refresh (needed after a fork).
 */
static char *
get_application_pid_helper(bool ignore_cache)
{
    static char pidstr[16];
    /* Refill when empty or when the caller demands a fresh value. */
    if (ignore_cache || pidstr[0] == '\0') {
        snprintf(pidstr, sizeof(pidstr)-1, "%d", get_process_id());
    }
    return pidstr;
}
/* get application pid, (cached), used for event logging */
/* Returns the cached pid string; see get_application_pid_helper(). */
char*
get_application_pid()
{
    return get_application_pid_helper(false);
}
/* i#907: Called during early injection before data section protection to avoid
 * issues with /proc/self/exe.
 */
/* Caches the app's executable path (truncating to the buffer size). */
void
set_executable_path(const char *exe_path)
{
    strncpy(executable_path, exe_path, BUFFER_SIZE_ELEMENTS(executable_path));
    NULL_TERMINATE_BUFFER(executable_path);
}
/* The OSX kernel used to place the bare executable path above envp.
 * On recent XNU versions, the kernel now prefixes the executable path
 * with the string executable_path= so it can be parsed getenv style.
 */
#ifdef MACOS
/* getenv-style key prefix recent XNU kernels place before the exec path */
# define EXECUTABLE_KEY "executable_path="
#endif
/* i#189: we need to re-cache after a fork */
/* Returns the app's executable path (full_path=true) or basename, populating
 * the caches on first use or when ignore_cache is set.
 */
static char *
get_application_name_helper(bool ignore_cache, bool full_path)
{
    if (!executable_path[0] || ignore_cache) {
#ifdef VMX86_SERVER
        if (os_in_vmkernel_userworld()) {
            /* NOTE(review): "pid" is not declared in this function; this
             * VMX86_SERVER-only path looks stale -- verify it still compiles
             * in that configuration.
             */
            vmk_getnamefrompid(pid, executable_path, sizeof(executable_path));
        } else
#endif
        if (DYNAMO_OPTION(early_inject)) {
            ASSERT(executable_path[0] != '\0' &&
                   "i#907: Can't read /proc/self/exe for early injection");
        } else {
#ifdef LINUX
            /* Populate cache from /proc/self/exe link. */
            strncpy(executable_path, read_proc_self_exe(ignore_cache),
                    BUFFER_SIZE_ELEMENTS(executable_path));
#else
            /* OSX kernel puts full app exec path above envp */
            char *c, **env = our_environ;
            do {
                env++;
            } while (*env != NULL);
            env++; /* Skip the NULL separating the envp array from exec_path */
            c = *env;
            if (strncmp(EXECUTABLE_KEY, c, strlen(EXECUTABLE_KEY)) == 0) {
                c += strlen(EXECUTABLE_KEY);
            }
            /* If our frontends always absolute-ize paths prior to exec,
             * this should usually be absolute -- but we go ahead and
             * handle relative just in case (and to handle child processes).
             * We add the cur dir, but note that the resulting path can
             * still contain . or .. so it's not normalized (but it is a
             * correct absolute path).  Xref i#1402, i#1406, i#1407.
             */
            if (*c != '/') {
                int len;
                if (!os_get_current_dir(executable_path,
                                        BUFFER_SIZE_ELEMENTS(executable_path)))
                    len = 0;
                else
                    len = strlen(executable_path);
                snprintf(executable_path + len,
                         BUFFER_SIZE_ELEMENTS(executable_path) - len,
                         "%s%s", len > 0 ? "/" : "", c);
            } else
                strncpy(executable_path, c, BUFFER_SIZE_ELEMENTS(executable_path));
#endif
            NULL_TERMINATE_BUFFER(executable_path);
            /* FIXME: Fall back on /proc/self/cmdline and maybe argv[0] from
             * _init().
             */
            ASSERT(strlen(executable_path) > 0 &&
                   "readlink /proc/self/exe failed");
        }
    }
    /* Get basename. */
    if (executable_basename == NULL || ignore_cache) {
        executable_basename = strrchr(executable_path, '/');
        executable_basename = (executable_basename == NULL ?
                               executable_path : executable_basename + 1);
    }
    return (full_path ? executable_path : executable_basename);
}
/* get application name, (cached), used for event logging */
/* Returns the cached full path of the app executable. */
char *
get_application_name(void)
{
    return get_application_name_helper(false, true /* full path */);
}
/* Note: this is exported so that libdrpreload.so (preload.c) can use it to
 * get process names to do selective process following (PR 212034).  The
 * alternative is to duplicate or compile in this code into libdrpreload.so,
 * which is messy.  Besides, libdynamorio.so is already loaded into the process
 * and avaiable, so cleaner to just use functions from it.
 */
/* Returns the cached basename of the app executable. */
DYNAMORIO_EXPORT const char *
get_application_short_name(void)
{
    return get_application_name_helper(false, false /* short name */);
}
/* Processor information provided by kernel */
#define PROC_CPUINFO "/proc/cpuinfo"
#define CPUMHZ_LINE_LENGTH  64
#define CPUMHZ_LINE_FORMAT "cpu MHz\t\t: %lu.%03lu\n"
/* printed in /usr/src/linux-2.4/arch/i386/kernel/setup.c calibrated in time.c */
/* seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n", cpu_khz / 1000, (cpu_khz % 1000)) */
/* e.g. cpu MHz         : 1594.851 */
/* Parses the CPU frequency out of /proc/cpuinfo and returns it in KHz.
 * Falls back to 1 GHz if /proc is unavailable or the line is not found.
 */
static timestamp_t
get_timer_frequency_cpuinfo(void)
{
    file_t cpuinfo;
    ssize_t nread;
    char *buf;
    char *mhz_line;
    ulong cpu_mhz = 1000;
    ulong cpu_khz = 0;
    cpuinfo = os_open(PROC_CPUINFO, OS_OPEN_READ);
    /* This can happen in a chroot or if /proc is disabled. */
    if (cpuinfo == INVALID_FILE)
        return 1000 * 1000;  /* 1 GHz */
    /* cpu MHz is typically in the first 4096 bytes.  If not, or we get a short
     * or interrupted read, our timer frequency estimate will be off, but it's
     * not the end of the world.
     * FIXME: Factor a buffered file reader out of our maps iterator if we want
     * to do this the right way.
     */
    buf = global_heap_alloc(PAGE_SIZE HEAPACCT(ACCT_OTHER));
    nread = os_read(cpuinfo, buf, PAGE_SIZE - 1);
    if (nread > 0) {
        buf[nread] = '\0';
        mhz_line = strstr(buf, "cpu MHz\t\t:");
        if (mhz_line != NULL &&
            sscanf(mhz_line, CPUMHZ_LINE_FORMAT, &cpu_mhz, &cpu_khz) == 2) {
            LOG(GLOBAL, LOG_ALL, 2, "Processor speed exactly %lu.%03luMHz\n",
                cpu_mhz, cpu_khz);
        }
    }
    global_heap_free(buf, PAGE_SIZE HEAPACCT(ACCT_OTHER));
    os_close(cpuinfo);
    /* Combine the integral MHz and the 3-digit fractional part into KHz. */
    return cpu_mhz * 1000 + cpu_khz;
}
/* Returns the CPU timer frequency in KHz, via vmkernel when running there
 * and /proc/cpuinfo otherwise.
 */
timestamp_t
get_timer_frequency()
{
#ifdef VMX86_SERVER
    if (os_in_vmkernel_userworld()) {
        return vmk_get_timer_frequency();
    }
#endif
    return get_timer_frequency_cpuinfo();
}
/* DR has standardized on UTC time which counts from since Jan 1, 1601.
 * That's the Windows standard.  But Linux uses the Epoch of Jan 1, 1970.
 */
#define UTC_TO_EPOCH_SECONDS 11644473600
/* seconds since 1601 */
uint
query_time_seconds(void)
{
    struct timeval current_time;
    uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, ¤t_time, NULL);
#ifdef MACOS
    /* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
    if (macos_version < MACOS_VERSION_SIERRA) {
        if ((int)val < 0)
            return 0;
        /* Low 32 bits of the packed return value are the seconds. */
        return (uint)val + UTC_TO_EPOCH_SECONDS;
    }
#endif
    if ((int)val >= 0) {
        return current_time.tv_sec + UTC_TO_EPOCH_SECONDS;
    } else {
        ASSERT_NOT_REACHED();
        return 0;
    }
}
/* milliseconds since 1601 */
uint64
query_time_millis()
{
    struct timeval current_time;
    uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, ¤t_time, NULL);
#ifdef MACOS
    /* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
    if (macos_version < MACOS_VERSION_SIERRA) {
        if ((int)val > 0) {
            /* Unpack the syscall's usecs:secs return into the timeval. */
            current_time.tv_sec = (uint) val;
            current_time.tv_usec = (uint)(val >> 32);
        }
    }
#endif
    if ((int)val >= 0) {
        uint64 res = (((uint64)current_time.tv_sec) * 1000) +
            (current_time.tv_usec / 1000);
        res += UTC_TO_EPOCH_SECONDS * 1000;
        return res;
    } else {
        ASSERT_NOT_REACHED();
        return 0;
    }
}
/* microseconds since 1601 */
uint64
query_time_micros()
{
    struct timeval current_time;
    uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, ¤t_time, NULL);
#ifdef MACOS
    /* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
    if (macos_version < MACOS_VERSION_SIERRA) {
        if ((int)val > 0) {
            /* Unpack the syscall's usecs:secs return into the timeval. */
            current_time.tv_sec = (uint) val;
            current_time.tv_usec = (uint)(val >> 32);
        }
    }
#endif
    if ((int)val >= 0) {
        uint64 res = (((uint64)current_time.tv_sec) * 1000000) +
            current_time.tv_usec;
        res += UTC_TO_EPOCH_SECONDS * 1000000;
        return res;
    } else {
        ASSERT_NOT_REACHED();
        return 0;
    }
}
#ifdef RETURN_AFTER_CALL
/* Finds the bottom of the call stack, presumably at program startup. */
/* This routine is a copycat of internal_dump_callstack and makes assumptions about program state,
   i.e. that frame pointers are valid and should be used only in well known points for release build.
   Walks the frame-pointer chain and returns the last readable return address
   (the outermost caller), bounded at depth 100 to guard against cycles.
 */
static app_pc
find_stack_bottom()
{
    app_pc retaddr = 0;
    int depth = 0;
    reg_t *fp;
    /* from dump_dr_callstack() */
    asm("mov  %%"ASM_XBP", %0" : "=m"(fp));
    LOG(THREAD_GET, LOG_ALL, 3, "Find stack bottom:\n");
    while (fp != NULL && is_readable_without_exception((byte *)fp, sizeof(reg_t)*2)) {
        retaddr = (app_pc)*(fp+1);      /* presumably also readable */
        LOG(THREAD_GET, LOG_ALL, 3,
            "\tframe ptr "PFX" => parent "PFX", ret = "PFX"\n", fp, *fp, retaddr);
        depth++;
        /* yes I've seen weird recursive cases before */
        if (fp == (reg_t *) *fp || depth > 100)
            break;
        fp = (reg_t *) *fp;
    }
    return retaddr;
}
#endif /* RETURN_AFTER_CALL */
/* os-specific atexit cleanup */
/* Tears down signal, memquery, and ksynch state, destroys the fd table, and
 * deletes OS locks.  On detach, also resets cached state for a possible
 * re-attach.
 */
void
os_slow_exit(void)
{
    signal_exit();
    memquery_exit();
    ksynch_exit();
    generic_hash_destroy(GLOBAL_DCONTEXT, fd_table);
    fd_table = NULL;
    if (doing_detach) {
        vsyscall_page_start = NULL;
        IF_DEBUG(num_fd_add_pre_heap = 0;)
    }
    DELETE_LOCK(set_thread_area_lock);
#ifdef CLIENT_INTERFACE
    DELETE_LOCK(client_tls_lock);
#endif
    IF_NO_MEMQUERY(memcache_exit());
}
/* os-specific atexit cleanup */
/* No fast-exit work is needed on this platform. */
void
os_fast_exit(void)
{
    /* nothing */
}
/* Terminates the process with exit_code.  A signal number may be encoded in
 * the 2nd byte of exit_code (i#1319); TERMINATE_CLEANUP requests a full
 * cleanup before the exit syscall.
 */
void
os_terminate_with_code(dcontext_t *dcontext, terminate_flags_t flags, int exit_code)
{
    /* i#1319: we support a signal via 2nd byte */
    bool use_signal = exit_code > 0x00ff;
    /* XXX: TERMINATE_THREAD not supported */
    ASSERT_NOT_IMPLEMENTED(TEST(TERMINATE_PROCESS, flags));
    if (use_signal) {
        int sig = (exit_code & 0xff00) >> 8;
        os_terminate_via_signal(dcontext, flags, sig);
        ASSERT_NOT_REACHED();
    }
    if (TEST(TERMINATE_CLEANUP, flags)) {
        /* we enter from several different places, so rewind until top-level kstat */
        KSTOP_REWIND_UNTIL(thread_measured);
        cleanup_and_terminate(dcontext, SYSNUM_EXIT_PROCESS, exit_code, 0,
                              true/*whole process*/, 0, 0);
    } else {
        /* clean up may be impossible - just terminate */
        config_exit(); /* delete .1config file */
        exit_process_syscall(exit_code);
    }
}
/* Terminates the process with the default exit code of -1. */
void
os_terminate(dcontext_t *dcontext, terminate_flags_t flags)
{
    os_terminate_with_code(dcontext, flags, -1);
}
/* Not implemented on this platform; asserts and returns 0. */
int
os_timeout(int time_in_milliseconds)
{
    ASSERT_NOT_IMPLEMENTED(false);
    return 0;
}
/************************************************************************
 * SEGMENT STEALING
 *
 * Not easy to make truly transparent -- but the alternative of dispatch
 * by thread id on global memory has performance implications.
 * Pull the non-STEAL_SEGMENT code out of the cvs attic for a base if
 * transparency becomes more of a problem.
 */
#define TLS_LOCAL_STATE_OFFSET (offsetof(os_local_state_t, state))
/* offset from top of page */
#define TLS_OS_LOCAL_STATE     0x00
#define TLS_SELF_OFFSET        (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, self))
#define TLS_THREAD_ID_OFFSET   (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, tid))
#define TLS_DCONTEXT_OFFSET    (TLS_OS_LOCAL_STATE + TLS_DCONTEXT_SLOT)
#ifdef X86
/* magic field used to detect un/initialized TLS (i#2089) */
# define TLS_MAGIC_OFFSET      (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, magic))
#endif
/* they should be used with os_tls_offset, so do not need add TLS_OS_LOCAL_STATE here */
#define TLS_APP_LIB_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_lib_tls_base))
#define TLS_APP_ALT_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_alt_tls_base))
#define TLS_APP_LIB_TLS_REG_OFFSET  (offsetof(os_local_state_t, app_lib_tls_reg))
#define TLS_APP_ALT_TLS_REG_OFFSET  (offsetof(os_local_state_t, app_alt_tls_reg))
/* N.B.: imm and offs are ushorts!
 * We use %c[0-9] to get gcc to emit an integer constant without a leading $ for
 * the segment offset.  See the documentation here:
 * http://gcc.gnu.org/onlinedocs/gccint/Output-Template.html#Output-Template
 * Also, var needs to match the pointer size, or else we'll get stack corruption.
 * XXX: This is marked volatile to prevent gcc from speculating this code before
 * checks for is_thread_tls_initialized(), but if we could find a more
 * precise constraint, then the compiler would be able to optimize better.  See
 * glibc comments on THREAD_SELF.
 */
#ifdef X86
/* Immediate-offset forms: read/write a pointer-sized (or int-sized) TLS slot
 * at a compile-time-constant segment offset.
 */
# define WRITE_TLS_SLOT_IMM(imm, var)                                 \
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());                            \
    ASSERT(sizeof(var) == sizeof(void*));                             \
    asm volatile("mov %0, %"ASM_SEG":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_SLOT_IMM(imm, var)                                  \
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());                            \
    ASSERT(sizeof(var) == sizeof(void*));                             \
    asm volatile("mov %"ASM_SEG":%c1, %0" : "=r"(var) : "i"(imm));
# define WRITE_TLS_INT_SLOT_IMM(imm, var)                             \
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());                            \
    ASSERT(sizeof(var) == sizeof(int));                               \
    asm volatile("movl %0, %"ASM_SEG":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_INT_SLOT_IMM(imm, var)                              \
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());                            \
    ASSERT(sizeof(var) == sizeof(int));                               \
    asm volatile("movl %"ASM_SEG":%c1, %0" : "=r"(var) : "i"(imm));
/* FIXME: need dedicated-storage var for _TLS_SLOT macros, can't use expr */
# define WRITE_TLS_SLOT(offs, var)                                        \
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());                                \
    ASSERT(sizeof(var) == sizeof(void*));                                 \
    ASSERT(sizeof(offs) == 2);                                            \
    asm("mov %0, %%"ASM_XAX : : "m"((var)) : ASM_XAX);                    \
    asm("movzw"IF_X64_ELSE("q","l")" %0, %%"ASM_XDX : : "m"((offs)) : ASM_XDX); \
    asm("mov %%"ASM_XAX", %"ASM_SEG":(%%"ASM_XDX")" : : : ASM_XAX, ASM_XDX);
# define READ_TLS_SLOT(offs, var)                                        \
    ASSERT(sizeof(var) == sizeof(void*));                                \
    ASSERT(sizeof(offs) == 2);                                           \
    asm("movzw"IF_X64_ELSE("q","l")" %0, %%"ASM_XAX : : "m"((offs)) : ASM_XAX); \
    asm("mov %"ASM_SEG":(%%"ASM_XAX"), %%"ASM_XAX : : : ASM_XAX);        \
    asm("mov %%"ASM_XAX", %0" : "=m"((var)) : : ASM_XAX);
#elif defined(AARCHXX)
/* Android needs indirection through a global.  The Android toolchain has
 * trouble with relocations if we use a global directly in asm, so we convert to
 * a local variable in these macros.  We pay the cost of the extra instructions
 * for Linux ARM to share the code.
 */
# define WRITE_TLS_SLOT_IMM(imm, var) do {                            \
    uint _base_offs = DR_TLS_BASE_OFFSET;                             \
    __asm__ __volatile__(                                             \
      "mov "ASM_R2", %0 \n\t"                                         \
      READ_TP_TO_R3_DISP_IN_R2                                        \
      "str %1, ["ASM_R3", %2] \n\t"                                   \
      : : "r" (_base_offs), "r" (var), "i" (imm)                      \
      : "memory", ASM_R2, ASM_R3);                                    \
} while (0)
# define READ_TLS_SLOT_IMM(imm, var) do {                             \
    uint _base_offs = DR_TLS_BASE_OFFSET;                             \
    __asm__ __volatile__(                                             \
      "mov "ASM_R2", %1 \n\t"                                         \
      READ_TP_TO_R3_DISP_IN_R2                                        \
      "ldr %0, ["ASM_R3", %2] \n\t"                                   \
      : "=r" (var)                                                    \
      : "r" (_base_offs), "i" (imm)                                   \
      : ASM_R2, ASM_R3);                                              \
} while (0)
# define WRITE_TLS_INT_SLOT_IMM WRITE_TLS_SLOT_IMM /* b/c 32-bit */
# define READ_TLS_INT_SLOT_IMM READ_TLS_SLOT_IMM /* b/c 32-bit */
/* Register-offset forms: same as above but the offset is a runtime value. */
# define WRITE_TLS_SLOT(offs, var) do {                               \
    uint _base_offs = DR_TLS_BASE_OFFSET;                             \
    __asm__ __volatile__(                                             \
      "mov "ASM_R2", %0 \n\t"                                         \
      READ_TP_TO_R3_DISP_IN_R2                                        \
      "add "ASM_R3", "ASM_R3", %2 \n\t"                               \
      "str %1, ["ASM_R3"] \n\t"                                       \
      : : "r" (_base_offs), "r" (var), "r" (offs)                     \
      : "memory", ASM_R2, ASM_R3);                                    \
} while (0)
# define READ_TLS_SLOT(offs, var) do {                                \
    uint _base_offs = DR_TLS_BASE_OFFSET;                             \
    __asm__ __volatile__(                                             \
      "mov "ASM_R2", %1 \n\t"                                         \
      READ_TP_TO_R3_DISP_IN_R2                                        \
      "add "ASM_R3", "ASM_R3", %2 \n\t"                               \
      "ldr %0, ["ASM_R3"] \n\t"                                       \
      : "=r" (var)                                                    \
      : "r" (_base_offs), "r" (offs)                                  \
      : ASM_R2, ASM_R3);                                              \
} while (0)
#endif /* X86/ARM */
#ifdef X86
/* We use this at thread init and exit to make it easy to identify
 * whether TLS is initialized (i#2089).
 * We assume alignment does not matter.
 */
static os_local_state_t uninit_tls; /* has .magic == 0, i.e. != TLS_MAGIC_VALID */
#endif
/* Returns whether this thread's DR TLS has been set up.  On x86 this either
 * safe-reads the TLS magic field (i#2089) or inspects the segment register /
 * MSR; on ARM it checks the DR TLS base slot.
 */
static bool
is_thread_tls_initialized(void)
{
#ifdef X86
    if (INTERNAL_OPTION(safe_read_tls_init)) {
        /* Avoid faults during early init or during exit when we have no handler.
         * It's not worth extending the handler as the faults are a perf hit anyway.
         */
        if (!first_thread_tls_initialized || last_thread_tls_exited)
            return false;
        /* To handle WSL (i#1986) where fs and gs start out equal to ss (0x2b),
         * and when the MSR is used having a zero selector, and other complexities,
         * we just do a blind safe read as the simplest solution once we're past
         * initial init and have a fault handler.
         *
         * i#2089: to avoid the perf cost of syscalls to verify the tid, and to
         * distinguish a fork child from a separate-group thread, we no longer read
         * the tid field and check that the TLS belongs to this particular thread:
         * instead we rely on clearing the .magic field for child threads and at
         * thread exit (to avoid a fault) and we simply check the field here.
         * A native app thread is very unlikely to match this.
         */
        return safe_read_tls_magic() == TLS_MAGIC_VALID;
    } else {
        /* XXX i#2089: we're keeping this legacy code around until
         * we're confident that the safe read code above is safer, more
         * performant, and more robust.
         */
        os_local_state_t *os_tls = NULL;
        ptr_uint_t cur_seg = read_thread_register(SEG_TLS);
        /* Handle WSL (i#1986) where fs and gs start out equal to ss (0x2b) */
        if (cur_seg != 0 && cur_seg != read_thread_register(SEG_SS)) {
            /* XXX: make this a safe read: but w/o dcontext we need special asm support */
            READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
        }
# ifdef X64
        if (os_tls == NULL && tls_dr_using_msr()) {
            /* When the MSR is used, the selector in the register remains 0.
             * We can't clear the MSR early in a new thread and then look for
             * a zero base here b/c if kernel decides to use GDT that zeroing
             * will set the selector, unless we want to assume we know when
             * the kernel uses the GDT.
             * Instead we make a syscall to get the tid.  This should be ok
             * perf-wise b/c the common case is the non-zero above.
             */
            byte *base = tls_get_fs_gs_segment_base(SEG_TLS);
            ASSERT(tls_global_type == TLS_TYPE_ARCH_PRCTL);
            if (base != (byte *) POINTER_MAX && base != NULL) {
                os_tls = (os_local_state_t *) base;
            }
        }
# endif
        if (os_tls != NULL) {
            return (os_tls->tid == get_sys_thread_id() ||
                    /* The child of a fork will initially come here */
                    os_tls->state.spill_space.dcontext->owning_process ==
                    get_parent_id());
        } else
            return false;
    }
#elif defined(AARCHXX)
    byte **dr_tls_base_addr;
    if (tls_global_type == TLS_TYPE_NONE)
        return false;
    dr_tls_base_addr = (byte **)get_dr_tls_base_addr();
    if (dr_tls_base_addr == NULL ||
        *dr_tls_base_addr == NULL ||
        /* We use the TLS slot's value to identify a now-exited thread (i#1578) */
        *dr_tls_base_addr == TLS_SLOT_VAL_EXITED)
        return false;
    /* We would like to ASSERT is_dynamo_address(*tls_swap_slot) but that leads
     * to infinite recursion for an address not in the vm_reserve area, as
     * dynamo_vm_areas_start_reading() ending up calling
     * deadlock_avoidance_unlock() which calls get_thread_private_dcontext()
     * which comes here.
     */
    return true;
#endif
}
#if defined(X86) || defined(DEBUG)
/* Returns whether this thread's TLS block exists at all, even if it is not
 * (or no longer) marked valid -- unlike is_thread_tls_initialized(), this
 * returns true for currently-native threads.
 */
static bool
is_thread_tls_allocated(void)
{
# ifdef X86
    if (INTERNAL_OPTION(safe_read_tls_init)) {
        /* We use this routine to allow currently-native threads, for which
         * is_thread_tls_initialized() (and thus is_thread_initialized()) will
         * return false.
         * Caution: this will also return true on a fresh clone child.
         */
        uint magic;
        if (!first_thread_tls_initialized || last_thread_tls_exited)
            return false;
        magic = safe_read_tls_magic();
        return magic == TLS_MAGIC_VALID || magic == TLS_MAGIC_INVALID;
    }
# endif
    return is_thread_tls_initialized();
}
#endif
/* converts a local_state_t offset to a segment offset */
ushort
os_tls_offset(ushort tls_offs)
{
    /* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
    ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
    return (TLS_LOCAL_STATE_OFFSET + tls_offs);
}
/* Returns DR's private TLS base for the lib (TLS_REG_LIB) or alt
 * (TLS_REG_ALT) segment of the given thread (current thread if dcontext is
 * NULL).
 * XXX: Will return NULL if called before os_thread_init(), which sets
 * ostd->dr_fs/gs_base.
 */
void *
os_get_priv_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
    os_thread_data_t *tdata;
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
    ASSERT(reg == TLS_REG_ALT || reg == TLS_REG_LIB);
    if (dcontext == NULL)
        dcontext = get_thread_private_dcontext();
    if (dcontext == NULL)
        return NULL;
    tdata = (os_thread_data_t *)dcontext->os_field;
    if (reg == TLS_REG_ALT)
        return tdata->priv_alt_tls_base;
    if (reg == TLS_REG_LIB)
        return tdata->priv_lib_tls_base;
    ASSERT_NOT_REACHED();
    return NULL;
}
/* Returns the current thread's os_local_state_t by reading the self pointer
 * stored in its TLS segment; TLS must already be initialized.
 */
os_local_state_t *
get_os_tls(void)
{
    os_local_state_t *os_tls;
    ASSERT(is_thread_tls_initialized());
    READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
    return os_tls;
}
/* Obtain TLS from dcontext directly, which succeeds in pre-thread-init
 * situations where get_os_tls() fails.
 * Returns NULL if the dcontext has no local_state yet.
 */
static os_local_state_t *
get_os_tls_from_dc(dcontext_t *dcontext)
{
    byte *local_state;
    ASSERT(dcontext != NULL);
    local_state = (byte*)dcontext->local_state;
    if (local_state == NULL)
        return NULL;
    /* local_state points at the embedded state field: back up to the start
     * of the enclosing os_local_state_t.
     */
    return (os_local_state_t *)(local_state - offsetof(os_local_state_t, state));
}
#ifdef AARCHXX
/* Records the app's TLS base for the given register in this thread's
 * os_local_state_t.  Returns true on success; reaches an assert (and returns
 * false in release build) for an unrecognized register.
 */
bool
os_set_app_tls_base(dcontext_t *dcontext, reg_id_t reg, void *base)
{
    os_local_state_t *os_tls;
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
    ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
    if (dcontext == NULL)
        dcontext = get_thread_private_dcontext();
    /* we will be called only if TLS is initialized */
    ASSERT(dcontext != NULL);
    os_tls = get_os_tls_from_dc(dcontext);
    if (reg == TLS_REG_LIB) {
        os_tls->app_lib_tls_base = base;
        LOG(THREAD, LOG_THREADS, 1, "TLS app lib base ="PFX"\n", base);
        return true;
    } else if (reg == TLS_REG_ALT) {
        os_tls->app_alt_tls_base = base;
        LOG(THREAD, LOG_THREADS, 1, "TLS app alt base ="PFX"\n", base);
        return true;
    }
    ASSERT_NOT_REACHED();
    return false;
}
#endif
/* Returns the app's TLS base for the given library/alt register, as cached in
 * this thread's os_local_state_t (or read directly from the hardware when TLS
 * is not yet set up for this thread).
 */
void *
os_get_app_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
    os_local_state_t *os_tls;
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
    ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
    if (dcontext == NULL)
        dcontext = get_thread_private_dcontext();
    if (dcontext == NULL) {
        /* With no dcontext, TLS has not been initialized, so the app's own
         * segments are still installed.  get_segment_base is expensive but
         * this path should be rare; re-examine if it shows up in a profile.
         */
        return get_segment_base(reg);
    }
    os_tls = get_os_tls_from_dc(dcontext);
    if (reg == TLS_REG_ALT)
        return os_tls->app_alt_tls_base;
    if (reg == TLS_REG_LIB)
        return os_tls->app_lib_tls_base;
    ASSERT_NOT_REACHED();
    return NULL;
}
/* Returns the TLS-segment offset at which the app's TLS base for the given
 * register is cached.
 */
ushort
os_get_app_tls_base_offset(reg_id_t reg)
{
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
    ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
    if (reg == TLS_REG_ALT)
        return TLS_APP_ALT_TLS_BASE_OFFSET;
    if (reg == TLS_REG_LIB)
        return TLS_APP_LIB_TLS_BASE_OFFSET;
    ASSERT_NOT_REACHED();
    return 0;
}
#ifdef X86
/* Returns the TLS-segment offset at which the app's selector value for the
 * given segment register is cached.
 */
ushort
os_get_app_tls_reg_offset(reg_id_t reg)
{
    IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
    ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
    if (reg == TLS_REG_ALT)
        return TLS_APP_ALT_TLS_REG_OFFSET;
    if (reg == TLS_REG_LIB)
        return TLS_APP_LIB_TLS_REG_OFFSET;
    ASSERT_NOT_REACHED();
    return 0;
}
#endif
/* Reads and returns the pointer-sized value stored at the given offset in the
 * calling thread's DR TLS segment.
 */
void *
get_tls(ushort tls_offs)
{
    void *val;
    READ_TLS_SLOT(tls_offs, val);
    return val;
}
/* Stores a pointer-sized value at the given offset in the calling thread's
 * DR TLS segment.
 */
void
set_tls(ushort tls_offs, void *value)
{
    WRITE_TLS_SLOT(tls_offs, value);
}
/* Returns the base of the given segment (x86) or thread register (AArch),
 * or POINTER_MAX on failure.
 * Assumes that cs, ss, ds, and es are flat (returns NULL for them).
 * Should we export this to clients? For now they can get
 * this information via opnd_compute_address().
 */
byte *
get_segment_base(uint seg)
{
#ifdef X86
    /* Flat segments have a zero base. */
    if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
        return NULL;
# ifdef HAVE_TLS
    return tls_get_fs_gs_segment_base(seg);
# else
    return (byte *) POINTER_MAX;
# endif /* HAVE_TLS */
#elif defined(AARCHXX)
    /* XXX i#1551: should we rename/refactor to avoid "segment"? */
    return (byte *) read_thread_register(seg);
#endif
}
/* i#572: handle opnd_compute_address to return the application
 * segment base value.  With the private loader, DR has replaced the app's
 * segment with its own, so we must return the cached app value rather than
 * the live hardware base.
 */
byte *
get_app_segment_base(uint seg)
{
#ifdef X86
    /* Flat segments have a zero base. */
    if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
        return NULL;
#endif /* X86 */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        /* Read the cached app base out of our own TLS. */
        return get_tls(os_get_app_tls_base_offset(seg));
    }
    return get_segment_base(seg);
}
/* Returns a pointer to the calling thread's extended local state, embedded in
 * its os_local_state_t.  Requires that TLS is initialized.
 */
local_state_extended_t *
get_local_state_extended()
{
    os_local_state_t *os_tls;
    ASSERT(is_thread_tls_initialized());
    READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
    return &(os_tls->state);
}
/* Returns the calling thread's local state, or NULL when the build has no TLS
 * support.  local_state_t is a prefix of local_state_extended_t, so the cast
 * is safe.
 */
local_state_t *
get_local_state()
{
#ifdef HAVE_TLS
    return (local_state_t *) get_local_state_extended();
#else
    return NULL;
#endif
}
#ifdef DEBUG
/* Debug-only hook run on entry to DR code, used for sanity checks. */
void
os_enter_dynamorio(void)
{
# ifdef ARM
    /* i#1578: check that app's tls value doesn't match our sentinel */
    ASSERT(*(byte **)get_dr_tls_base_addr() != TLS_SLOT_VAL_EXITED);
# endif
}
#endif
/* i#107: handle segment register usage conflicts between app and dr:
 * os_handle_mov_seg updates the app's tls selector maintained by DR.
 * It is called before entering code cache in dispatch_enter_fcache.
 * pc must point at a mov-to-segment-register instruction; the new selector is
 * read from the instruction's source (register or memory) and the matching
 * cached app selector and base are updated from the saved descriptor table.
 */
void
os_handle_mov_seg(dcontext_t *dcontext, byte *pc)
{
#ifdef X86
    instr_t instr;
    opnd_t opnd;
    reg_id_t seg;
    ushort sel = 0;
    our_modify_ldt_t *desc;
    int desc_idx;
    os_local_state_t *os_tls;
    os_thread_data_t *ostd;
    instr_init(dcontext, &instr);
    decode_cti(dcontext, pc, &instr);
    /* the first instr must be mov seg */
    ASSERT(instr_get_opcode(&instr) == OP_mov_seg);
    opnd = instr_get_dst(&instr, 0);
    ASSERT(opnd_is_reg(opnd));
    seg = opnd_get_reg(opnd);
    ASSERT(reg_is_segment(seg));
    ostd = (os_thread_data_t *)dcontext->os_field;
    desc = (our_modify_ldt_t *)ostd->app_thread_areas;
    os_tls = get_os_tls();
    /* get the selector value: either directly from a register source, or via
     * a fault-safe read of the source memory operand
     */
    opnd = instr_get_src(&instr, 0);
    if (opnd_is_reg(opnd)) {
        sel = (ushort)reg_get_value_priv(opnd_get_reg(opnd),
                                         get_mcontext(dcontext));
    } else {
        void *ptr;
        ptr = (ushort *)opnd_compute_address_priv(opnd, get_mcontext(dcontext));
        ASSERT(ptr != NULL);
        if (!safe_read(ptr, sizeof(sel), &sel)) {
            /* FIXME: if invalid address, should deliver a signal to user. */
            ASSERT_NOT_IMPLEMENTED(false);
        }
    }
    /* calculate the entry_number: index into the saved app thread areas */
    desc_idx = SELECTOR_INDEX(sel) - tls_min_index();
    if (seg == TLS_REG_LIB) {
        os_tls->app_lib_tls_reg = sel;
        os_tls->app_lib_tls_base = (void *)(ptr_uint_t) desc[desc_idx].base_addr;
    } else {
        os_tls->app_alt_tls_reg = sel;
        os_tls->app_alt_tls_base = (void *)(ptr_uint_t) desc[desc_idx].base_addr;
    }
    instr_free(dcontext, &instr);
    LOG(THREAD_GET, LOG_THREADS, 2,
        "thread "TIDFMT" segment change %s to selector 0x%x => "
        "app lib tls base: "PFX", alt tls base: "PFX"\n",
        get_thread_id(), reg_names[seg], sel,
        os_tls->app_lib_tls_base, os_tls->app_alt_tls_base);
#elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
    ASSERT_NOT_REACHED();
#endif /* X86/ARM */
}
/* Initialization for TLS mangling (-mangle_app_seg on x86).
 * Must be called before DR setup its own segment.
 * Captures the app's TLS registers/bases and thread-area descriptors into
 * os_tls, then records DR's own segment and (with the private loader) sets up
 * the private library TLS.
 */
static void
os_tls_app_seg_init(os_local_state_t *os_tls, void *segment)
{
    app_pc app_lib_tls_base, app_alt_tls_base;
#ifdef X86
    int i, index;
    our_modify_ldt_t *desc;
    os_tls->app_lib_tls_reg = read_thread_register(TLS_REG_LIB);
    os_tls->app_alt_tls_reg = read_thread_register(TLS_REG_ALT);
#endif
    app_lib_tls_base = get_segment_base(TLS_REG_LIB);
    app_alt_tls_base = get_segment_base(TLS_REG_ALT);
    /* If we're a non-initial thread, tls will be set to the parent's value,
     * or to &uninit_tls (i#2089), both of which will be is_dynamo_address().
     * In that case record NULL rather than a DR-internal address.
     */
    os_tls->app_lib_tls_base =
        is_dynamo_address(app_lib_tls_base) ? NULL : app_lib_tls_base;
    os_tls->app_alt_tls_base =
        is_dynamo_address(app_alt_tls_base) ? NULL : app_alt_tls_base;
#ifdef X86
    /* get all TLS thread area value */
    /* XXX: is get_thread_area supported in 64-bit kernel?
     * It has syscall number 211.
     * It works for a 32-bit application running in a 64-bit kernel.
     * It returns error value -38 for a 64-bit app in a 64-bit kernel.
     */
    desc = &os_tls->os_seg_info.app_thread_areas[0];
    tls_initialize_indices(os_tls);
    index = tls_min_index();
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        tls_get_descriptor(i + index, &desc[i]);
    }
#endif /* X86 */
    os_tls->os_seg_info.dr_tls_base = segment;
    os_tls->os_seg_info.priv_alt_tls_base = IF_X86_ELSE(segment, NULL);
    /* now allocate the tls segment for client libraries */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        os_tls->os_seg_info.priv_lib_tls_base =
            IF_UNIT_TEST_ELSE(os_tls->app_lib_tls_base,
                              privload_tls_init(os_tls->app_lib_tls_base));
    }
#ifdef X86
    LOG(THREAD_GET, LOG_THREADS, 1,
        "thread "TIDFMT" app lib tls reg: 0x%x, alt tls reg: 0x%x\n",
        get_thread_id(), os_tls->app_lib_tls_reg, os_tls->app_alt_tls_reg);
#endif
    LOG(THREAD_GET, LOG_THREADS, 1,
        "thread "TIDFMT" app lib tls base: "PFX", alt tls base: "PFX"\n",
        get_thread_id(), os_tls->app_lib_tls_base, os_tls->app_alt_tls_base);
    LOG(THREAD_GET, LOG_THREADS, 1,
        "thread "TIDFMT" priv lib tls base: "PFX", alt tls base: "PFX", "
        "DR's tls base: "PFX"\n",
        get_thread_id(),
        os_tls->os_seg_info.priv_lib_tls_base,
        os_tls->os_seg_info.priv_alt_tls_base,
        os_tls->os_seg_info.dr_tls_base);
}
/* Per-thread TLS setup: allocates and initializes this thread's DR TLS
 * segment (or, without HAVE_TLS, the fallback tls_table) and installs it via
 * tls_thread_init().  Must run before any TLS-slot reads for this thread.
 */
void
os_tls_init(void)
{
#ifdef X86
    /* Keep the asm-visible offsets in sync with the C struct layout. */
    ASSERT(TLS_MAGIC_OFFSET_ASM == TLS_MAGIC_OFFSET);
    ASSERT(TLS_SELF_OFFSET_ASM == TLS_SELF_OFFSET);
#endif
#ifdef HAVE_TLS
    /* We create a 1-page segment with an LDT entry for each thread and load its
     * selector into fs/gs.
     * FIXME PR 205276: this whole scheme currently does not check if app is using
     * segments need to watch modify_ldt syscall
     */
    /* FIXME: heap_mmap marks as exec, we just want RW */
    byte *segment = heap_mmap(PAGE_SIZE);
    os_local_state_t *os_tls = (os_local_state_t *) segment;
    LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init for thread "TIDFMT"\n", get_thread_id());
    ASSERT(!is_thread_tls_initialized());
    /* MUST zero out dcontext slot so uninit access gets NULL */
    memset(segment, 0, PAGE_SIZE);
    /* store key data in the tls itself */
    os_tls->self = os_tls;
    os_tls->tid = get_sys_thread_id();
    os_tls->tls_type = TLS_TYPE_NONE;
#ifdef X86
    os_tls->magic = TLS_MAGIC_VALID;
#endif
    /* We save DR's TLS segment base here so that os_get_dr_tls_base() will work
     * even when -no_mangle_app_seg is set. If -mangle_app_seg is set, this
     * will be overwritten in os_tls_app_seg_init().
     */
    os_tls->os_seg_info.dr_tls_base = segment;
    ASSERT(proc_is_cache_aligned(os_tls->self + TLS_LOCAL_STATE_OFFSET));
    /* Verify that local_state_extended_t should indeed be used. */
    ASSERT(DYNAMO_OPTION(ibl_table_in_tls));
    /* initialize DR TLS seg base before replacing app's TLS in tls_thread_init */
    if (MACHINE_TLS_IS_DR_TLS)
        os_tls_app_seg_init(os_tls, segment);
    tls_thread_init(os_tls, segment);
    ASSERT(os_tls->tls_type != TLS_TYPE_NONE);
    /* store type in global var for convenience: should be same for all threads */
    tls_global_type = os_tls->tls_type;
    /* FIXME: this should be a SYSLOG fatal error? Should fall back on !HAVE_TLS?
     * Should have create_ldt_entry() return failure instead of asserting, then.
     */
#else
    tls_table = (tls_slot_t *)
        global_heap_alloc(MAX_THREADS*sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
    memset(tls_table, 0, MAX_THREADS*sizeof(tls_slot_t));
#endif
    /* Flip the process-wide flags used by is_thread_tls_allocated() et al. */
    if (!first_thread_tls_initialized) {
        first_thread_tls_initialized = true;
        if (last_thread_tls_exited) /* re-attach */
            last_thread_tls_exited = false;
    }
    ASSERT(is_thread_tls_initialized());
}
/* Whether a thread's TLS should be cleared/freed as it exits. */
static bool
should_zero_tls_at_thread_exit()
{
#ifdef X86
    /* i#2089: a thread created without CLONE_SIGHAND cannot handle a fault, so
     * we prefer to keep &uninit_tls (installed in os_thread_exit()) in place
     * as long as possible; for non-detach that means until process exit.
     */
    if (INTERNAL_OPTION(safe_read_tls_init) && !doing_detach)
        return false;
    return true;
#else
    return true;
#endif
}
/* TLS exit for the current thread who must own local_state.
 * Frees the kernel-side TLS entry (when safe), clears the x64 gs register if
 * the syscall re-set it, and updates the process-wide exit flags.
 */
void
os_tls_thread_exit(local_state_t *local_state)
{
#ifdef HAVE_TLS
    /* We assume (assert below) that local_state_t's start == local_state_extended_t */
    os_local_state_t *os_tls = (os_local_state_t *)
        (((byte*)local_state) - offsetof(os_local_state_t, state));
    tls_type_t tls_type = os_tls->tls_type;
    int index = os_tls->ldt_index;
    ASSERT(offsetof(local_state_t, spill_space) ==
           offsetof(local_state_extended_t, spill_space));
    if (should_zero_tls_at_thread_exit()) {
        tls_thread_free(tls_type, index);
# if defined(X86) && defined(X64)
        if (tls_type == TLS_TYPE_ARCH_PRCTL) {
            /* syscall re-sets gs register so re-clear it */
            if (read_thread_register(SEG_TLS) != 0) {
                static const ptr_uint_t zero = 0;
                WRITE_DR_SEG(zero); /* macro needs lvalue! */
            }
        }
# endif
    }
    /* We already set TLS to &uninit_tls in os_thread_exit() */
    if (dynamo_exited && !last_thread_tls_exited) {
        last_thread_tls_exited = true;
        first_thread_tls_initialized = false; /* for possible re-attach */
    }
#endif
}
/* Frees local_state. If the calling thread is exiting (i.e.,
 * !other_thread) then also frees kernel resources for the calling
 * thread; if other_thread then that may not be possible.
 */
void
os_tls_exit(local_state_t *local_state, bool other_thread)
{
#ifdef HAVE_TLS
# ifdef X86
    static const ptr_uint_t zero = 0;
# endif /* X86 */
    /* We can't read from fs: as we can be called from other threads */
    /* ASSUMPTION: local_state_t is laid out at same start as local_state_extended_t */
    os_local_state_t *os_tls = (os_local_state_t *)
        (((byte*)local_state) - offsetof(os_local_state_t, state));
# ifdef X86
    /* If the MSR is in use, writing to the reg faults. We rely on it being 0
     * to indicate that.
     */
    if (!other_thread && read_thread_register(SEG_TLS) != 0 &&
        should_zero_tls_at_thread_exit()) {
        WRITE_DR_SEG(zero); /* macro needs lvalue! */
    }
# endif /* X86 */
    /* For another thread we can't really make these syscalls so we have to
     * leave it un-cleaned-up. That's fine if the other thread is exiting:
     * but for detach (i#95) we get the other thread to run this code.
     */
    if (!other_thread)
        os_tls_thread_exit(local_state);
    /* We can't free prior to tls_thread_free() in case that routine refs os_tls */
    heap_munmap(os_tls->self, PAGE_SIZE);
#else
    global_heap_free(tls_table, MAX_THREADS*sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
    DELETE_LOCK(tls_lock);
#endif
}
/* Returns the GDT index backing this thread's TLS, or -1 when the thread's
 * TLS is not GDT-based.
 */
static int
os_tls_get_gdt_index(dcontext_t *dcontext)
{
    os_local_state_t *os_tls = (os_local_state_t *)
        (((byte *)dcontext->local_state) - offsetof(os_local_state_t, state));
    return (os_tls->tls_type == TLS_TYPE_GDT) ? os_tls->ldt_index : -1;
}
/* Clears a stale GDT TLS slot inherited across exec (PR 458917), before any
 * code that might consult the thread's segment register runs.
 * gdt_index <= 0 means the previous TLS was not GDT-based and there is
 * nothing to clean up.
 */
void
os_tls_pre_init(int gdt_index)
{
#ifdef X86
    /* Only set to above 0 for tls_type == TLS_TYPE_GDT */
    if (gdt_index > 0) {
        /* PR 458917: clear gdt slot to avoid leak across exec */
        DEBUG_DECLARE(bool ok;)
        static const ptr_uint_t zero = 0;
        /* Be sure to clear the selector before anything that might
         * call get_thread_private_dcontext()
         */
        WRITE_DR_SEG(zero); /* macro needs lvalue! */
        DEBUG_DECLARE(ok = )
            tls_clear_descriptor(gdt_index);
        ASSERT(ok);
    }
#elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
#ifdef CLIENT_INTERFACE
/* Allocates num_slots contiguous client tls slots; when alignment is non-zero
 * the segment offset of the first slot is aligned to it.  On success writes
 * the offset of the first slot to *offset and returns true; returns false if
 * the request is invalid or no suitable run of free slots exists.
 */
bool
os_tls_calloc(OUT uint *offset, uint num_slots, uint alignment)
{
    bool res = false;
    uint i, count = 0;
    int start = -1;
    uint offs = offsetof(os_local_state_t, client_tls);
    if (num_slots == 0 || num_slots > MAX_NUM_CLIENT_TLS)
        return false;
    mutex_lock(&client_tls_lock);
    for (i = 0; i < MAX_NUM_CLIENT_TLS; i++) {
        if (client_tls_allocated[i]) {
            /* An in-use slot breaks any run we were building. */
            start = -1;
            count = 0;
        } else if (start == -1) {
            /* Only the run's first slot must satisfy the alignment: checking
             * every slot (as was done previously) made multi-slot requests
             * with alignment > sizeof(void*) impossible to satisfy.
             * ALIGNED doesn't work for 0.
             */
            if (alignment == 0 || ALIGNED(offs + i*sizeof(void*), alignment)) {
                start = i;
                count = 1;
            }
        } else {
            count++;
        }
        if (start != -1 && count >= num_slots)
            break;
    }
    if (count >= num_slots) {
        for (i = 0; i < num_slots; i++)
            client_tls_allocated[i + start] = true;
        *offset = offs + start*sizeof(void*);
        res = true;
    }
    mutex_unlock(&client_tls_lock);
    return res;
}
/* Frees num_slots client tls slots previously allocated at the given segment
 * offset.  Returns false if any slot in the range was not marked allocated
 * (the whole range is cleared regardless).
 */
bool
os_tls_cfree(uint offset, uint num_slots)
{
    uint slot;
    uint first = (offset - offsetof(os_local_state_t, client_tls))/sizeof(void*);
    bool all_were_allocated = true;
    mutex_lock(&client_tls_lock);
    for (slot = first; slot < first + num_slots; slot++) {
        if (!client_tls_allocated[slot])
            all_were_allocated = false;
        client_tls_allocated[slot] = false;
    }
    mutex_unlock(&client_tls_lock);
    return all_were_allocated;
}
#endif
/* Per-thread OS-specific init: allocates and populates the os_thread_data_t
 * hung off dcontext->os_field, initializes synch variables and signals, and
 * copies the TLS base/thread-area info captured earlier in os_tls_init().
 */
void
os_thread_init(dcontext_t *dcontext)
{
    os_local_state_t *os_tls = get_os_tls();
    os_thread_data_t *ostd = (os_thread_data_t *)
        heap_alloc(dcontext, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
    dcontext->os_field = (void *) ostd;
    /* make sure stack fields, etc. are 0 now so they can be initialized on demand
     * (don't have app esp register handy here to init now)
     */
    memset(ostd, 0, sizeof(*ostd));
    ksynch_init_var(&ostd->suspended);
    ksynch_init_var(&ostd->wakeup);
    ksynch_init_var(&ostd->resumed);
    ksynch_init_var(&ostd->terminated);
    ksynch_init_var(&ostd->detached);
#ifdef RETURN_AFTER_CALL
    /* We only need the stack bottom for the initial thread, and due to thread
     * init now preceding vm_areas_init(), we initialize in find_executable_vm_areas()
     */
    ostd->stack_bottom_pc = NULL;
#endif
    ASSIGN_INIT_LOCK_FREE(ostd->suspend_lock, suspend_lock);
    signal_thread_init(dcontext);
    /* i#107, initialize thread area information,
     * the value was first get in os_tls_init and stored in os_tls
     */
    ostd->priv_lib_tls_base = os_tls->os_seg_info.priv_lib_tls_base;
    ostd->priv_alt_tls_base = os_tls->os_seg_info.priv_alt_tls_base;
    ostd->dr_tls_base = os_tls->os_seg_info.dr_tls_base;
    LOG(THREAD, LOG_THREADS, 1, "TLS app lib base ="PFX"\n", os_tls->app_lib_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS app alt base ="PFX"\n", os_tls->app_alt_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS priv lib base ="PFX"\n", ostd->priv_lib_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS priv alt base ="PFX"\n", ostd->priv_alt_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS DynamoRIO base="PFX"\n", ostd->dr_tls_base);
#ifdef X86
    /* Keep a per-thread copy of the app's GDT thread areas for -mangle_app_seg. */
    if (INTERNAL_OPTION(mangle_app_seg)) {
        ostd->app_thread_areas =
            heap_alloc(dcontext, sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS
                       HEAPACCT(ACCT_OTHER));
        memcpy(ostd->app_thread_areas,
               os_tls->os_seg_info.app_thread_areas,
               sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS);
    }
#endif
    LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is "PFX"\n",
        IF_X86_ELSE("gs", "tpidruro"),
        get_segment_base(IF_X86_ELSE(SEG_GS, DR_REG_TPIDRURO)));
    LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is "PFX"\n",
        IF_X86_ELSE("fs", "tpidrurw"),
        get_segment_base(IF_X86_ELSE(SEG_FS, DR_REG_TPIDRURW)));
#ifdef MACOS
    /* XXX: do we need to free/close dcontext->thread_port? I don't think so. */
    dcontext->thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
    LOG(THREAD, LOG_ALL, 1, "Mach thread port: %d\n", dcontext->thread_port);
#endif
}
/* Per-thread OS-specific teardown; other_thread is true when a different
 * thread is cleaning up on this thread's behalf (e.g. detach).
 */
void
os_thread_exit(dcontext_t *dcontext, bool other_thread)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
    /* i#237/PR 498284: if we had a vfork child call execve we need to clean up
     * the env vars.
     */
    if (dcontext->thread_record->execve)
        handle_execve_post(dcontext);
    DELETE_LOCK(ostd->suspend_lock);
    signal_thread_exit(dcontext, other_thread);
    ksynch_free_var(&ostd->suspended);
    ksynch_free_var(&ostd->wakeup);
    ksynch_free_var(&ostd->resumed);
    ksynch_free_var(&ostd->terminated);
    ksynch_free_var(&ostd->detached);
#ifdef X86
    if (ostd->clone_tls != NULL) {
        if (!other_thread) {
            /* Avoid faults in is_thread_tls_initialized() */
            /* FIXME i#2088: we need to restore the app's aux seg, if any, instead. */
            os_set_dr_tls_base(dcontext, NULL, (byte *)&uninit_tls);
        }
        DODEBUG({
            HEAP_TYPE_FREE(dcontext, ostd->clone_tls, os_local_state_t,
                           ACCT_THREAD_MGT, UNPROTECTED);
        });
    }
#endif
    /* for non-debug we do fast exit path and don't free local heap */
    DODEBUG({
        if (MACHINE_TLS_IS_DR_TLS) {
#ifdef X86
            heap_free(dcontext, ostd->app_thread_areas,
                      sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS
                      HEAPACCT(ACCT_OTHER));
#endif
#ifdef CLIENT_INTERFACE
            if (INTERNAL_OPTION(private_loader))
                privload_tls_exit(IF_UNIT_TEST_ELSE(NULL, ostd->priv_lib_tls_base));
#endif
        }
        heap_free(dcontext, ostd, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
    });
}
/* Happens in the parent prior to fork.  Suspends all other threads so none
 * hold locks across the fork, then releases the two synch locks so the fork
 * syscall itself can run from the code cache.  NOTE: lock release order here
 * mirrors the re-acquisition in os_fork_post().
 */
static void
os_fork_pre(dcontext_t *dcontext)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
    /* Otherwise a thread might wait for us. */
    ASSERT_OWN_NO_LOCKS();
    ASSERT(ostd->fork_threads == NULL && ostd->fork_num_threads == 0);
    /* i#239: Synch with all other threads to ensure that they are holding no
     * locks across the fork.
     * FIXME i#26: Suspend signals received before initializing siginfo are
     * squelched, so we won't be able to suspend threads that are initializing.
     */
    LOG(GLOBAL, 2, LOG_SYSCALLS|LOG_THREADS,
        "fork: synching with other threads to prevent deadlock in child\n");
    if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
                                &ostd->fork_threads,
                                &ostd->fork_num_threads,
                                THREAD_SYNCH_VALID_MCONTEXT,
                                /* If we fail to suspend a thread, there is a
                                 * risk of deadlock in the child, so it's worth
                                 * retrying on failure.
                                 */
                                THREAD_SYNCH_SUSPEND_FAILURE_RETRY)) {
        /* If we failed to synch with all threads, we live with the possiblity
         * of deadlock and continue as normal.
         */
        LOG(GLOBAL, 1, LOG_SYSCALLS|LOG_THREADS,
            "fork: synch failed, possible deadlock in child\n");
        ASSERT_CURIOSITY(false);
    }
    /* We go back to the code cache to execute the syscall, so we can't hold
     * locks. If the synch succeeded, no one else is running, so it should be
     * safe to release these locks. However, if there are any rogue threads,
     * then releasing these locks will allow them to synch and create threads.
     * Such threads could be running due to synch failure or presence of
     * non-suspendable client threads. We keep our data in ostd to prevent some
     * conflicts, but there are some unhandled corner cases.
     */
    mutex_unlock(&thread_initexit_lock);
    mutex_unlock(&all_threads_synch_lock);
}
/* Happens after the fork in both the parent and child.  Re-acquires the locks
 * released in os_fork_pre() (in the reverse of that release order) and, in
 * the parent only, resumes the threads suspended before the fork.
 */
static void
os_fork_post(dcontext_t *dcontext, bool parent)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
    /* Re-acquire the locks we released before the fork. */
    mutex_lock(&all_threads_synch_lock);
    mutex_lock(&thread_initexit_lock);
    /* Resume the other threads that we suspended. */
    if (parent) {
        LOG(GLOBAL, 2, LOG_SYSCALLS|LOG_THREADS,
            "fork: resuming other threads after fork\n");
    }
    end_synch_with_all_threads(ostd->fork_threads, ostd->fork_num_threads,
                               parent/*resume in parent, not in child*/);
    ostd->fork_threads = NULL;  /* Freed by end_synch_with_all_threads. */
    ostd->fork_num_threads = 0;
}
/* this one is called before child's new logfiles are set up.
 * Resets forked-over locks, re-caches the new pid, and closes the child's
 * copies of any parent file descriptors marked close-on-fork.
 */
void
os_fork_init(dcontext_t *dcontext)
{
    int iter;
    /* We use a larger data size than file_t to avoid clobbering our stack (i#991) */
    ptr_uint_t fd;
    ptr_uint_t flags;
    /* Static assert would save debug build overhead: could use array bound trick */
    ASSERT(sizeof(file_t) <= sizeof(ptr_uint_t));
    /* i#239: If there were unsuspended threads across the fork, we could have
     * forked while another thread held locks. We reset the locks and try to
     * cope with any intermediate state left behind from the parent. If we
     * encounter more deadlocks after fork, we can add more lock and data resets
     * on a case by case basis.
     */
    mutex_fork_reset(&all_threads_synch_lock);
    mutex_fork_reset(&thread_initexit_lock);
    os_fork_post(dcontext, false/*!parent*/);
    /* re-populate cached data that contains pid */
    pid_cached = get_process_id();
    get_application_pid_helper(true);
    get_application_name_helper(true, true /* not important */);
    /* close all copies of parent files */
    TABLE_RWLOCK(fd_table, write, lock);
    iter = 0;
    do {
        iter = generic_hash_iterate_next(GLOBAL_DCONTEXT, fd_table, iter,
                                         &fd, (void **)&flags);
        if (iter < 0)
            break;
        if (TEST(OS_OPEN_CLOSE_ON_FORK, flags)) {
            close_syscall((file_t)fd);
            /* Remove while iterating; the iterator supports this. */
            iter = generic_hash_iterate_remove(GLOBAL_DCONTEXT, fd_table,
                                               iter, fd);
        }
    } while (true);
    TABLE_RWLOCK(fd_table, write, unlock);
}
/* Swaps DR's TLS segment between the real per-thread TLS (to_app=false) and a
 * heap copy with invalid magic (to_app=true), used around clone so that an
 * as-yet-unscheduled child never observes a valid-magic parent TLS (i#2089).
 */
static void
os_swap_dr_tls(dcontext_t *dcontext, bool to_app)
{
#ifdef X86
    /* If the option is off, we really should swap it (xref i#107/i#2088 comments
     * in os_swap_context()) but there are few consequences of not doing it, and we
     * have no code set up separate from the i#2089 scheme here.
     */
    if (!INTERNAL_OPTION(safe_read_tls_init))
        return;
    if (to_app) {
        /* i#2089: we want the child to inherit a TLS with invalid .magic, but we
         * need our own syscall execution and post-syscall code to have valid scratch
         * and dcontext values. We can't clear our own magic b/c we don't know when
         * the child will be scheduled, so we use a copy of our TLS. We carefully
         * never have a valid magic there in case a prior child is still unscheduled.
         *
         * We assume the child will not modify this TLS copy in any way.
         * CLONE_SETTLS touches the other segment (we'll have to watch for
         * addition of CLONE_SETTLS_AUX). The parent will use the scratch space
         * returning from the syscall to dispatch, but we restore via os_clone_post()
         * immediately before anybody calls get_thread_private_dcontext() or
         * anything.
         */
        /* FIXME i#2088: to preserve the app's aux seg, if any, we should pass it
         * and the seg reg value via the clone record (like we do for ARM today).
         */
        os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
        os_local_state_t *cur_tls = get_os_tls_from_dc(dcontext);
        /* Lazily allocate the per-thread TLS copy on first use. */
        if (ostd->clone_tls == NULL) {
            ostd->clone_tls = (os_local_state_t *)
                HEAP_TYPE_ALLOC(dcontext, os_local_state_t, ACCT_THREAD_MGT,
                                UNPROTECTED);
            LOG(THREAD, LOG_THREADS, 2, "TLS copy is "PFX"\n", ostd->clone_tls);
        }
        /* Leave no window where a prior uninit child could read valid magic by
         * invalidating prior to copying.
         */
        cur_tls->magic = TLS_MAGIC_INVALID;
        memcpy(ostd->clone_tls, cur_tls, sizeof(*ostd->clone_tls));
        cur_tls->magic = TLS_MAGIC_VALID;
        ostd->clone_tls->self = ostd->clone_tls;
        os_set_dr_tls_base(dcontext, NULL, (byte *)ostd->clone_tls);
    } else {
        /* i#2089: restore the parent's DR TLS */
        os_local_state_t *real_tls = get_os_tls_from_dc(dcontext);
        /* For dr_app_start we can end up here with nothing to do, so we check. */
        if (get_segment_base(SEG_TLS) != (byte *)real_tls) {
            DEBUG_DECLARE(os_thread_data_t *ostd =
                          (os_thread_data_t *)dcontext->os_field);
            ASSERT(get_segment_base(SEG_TLS) == (byte *)ostd->clone_tls);
            /* We assume there's no need to copy the scratch slots back */
            os_set_dr_tls_base(dcontext, real_tls, (byte *)real_tls);
        }
    }
#endif
}
/* Pre-clone: switch TLS state to what the child should inherit. */
static void
os_clone_pre(dcontext_t *dcontext)
{
    /* We switch the lib tls segment back to app's segment.
     * Please refer to comment on os_switch_lib_tls.
     */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        os_switch_lib_tls(dcontext, true/*to app*/);
    }
    os_swap_dr_tls(dcontext, true/*to app*/);
}
/* This is called from dispatch prior to post_system_call().
 * Restores the parent's real DR TLS swapped out in os_clone_pre().
 */
void
os_clone_post(dcontext_t *dcontext)
{
    os_swap_dr_tls(dcontext, false/*to DR*/);
}
/* Returns the base address of DR's own TLS segment for the given thread. */
byte *
os_get_dr_tls_base(dcontext_t *dcontext)
{
    return ((os_thread_data_t *)dcontext->os_field)->dr_tls_base;
}
/* We only bother swapping the library segment if we're using the private
 * loader.
 */
bool
os_should_swap_state(void)
{
#ifdef X86
    /* -private_loader currently implies -mangle_app_seg, but let's be safe. */
    return (INTERNAL_OPTION(mangle_app_seg) &&
            IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false));
#elif defined(AARCHXX)
    /* FIXME i#1582: this should return true, but there is a lot of complexity
     * getting os_switch_seg_to_context() to do the right then when called
     * at main thread init, secondary thread init, early and late injection,
     * and thread exit, since it is fragile with its writes to app TLS.
     */
    return false;
#endif
}
/* Returns whether the app's (rather than DR's) TLS state is currently
 * installed for this thread.
 */
bool
os_using_app_state(dcontext_t *dcontext)
{
#ifdef X86
    /* XXX: could be optimized to avoid the syscall by caching the current
     * state in the dcontext instead of querying the live segment base.
     */
    if (INTERNAL_OPTION(mangle_app_seg)) {
        return get_segment_base(TLS_REG_LIB) ==
            os_get_app_tls_base(dcontext, TLS_REG_LIB);
    }
#endif
    /* Without mangling we never switch segments, so the app state is
     * always in place.
     */
    return true;
}
/* Similar to PEB swapping on Windows, this call will switch between DR's
 * private lib segment base and the app's segment base.
 * i#107/i#2088: If the app wants to use SEG_TLS, we should also switch that back at
 * this boundary, but there are many places where we simply assume it is always
 * installed.
 */
void
os_swap_context(dcontext_t *dcontext, bool to_app, dr_state_flags_t flags)
{
    if (os_should_swap_state())
        os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
    /* Only swap DR's own TLS when the caller asked for it via the flag. */
    if (TEST(DR_STATE_DR_TLS, flags))
        os_swap_dr_tls(dcontext, to_app);
}
/* Swaps to the app state for a thread going fully native. */
void
os_swap_context_go_native(dcontext_t *dcontext, dr_state_flags_t flags)
{
#ifdef AARCHXX
    /* FIXME i#1582: remove this routine once os_should_swap_state()
     * is not disabled and we can actually call
     * os_swap_context_go_native() safely from multiple places.
     */
    os_switch_seg_to_context(dcontext, LIB_SEG_TLS, true/*to app*/);
#else
    os_swap_context(dcontext, true/*to app*/, flags);
#endif
}
/* Called when a thread comes (back) under DR control: installs DR state and
 * starts its interval timers.
 */
void
os_thread_under_dynamo(dcontext_t *dcontext)
{
    os_swap_context(dcontext, false/*to dr*/, DR_STATE_GO_NATIVE);
    start_itimer(dcontext);
}
/* Called when a thread leaves DR control: stops its interval timers before
 * restoring the app state (the reverse order of os_thread_under_dynamo()).
 */
void
os_thread_not_under_dynamo(dcontext_t *dcontext)
{
    stop_itimer(dcontext);
    os_swap_context(dcontext, true/*to app*/, DR_STATE_GO_NATIVE);
}
/* Process-wide transition to running under DR (e.g. attach/start). */
void
os_process_under_dynamorio(dcontext_t *dcontext)
{
    LOG(GLOBAL, LOG_THREADS, 1, "process now under DR\n");
    /* We only support regular process-wide signal handlers for delayed takeover. */
    signal_reinstate_handlers(dcontext);
    hook_vsyscall(dcontext, false);
}
/* Process-wide transition back to native execution (e.g. detach/stop). */
void
os_process_not_under_dynamorio(dcontext_t *dcontext)
{
    /* We only support regular process-wide signal handlers for mixed-mode control. */
    signal_remove_handlers(dcontext);
    unhook_vsyscall();
    LOG(GLOBAL, LOG_THREADS, 1, "process no longer under DR\n");
}
/* Detach hook: whether this thread's context should be left untranslated.
 * On this platform every thread is translated, so always false.
 */
bool
detach_do_not_translate(thread_record_t *tr)
{
    return false;
}
/* Detach hook for final per-thread mcontext fixups; a no-op on this platform. */
void
detach_finalize_translation(thread_record_t *tr, priv_mcontext_t *mc)
{
    /* Nothing to do. */
}
/* Detach hook for final process-wide cleanup; a no-op on this platform. */
void
detach_finalize_cleanup(void)
{
    /* Nothing to do. */
}
/* Returns the process group id of the current process via a raw syscall. */
static pid_t
get_process_group_id()
{
    return dynamorio_syscall(SYS_getpgid, 0);
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */
/* Returns the current process id via a raw syscall (safe pre- and post-init). */
process_id_t
get_process_id()
{
    return dynamorio_syscall(SYS_getpid, 0);
}
#ifndef NOT_DYNAMORIO_CORE_PROPER /* around most of file, to exclude preload */
/* Returns the parent process id via a raw syscall. */
process_id_t
get_parent_id(void)
{
    return dynamorio_syscall(SYS_getppid, 0);
}
/* Returns the kernel's id for the calling thread via a raw syscall.
 * Falls back on the process id when the kernel has no thread-group support
 * (in which case threads are separate processes).
 */
thread_id_t
get_sys_thread_id(void)
{
#ifdef MACOS
    if (kernel_thread_groups)
        return dynamorio_syscall(SYS_thread_selfid, 0);
#else
    if (kernel_thread_groups)
        return dynamorio_syscall(SYS_gettid, 0);
#endif
    return dynamorio_syscall(SYS_getpid, 0);
}
/* Returns the calling thread's id, preferring the copy cached in TLS.
 * i#228/PR 494330: a syscall here is a perf bottleneck since we are called
 * while holding read and recursive locks, so only fall back on the syscall
 * when TLS is unavailable.
 */
thread_id_t
get_thread_id(void)
{
    thread_id_t cached = get_tls_thread_id();
    return (cached == INVALID_THREAD_ID) ? get_sys_thread_id() : cached;
}
/* Returns the thread id cached in this thread's TLS, or INVALID_THREAD_ID
 * when TLS is not yet (or no longer) initialized.
 */
thread_id_t
get_tls_thread_id(void)
{
    ptr_int_t tid; /* can't use thread_id_t since it's 32-bits */
    if (!is_thread_tls_initialized())
        return INVALID_THREAD_ID;
    READ_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, tid);
    /* it reads 8-bytes into the memory, which includes app_gs and app_fs.
     * 0x000000007127357b <get_tls_thread_id+37>:      mov    %gs:(%rax),%rax
     * 0x000000007127357f <get_tls_thread_id+41>:      mov    %rax,-0x8(%rbp)
     * so we remove the TRUNCATE check and truncate it on return.
     */
    return (thread_id_t) tid;
}
/* returns the thread-private dcontext pointer for the calling thread, or NULL
 * (GLOBAL_DCONTEXT for a standalone-library client) when TLS is not set up.
 */
dcontext_t*
get_thread_private_dcontext(void)
{
#ifdef HAVE_TLS
    dcontext_t *dcontext;
    /* We have to check this b/c this is called from __errno_location prior
     * to os_tls_init, as well as after os_tls_exit, and early in a new
     * thread's initialization (see comments below on that).
     */
    if (!is_thread_tls_initialized())
        return (IF_CLIENT_INTERFACE(standalone_library ? GLOBAL_DCONTEXT :) NULL);
    /* We used to check tid and return NULL to distinguish parent from child, but
     * that was affecting performance (xref PR 207366: but I'm leaving the assert in
     * for now so debug build will still incur it).  So we fixed the cases that
     * needed that:
     *
     * - dynamo_thread_init() calling is_thread_initialized() for a new thread
     *   created via clone or the start/stop interface: so we have
     *   is_thread_initialized() pay the get_thread_id() cost.
     * - new_thread_setup()'s ENTER_DR_HOOK kstats, or a crash and the signal
     *   handler asking about dcontext: we have new_thread_dynamo_start()
     *   clear the segment register for us early on.
     * - child of fork (ASSERT_OWN_NO_LOCKS, etc. on re-entering DR):
     *   here we just suppress the assert: we'll use this same dcontext.
     *   xref PR 209518 where w/o this fix we used to need an extra KSTOP.
     *
     * An alternative would be to have the parent thread clear the segment
     * register, or even set up the child's TLS ahead of time ourselves
     * (and special-case so that we know if at clone syscall the app state is not
     * quite correct: but we're already stealing a register there: PR 286194).
     * We could also have the kernel set up TLS for us (PR 285898).
     *
     * For hotp_only or non-full-control (native_exec, e.g.) (PR 212012), this
     * routine is not the only issue: we have to catch all new threads since
     * hotp_only gateways assume tls is set up.
     * Xref PR 192231.
     */
    /* PR 307698: this assert causes large slowdowns (also xref PR 207366) */
    DOCHECK(CHKLVL_DEFAULT+1, {
        ASSERT(get_tls_thread_id() == get_sys_thread_id() ||
               /* ok for fork as mentioned above */
               pid_cached != get_process_id());
    });
    READ_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
    return dcontext;
#else
    /* Assumption: no lock needed on a read => no race conditions between
     * reading and writing same tid!  Since both get and set are only for
     * the current thread, they cannot both execute simultaneously for the
     * same tid, right?
     */
    thread_id_t tid = get_thread_id();
    int i;
    if (tls_table != NULL) {
        for (i=0; i<MAX_THREADS; i++) {
            if (tls_table[i].tid == tid) {
                return tls_table[i].dcontext;
            }
        }
    }
    return NULL;
#endif
}
/* sets the thread-private dcontext pointer for the calling thread */
void
set_thread_private_dcontext(dcontext_t *dcontext)
{
#ifdef HAVE_TLS
    /* Fast path: write directly into this thread's TLS slot. */
    ASSERT(is_thread_tls_allocated());
    WRITE_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
#else
    /* No TLS support: use the global tid->dcontext table, serialized by
     * tls_lock (reads in get_thread_private_dcontext are lock-free since
     * only the owning thread writes its own entry).
     */
    thread_id_t tid = get_thread_id();
    int i;
    bool found = false;
    ASSERT(tls_table != NULL);
    mutex_lock(&tls_lock);
    /* First look for an existing entry for this tid. */
    for (i=0; i<MAX_THREADS; i++) {
        if (tls_table[i].tid == tid) {
            if (dcontext == NULL) {
                /* if setting to NULL, clear the entire slot for reuse */
                tls_table[i].tid = 0;
            }
            tls_table[i].dcontext = dcontext;
            found = true;
            break;
        }
    }
    if (!found) {
        if (dcontext == NULL) {
            /* don't do anything...but why would this happen? */
        } else {
            /* look for an empty slot (tid == 0 marks a free entry) */
            for (i=0; i<MAX_THREADS; i++) {
                if (tls_table[i].tid == 0) {
                    tls_table[i].tid = tid;
                    tls_table[i].dcontext = dcontext;
                    found = true;
                    break;
                }
            }
        }
    }
    mutex_unlock(&tls_lock);
    /* Fires if the table is full, or on a NULL store with no existing entry. */
    ASSERT(found);
#endif
}
/* replaces old with new
 * use for forking: child should replace parent's id with its own
 */
static void
replace_thread_id(thread_id_t old, thread_id_t new)
{
#ifdef HAVE_TLS
    /* local copy of the new id, handed to the TLS write macro */
    thread_id_t new_tid = new;
    ASSERT(is_thread_tls_initialized());
    /* Debug-only sanity check: the slot should currently hold "old". */
    DOCHECK(1, {
        thread_id_t old_tid;
        READ_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid);
        ASSERT(old_tid == old);
    });
    WRITE_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, new_tid);
#else
    /* Table-based fallback: swap the tid in place under the table lock. */
    int i;
    mutex_lock(&tls_lock);
    for (i=0; i<MAX_THREADS; i++) {
        if (tls_table[i].tid == old) {
            tls_table[i].tid = new;
            break;
        }
    }
    mutex_unlock(&tls_lock);
#endif
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER */
/* translate permission string to platform independent protection bits */
/* Expects a /proc/pid/maps-style permission string ("rwx", "r-x", ...):
 * byte 0 is read, byte 1 write, byte 2 execute.  Unlike the previous
 * version, we stop at the terminating NUL so a short string such as "r"
 * cannot cause a read past the end of the buffer.
 */
uint
permstr_to_memprot(const char * const perm)
{
    uint mem_prot = 0;
    if (perm == NULL || perm[0] == '\0')
        return mem_prot;
    if (perm[0] == 'r')
        mem_prot |= MEMPROT_READ;
    if (perm[1] == '\0')
        return mem_prot;
    if (perm[1] == 'w')
        mem_prot |= MEMPROT_WRITE;
    if (perm[2] == '\0')
        return mem_prot;
    if (perm[2] == 'x')
        mem_prot |= MEMPROT_EXEC;
    return mem_prot;
}
/* translate platform independent protection bits to native flags */
uint
memprot_to_osprot(uint prot)
{
    /* Map each MEMPROT_* bit onto its mmap/mprotect PROT_* counterpart. */
    uint os_prot = 0;
    if (TEST(MEMPROT_READ, prot))
        os_prot |= PROT_READ;
    if (TEST(MEMPROT_WRITE, prot))
        os_prot |= PROT_WRITE;
    if (TEST(MEMPROT_EXEC, prot))
        os_prot |= PROT_EXEC;
    return os_prot;
}
#ifndef NOT_DYNAMORIO_CORE_PROPER
/* translate native flags to platform independent protection bits */
static inline uint
osprot_to_memprot(uint prot)
{
    /* Inverse of memprot_to_osprot(): PROT_* -> MEMPROT_*. */
    uint result = 0;
    if (TEST(PROT_READ, prot))
        result |= MEMPROT_READ;
    if (TEST(PROT_WRITE, prot))
        result |= MEMPROT_WRITE;
    if (TEST(PROT_EXEC, prot))
        result |= MEMPROT_EXEC;
    return result;
}
#endif
/* returns osprot flags preserving all native protection flags except
 * for RWX, which are replaced according to memprot */
uint
osprot_replace_memprot(uint old_osprot, uint memprot)
{
    /* On this platform only the PROT_* rwx bits matter to mprotect()
     * (the MAP_* flags passed to mmap() are separate), so nothing from
     * old_osprot needs to survive: regenerate osprot from memprot.
     */
    return memprot_to_osprot(memprot);
}
/* libc independence */
/* Raw SYS_mprotect wrapper: returns 0 on success, negative errno on failure. */
static inline long
mprotect_syscall(byte *p, size_t size, uint prot)
{
    return dynamorio_syscall(SYS_mprotect, 3, p, size, prot);
}
/* Returns whether an mmap_syscall() return value indicates success.
 * Failure is encoded as a small negative errno in the pointer value.
 */
bool
mmap_syscall_succeeded(byte *retval)
{
    ptr_int_t result = (ptr_int_t) retval;
    /* libc interprets up to -PAGE_SIZE as an error, and you never know if
     * some weird errno will be used by say vmkernel (xref PR 365331)
     */
    bool fail = (result < 0 && result >= -PAGE_SIZE);
    /* Flag (in debug) any errno outside the set we have seen mmap return. */
    ASSERT_CURIOSITY(!fail ||
                     IF_VMX86(result == -ENOENT ||)
                     IF_VMX86(result == -ENOSPC ||)
                     result == -EBADF   ||
                     result == -EACCES  ||
                     result == -EINVAL  ||
                     result == -ETXTBSY ||
                     result == -EAGAIN  ||
                     result == -ENOMEM  ||
                     result == -ENODEV  ||
                     result == -EFAULT  ||
                     result == -EPERM);
    return !fail;
}
/* N.B.: offs should be in pages for 32-bit Linux */
/* Raw mmap wrapper: uses SYS_mmap2 on 32-bit Linux (page-unit offset) and
 * SYS_mmap elsewhere; check the result with mmap_syscall_succeeded().
 */
static inline byte *
mmap_syscall(byte *addr, size_t len, ulong prot, ulong flags, ulong fd, ulong offs)
{
#if defined(MACOS) && !defined(X64)
    return (byte *)(ptr_int_t)
        dynamorio_syscall(SYS_mmap, 7, addr, len, prot, flags, fd,
                          /* represent 64-bit arg as 2 32-bit args */
                          offs, 0);
#else
    return (byte *)(ptr_int_t)
        dynamorio_syscall(IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)), 6,
                          addr, len, prot, flags, fd, offs);
#endif
}
/* Raw SYS_munmap wrapper: returns 0 on success, negative errno on failure. */
static inline long
munmap_syscall(byte *addr, size_t len)
{
    return dynamorio_syscall(SYS_munmap, 2, addr, len);
}
#ifndef NOT_DYNAMORIO_CORE_PROPER
/* free memory allocated from os_raw_mem_alloc */
bool
os_raw_mem_free(void *p, size_t size, uint flags, heap_error_code_t *error_code)
{
    long res;
    ASSERT(error_code != NULL);
    /* only non-empty, page-aligned regions are expected here */
    ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
    res = munmap_syscall(p, size);
    /* munmap returns 0 on success and a negative errno on failure */
    *error_code = (res == 0) ? HEAP_ERROR_SUCCESS : -res;
    return (res == 0);
}
/* try to alloc memory at preferred from os directly,
 * caller is required to handle thread synchronization and to update
 */
/* Returns NULL (with *error_code set) on failure, including when a
 * preferred address was requested but not granted (the allocation is
 * then freed and HEAP_ERROR_NOT_AT_PREFERRED is reported).
 */
void *
os_raw_mem_alloc(void *preferred, size_t size, uint prot, uint flags,
                 heap_error_code_t *error_code)
{
    byte *p;
    uint os_prot = memprot_to_osprot(prot);
    /* anonymous private mapping; RAW_ALLOC_32BIT restricts to low 4GB */
    uint os_flags = MAP_PRIVATE |
                    MAP_ANONYMOUS |
                    (TEST(RAW_ALLOC_32BIT, flags) ? MAP_32BIT : 0);
    ASSERT(error_code != NULL);
    /* should only be used on aligned pieces */
    ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
    p = mmap_syscall(preferred, size, os_prot, os_flags, -1, 0);
    if (!mmap_syscall_succeeded(p)) {
        /* failure value is the negated errno encoded in the pointer */
        *error_code = -(heap_error_code_t)(ptr_int_t)p;
        LOG(GLOBAL, LOG_HEAP, 3,
            "os_raw_mem_alloc %d bytes failed"PFX"\n", size, p);
        return NULL;
    }
    if (preferred != NULL && p != preferred) {
        /* we got memory, but not where asked: treat as failure and release */
        *error_code = HEAP_ERROR_NOT_AT_PREFERRED;
        os_raw_mem_free(p, size, flags, error_code);
        LOG(GLOBAL, LOG_HEAP, 3,
            "os_raw_mem_alloc %d bytes failed"PFX"\n", size, p);
        return NULL;
    }
    LOG(GLOBAL, LOG_HEAP, 2, "os_raw_mem_alloc: "SZFMT" bytes @ "PFX"\n",
        size, p);
    return p;
}
#ifdef LINUX
/* Sets up the emulated-brk mapping just past the executable image.
 * Idempotent: a second call is a no-op once app_brk_map is set.
 */
void
init_emulated_brk(app_pc exe_end)
{
    ASSERT(DYNAMO_OPTION(emulate_brk));
    if (app_brk_map != NULL)
        return;
    /* i#1004: emulate brk via a separate mmap.
     * The real brk starts out empty, but we need at least a page to have an
     * mmap placeholder.
     */
    app_brk_map = mmap_syscall(exe_end, PAGE_SIZE, PROT_READ|PROT_WRITE,
                               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    ASSERT(mmap_syscall_succeeded(app_brk_map));
    app_brk_cur = app_brk_map;
    app_brk_end = app_brk_map + PAGE_SIZE;
}
/* Services an app brk request against our emulated region (i#1004).
 * Returns the resulting break, which is unchanged on any failure --
 * matching the kernel's brk behavior of returning the current break.
 */
static byte *
emulate_app_brk(dcontext_t *dcontext, byte *new_val)
{
    byte *old_brk = app_brk_cur;
    ASSERT(DYNAMO_OPTION(emulate_brk));
    LOG(THREAD, LOG_HEAP, 2, "%s: cur="PFX", requested="PFX"\n",
        __FUNCTION__, app_brk_cur, new_val);
    new_val = (byte *) ALIGN_FORWARD(new_val, PAGE_SIZE);
    if (new_val == NULL || new_val == app_brk_cur ||
        /* Not allowed to shrink below original base */
        new_val < app_brk_map) {
        /* Just return cur val */
    } else if (new_val < app_brk_cur) {
        /* Shrink: unmap the tail and pull both cur and end down. */
        if (munmap_syscall(new_val, app_brk_cur - new_val) == 0) {
            app_brk_cur = new_val;
            app_brk_end = new_val;
        }
    } else if (new_val < app_brk_end) {
        /* We've already allocated the space */
        app_brk_cur = new_val;
    } else {
        /* Expand: grow the mapping in place (moving would break brk semantics). */
        byte *remap = (byte *)
            dynamorio_syscall(SYS_mremap, 4, app_brk_map,
                              app_brk_end - app_brk_map,
                              new_val - app_brk_map, 0/*do not move*/);
        if (mmap_syscall_succeeded(remap)) {
            ASSERT(remap == app_brk_map);
            app_brk_cur = new_val;
            app_brk_end = new_val;
        } else {
            LOG(THREAD, LOG_HEAP, 1, "%s: mremap to "PFX" failed\n",
                __FUNCTION__, new_val);
        }
    }
    /* Notify memory-tracking machinery of any change. */
    if (app_brk_cur != old_brk)
        handle_app_brk(dcontext, app_brk_map, old_brk, app_brk_cur);
    return app_brk_cur;
}
#endif /* LINUX */
#if defined(CLIENT_INTERFACE) && defined(LINUX)
DR_API
/* XXX: could add dr_raw_mem_realloc() instead of dr_raw_mremap() -- though there
 * is no realloc for Windows: supposed to reserve yourself and then commit in
 * pieces.
 */
/* Issues a raw 5-argument mremap on behalf of a client and updates DR's
 * view of the app's address space afterwards.
 */
void *
dr_raw_mremap(void *old_address, size_t old_size, size_t new_size,
              int flags, void *new_address)
{
    byte *res;
    dr_mem_info_t info;
    dcontext_t *dcontext = get_thread_private_dcontext();
    /* i#173: we need prot + type from prior to mremap */
    DEBUG_DECLARE(bool ok =)
        query_memory_ex(old_address, &info);
    /* XXX: this could be a large region w/ multiple protection regions
     * inside.  For now we assume our handling of it doesn't care.
     */
    ASSERT(ok);
    if (is_pretend_or_executable_writable(old_address))
        info.prot |= DR_MEMPROT_WRITE;
    /* we just unconditionally send the 5th param */
    res = (byte *) dynamorio_syscall(SYS_mremap, 5, old_address, old_size, new_size,
                                     flags, new_address);
    handle_app_mremap(dcontext, res, new_size, old_address, old_size,
                      info.prot, info.size);
    return res;
}
DR_API
/* Performs a brk on behalf of a client, either against our emulated brk
 * region (-emulate_brk) or via the real SYS_brk, keeping DR's memory
 * tracking in sync.
 */
void *
dr_raw_brk(void *new_address)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    if (DYNAMO_OPTION(emulate_brk)) {
        /* i#1004: emulate brk via a separate mmap */
        return (void *) emulate_app_brk(dcontext, (byte *)new_address);
    } else {
        /* We pay the cost of 2 syscalls.  This should be infrequent enough that
         * it doesn't matter.
         */
        if (new_address == NULL) {
            /* Just a query */
            return (void *) dynamorio_syscall(SYS_brk, 1, new_address);
        } else {
            /* query the old break first so we can report the delta */
            byte *old_brk = (byte *) dynamorio_syscall(SYS_brk, 1, 0);
            byte *res = (byte *) dynamorio_syscall(SYS_brk, 1, new_address);
            handle_app_brk(dcontext, NULL, old_brk, res);
            return res;
        }
    }
}
#endif /* CLIENT_INTERFACE && LINUX */
/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_free(void *p, size_t size, heap_error_code_t *error_code)
{
    long res;
    ASSERT(error_code != NULL);
    if (!dynamo_exited)
        LOG(GLOBAL, LOG_HEAP, 4, "os_heap_free: %d bytes @ "PFX"\n", size, p);
    res = munmap_syscall(p, size);
    /* munmap returns 0 on success and a negative errno on failure */
    *error_code = (res == 0) ? HEAP_ERROR_SUCCESS : -res;
    ASSERT(res == 0);
}
/* reserve virtual address space without committing swap space for it,
   and of course no physical pages since it will never be touched */
/* to be transparent, we do not use sbrk, and are
 * instead using mmap, and asserting that all os_heap requests are for
 * reasonably large pieces of memory */
void *
os_heap_reserve(void *preferred, size_t size, heap_error_code_t *error_code,
                bool executable)
{
    void *p;
    uint prot = PROT_NONE;
#ifdef VMX86_SERVER
    /* PR 365331: we need to be in the mmap_text region for code cache and
     * gencode (PROT_EXEC).
     */
    ASSERT(!os_in_vmkernel_userworld() ||
           !executable || preferred == NULL ||
           ((byte *)preferred >= os_vmk_mmap_text_start() &&
            ((byte *)preferred)+size <= os_vmk_mmap_text_end()));
    /* Note that a preferred address overrides PROT_EXEC and a mmap_data
     * address will be honored, even though any execution there will fault.
     */
    /* FIXME: note that PROT_EXEC => read access, so our guard pages and other
     * non-committed memory, while not writable, is readable.
     * Plus, we can't later clear all prot bits for userworld mmap due to PR 107872
     * (PR 365748 covers fixing this for us).
     * But in most uses we should get our preferred vmheap and shouldn't run
     * out of vmheap, so this should be a corner-case issue.
     */
    if (executable)
        prot = PROT_EXEC;
#endif
    /* should only be used on aligned pieces */
    ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
    ASSERT(error_code != NULL);
    /* FIXME: note that this memory is in fact still committed - see man mmap */
    /* FIXME: case 2347 on Linux or -vm_reserve should be set to false */
    /* FIXME: Need to actually get a mmap-ing with |MAP_NORESERVE */
    p = mmap_syscall(preferred, size, prot, MAP_PRIVATE|MAP_ANONYMOUS
                     IF_X64(| (DYNAMO_OPTION(heap_in_lower_4GB) ?
                               MAP_32BIT : 0)),
                     -1, 0);
    if (!mmap_syscall_succeeded(p)) {
        /* failure value is the negated errno encoded in the pointer */
        *error_code = -(heap_error_code_t)(ptr_int_t)p;
        LOG(GLOBAL, LOG_HEAP, 4,
            "os_heap_reserve %d bytes failed "PFX"\n", size, p);
        return NULL;
    } else if (preferred != NULL && p != preferred) {
        /* We didn't get the preferred address. To harmonize with windows behavior and
         * give greater control we fail the reservation. */
        heap_error_code_t dummy;
        *error_code = HEAP_ERROR_NOT_AT_PREFERRED;
        os_heap_free(p, size, &dummy);
        ASSERT(dummy == HEAP_ERROR_SUCCESS);
        LOG(GLOBAL, LOG_HEAP, 4,
            "os_heap_reserve %d bytes at "PFX" not preferred "PFX"\n",
            size, preferred, p);
        return NULL;
    } else {
        *error_code = HEAP_ERROR_SUCCESS;
    }
    LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve: %d bytes @ "PFX"\n", size, p);
#ifdef VMX86_SERVER
    /* PR 365331: ensure our memory is all in the mmap_text region */
    ASSERT(!os_in_vmkernel_userworld() || !executable ||
           ((byte *)p >= os_vmk_mmap_text_start() &&
            ((byte *)p) + size <= os_vmk_mmap_text_end()));
#endif
#if defined(ANDROID) && defined(DEBUG)
    /* We don't label in release to be more transparent */
    dynamorio_syscall(SYS_prctl, 5, PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size,
                      "DynamoRIO-internal");
#endif
    return p;
}
/* Scans the memory map for the first gap of at least "size" bytes inside
 * [start, end).  On success returns true and fills in the gap bounds via
 * the optional OUT params; the gap reported is the full free span, which
 * may exceed "size".
 */
static bool
find_free_memory_in_region(byte *start, byte *end, size_t size,
                           byte **found_start OUT, byte **found_end OUT)
{
    memquery_iter_t iter;
    /* XXX: despite /proc/sys/vm/mmap_min_addr == PAGE_SIZE, mmap won't
     * give me that address if I use it as a hint.
     */
    app_pc last_end = (app_pc) (PAGE_SIZE*16);
    bool found = false;
    memquery_iterator_start(&iter, NULL, false/*won't alloc*/);
    while (memquery_iterator_next(&iter)) {
        /* gap between the previous mapping's end and this one's start,
         * clipped to [start, end)
         */
        if (iter.vm_start >= start &&
            MIN(iter.vm_start, end) - MAX(last_end, start) >= size) {
            if (found_start != NULL)
                *found_start = MAX(last_end, start);
            if (found_end != NULL)
                *found_end = MIN(iter.vm_start, end);
            found = true;
            break;
        }
        if (iter.vm_start >= end)
            break;
        last_end = iter.vm_end;
    }
    memquery_iterator_stop(&iter);
    return found;
}
/* Reserves "size" bytes somewhere inside [start, end), retrying on races
 * with other mappers.  Falls back to an unconstrained os_heap_reserve()
 * when the region is the whole address space.
 */
void *
os_heap_reserve_in_region(void *start, void *end, size_t size,
                          heap_error_code_t *error_code, bool executable)
{
    byte *p = NULL;
    byte *try_start = NULL;
    ASSERT(ALIGNED(start, PAGE_SIZE) && ALIGNED(end, PAGE_SIZE));
    ASSERT(ALIGNED(size, PAGE_SIZE));
    LOG(GLOBAL, LOG_HEAP, 3,
        "os_heap_reserve_in_region: "SZFMT" bytes in "PFX"-"PFX"\n", size, start, end);
    /* if no restriction on location use regular os_heap_reserve() */
    if (start == (void *)PTR_UINT_0 && end == (void *)POINTER_MAX)
        return os_heap_reserve(NULL, size, error_code, executable);
    /* loop to handle races: another thread may grab the gap we found
     * before our reservation lands, in which case we look again
     */
    while (find_free_memory_in_region(start, end, size, &try_start, NULL)) {
        p = os_heap_reserve(try_start, size, error_code, executable);
        if (p != NULL) {
            ASSERT(*error_code == HEAP_ERROR_SUCCESS);
            ASSERT(p >= (byte *)start && p + size <= (byte *)end);
            break;
        }
    }
    if (p == NULL)
        *error_code = HEAP_ERROR_CANT_RESERVE_IN_REGION;
    else
        *error_code = HEAP_ERROR_SUCCESS;
    LOG(GLOBAL, LOG_HEAP, 2,
        "os_heap_reserve_in_region: reserved "SZFMT" bytes @ "PFX" in "PFX"-"PFX"\n",
        size, p, start, end);
    return p;
}
/* commit previously reserved with os_heap_reserve pages */
/* returns false when out of memory */
/* A replacement of os_heap_alloc can be constructed by using os_heap_reserve
   and os_heap_commit on a subset of the reserved pages. */
/* caller is required to handle thread synchronization */
bool
os_heap_commit(void *p, size_t size, uint prot, heap_error_code_t *error_code)
{
    uint os_prot = memprot_to_osprot(prot);
    long res;
    /* should only be used on aligned pieces */
    ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
    ASSERT(p);
    ASSERT(error_code != NULL);
    /* FIXME: note that the memory would not be not truly committed if we have */
    /* not actually marked a mmap-ing without MAP_NORESERVE */
    /* "commit" here is just flipping protections from PROT_NONE */
    res = mprotect_syscall(p, size, os_prot);
    if (res != 0) {
        *error_code = -res;
        return false;
    } else {
        *error_code = HEAP_ERROR_SUCCESS;
    }
    LOG(GLOBAL, LOG_HEAP, 2, "os_heap_commit: %d bytes @ "PFX"\n", size, p);
    return true;
}
/* caller is required to handle thread synchronization and to update dynamo vm areas */
/* Currently a no-op that always reports success: the pages stay mapped
 * (see the FIXMEs below on how a real decommit could be done).
 */
void
os_heap_decommit(void *p, size_t size, heap_error_code_t *error_code)
{
    int rc;
    ASSERT(error_code != NULL);
    if (!dynamo_exited)
        LOG(GLOBAL, LOG_HEAP, 4, "os_heap_decommit: %d bytes @ "PFX"\n", size, p);
    *error_code = HEAP_ERROR_SUCCESS;
    /* FIXME: for now do nothing since os_heap_reserve has in fact committed the memory */
    rc = 0;
    /* TODO:
           p = mmap_syscall(p, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
       we should either do a mremap()
       or we can do a munmap() followed 'quickly' by a mmap() -
       also see above the comment that os_heap_reserve() in fact is not so lightweight
    */
    ASSERT(rc == 0);
}
bool
os_heap_systemwide_overcommit(heap_error_code_t last_error_code)
{
    /* We have no query for the kernel's overcommit state here, so give the
     * conservative answer of yes (FIXME in original).
     */
    return true;
}
bool
os_heap_get_commit_limit(size_t *commit_used, size_t *commit_limit)
{
    /* Not yet implemented on this platform (FIXME - NYI): report failure
     * and leave the output parameters untouched.
     */
    return false;
}
/* yield the current thread */
void
os_thread_yield()
{
#ifdef MACOS
    /* XXX i#1291: use raw syscall instead */
    swtch_pri(0);
#else
    /* raw syscall to stay libc-independent */
    dynamorio_syscall(SYS_sched_yield, 0);
#endif
}
/* Sends signum to the given thread via SYS_tgkill.
 * Returns true on success; NYI on MacOS.
 */
static bool
thread_signal(process_id_t pid, thread_id_t tid, int signum)
{
#ifdef MACOS
    /* FIXME i#58: this takes in a thread port.  Need to map thread id to port.
     * Need to figure out whether we support raw Mach threads w/o pthread on top.
     */
    ASSERT_NOT_IMPLEMENTED(false);
    return false;
#else
    /* FIXME: for non-NPTL use SYS_kill */
    /* Note that the pid is equivalent to the thread group id.
     * However, we can have threads sharing address space but not pid
     * (if created via CLONE_VM but not CLONE_THREAD), so make sure to
     * use the pid of the target thread, not our pid.
     */
    return (dynamorio_syscall(SYS_tgkill, 3, pid, tid, signum) == 0);
#endif
}
/* Sends signum to a thread we track in a thread_record_t.
 * On MacOS uses the thread port stored in the dcontext; elsewhere
 * delegates to thread_signal().
 */
static bool
known_thread_signal(thread_record_t *tr, int signum)
{
#ifdef MACOS
    ptr_int_t res;
    /* no dcontext => no thread port to target */
    if (tr->dcontext == NULL)
        return FALSE;
    res = dynamorio_syscall(SYS___pthread_kill, 2, tr->dcontext->thread_port, signum);
    LOG(THREAD_GET, LOG_ALL, 3, "%s: signal %d to port %d => %ld\n", __FUNCTION__,
        signum, tr->dcontext->thread_port, res);
    return res == 0;
#else
    return thread_signal(tr->pid, tr->id, signum);
#endif
}
/* Sleeps for the given duration, restarting after signal interruptions
 * until the full interval has elapsed (Linux) or giving up (MacOS FIXME).
 */
void
os_thread_sleep(uint64 milliseconds)
{
#ifdef MACOS
    semaphore_t sem = MACH_PORT_NULL;
    int res;
#else
    struct timespec remain;
    int count = 0;
#endif
    struct timespec req;
    req.tv_sec = (milliseconds / 1000);
    /* docs say can go up to 1000000000, but doesn't work on FC9 */
    req.tv_nsec = (milliseconds % 1000) * 1000000;
#ifdef MACOS
    /* NOTE(review): sem is a local initialized to MACH_PORT_NULL just above,
     * so this branch is always taken and a new semaphore is created on every
     * call -- looks like sem was meant to be static; confirm (and check for a
     * leak since it is never destroyed).
     */
    if (sem == MACH_PORT_NULL) {
        DEBUG_DECLARE(kern_return_t res =)
            semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
        ASSERT(res == KERN_SUCCESS);
    }
    res = dynamorio_syscall(SYSNUM_NO_CANCEL(SYS___semwait_signal),
                            6, sem, MACH_PORT_NULL, 1, 1,
                            (int64_t)req.tv_sec, (int32_t)req.tv_nsec);
    if (res == -EINTR) {
        /* FIXME i#58: figure out how much time elapsed and re-wait */
    }
#else
    /* FIXME: if we need accurate sleeps in presence of itimers we should
     * be using SYS_clock_nanosleep w/ an absolute time instead of relative
     */
    while (dynamorio_syscall(SYS_nanosleep, 2, &req, &remain) == -EINTR) {
        /* interrupted by signal or something: finish the interval */
        ASSERT_CURIOSITY_ONCE(remain.tv_sec <= req.tv_sec &&
                              (remain.tv_sec < req.tv_sec ||
                               /* there seems to be some rounding, and sometimes
                                * remain nsec > req nsec (I've seen 40K diff)
                                */
                               req.tv_nsec - remain.tv_nsec < 100000 ||
                               req.tv_nsec - remain.tv_nsec > -100000));
        /* not unusual for client threads to use itimers and have their run
         * routine sleep forever
         */
        if (count++ > 3 && !IS_CLIENT_THREAD(get_thread_private_dcontext())) {
            ASSERT_NOT_REACHED();
            break; /* paranoid */
        }
        req = remain;
    }
#endif
}
/* Suspends the target thread via SUSPEND_SIGNAL and waits until it reaches
 * the suspend point.  Returns false if the signal could not be delivered.
 * Suspends nest: only the first increment actually sends the signal.
 */
bool
os_thread_suspend(thread_record_t *tr)
{
    os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field;
    ASSERT(ostd != NULL);
    /* See synch comments in os_thread_resume: the mutex held there
     * prevents prematurely sending a re-suspend signal.
     */
    mutex_lock(&ostd->suspend_lock);
    ostd->suspend_count++;
    ASSERT(ostd->suspend_count > 0);
    /* If already suspended, do not send another signal.  However, we do
     * need to ensure the target is suspended in case of a race, so we can't
     * just return.
     */
    if (ostd->suspend_count == 1) {
        /* PR 212090: we use a custom signal handler to suspend.  We wait
         * here until the target reaches the suspend point, and leave it
         * up to the caller to check whether it is a safe suspend point,
         * to match Windows behavior.
         */
        ASSERT(ksynch_get_value(&ostd->suspended) == 0);
        if (!known_thread_signal(tr, SUSPEND_SIGNAL)) {
            /* roll back the count on delivery failure */
            ostd->suspend_count--;
            mutex_unlock(&ostd->suspend_lock);
            return false;
        }
    }
    /* we can unlock before the wait loop b/c we're using a separate "resumed"
     * int and os_thread_resume holds the lock across its wait.  this way a resume
     * can proceed as soon as the suspended thread is suspended, before the
     * suspending thread gets scheduled again.
     */
    mutex_unlock(&ostd->suspend_lock);
    while (ksynch_get_value(&ostd->suspended) == 0) {
        /* For Linux, waits only if the suspended flag is not set as 1. Return value
         * doesn't matter because the flag will be re-checked.
         */
        ksynch_wait(&ostd->suspended, 0);
        if (ksynch_get_value(&ostd->suspended) == 0) {
            /* If it still has to wait, give up the cpu. */
            os_thread_yield();
        }
    }
    return true;
}
/* Undoes one os_thread_suspend.  Only when the nested suspend count drops
 * to zero is the target actually woken; the lock is held across the wait
 * for the target's post-resume acknowledgment (see comment below).
 */
bool
os_thread_resume(thread_record_t *tr)
{
    os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field;
    ASSERT(ostd != NULL);
    /* This mutex prevents sending a re-suspend signal before the target
     * reaches a safe post-resume point from a first suspend signal.
     * Given that race, we can't just use atomic_add_exchange_int +
     * atomic_dec_becomes_zero on suspend_count.
     */
    mutex_lock(&ostd->suspend_lock);
    ASSERT(ostd->suspend_count > 0);
    /* PR 479750: if do get here and target is not suspended then abort
     * to avoid possible deadlocks
     */
    if (ostd->suspend_count == 0) {
        mutex_unlock(&ostd->suspend_lock);
        return true; /* the thread is "resumed", so success status */
    }
    ostd->suspend_count--;
    if (ostd->suspend_count > 0) {
        mutex_unlock(&ostd->suspend_lock);
        return true; /* still suspended */
    }
    /* count hit zero: wake the target and wait for it to acknowledge */
    ksynch_set_value(&ostd->wakeup, 1);
    ksynch_wake(&ostd->wakeup);
    while (ksynch_get_value(&ostd->resumed) == 0) {
        /* For Linux, waits only if the resumed flag is not set as 1.  Return value
         * doesn't matter because the flag will be re-checked.
         */
        ksynch_wait(&ostd->resumed, 0);
        if (ksynch_get_value(&ostd->resumed) == 0) {
            /* If it still has to wait, give up the cpu. */
            os_thread_yield();
        }
    }
    /* reset both flags for the next suspend/resume cycle */
    ksynch_set_value(&ostd->wakeup, 0);
    ksynch_set_value(&ostd->resumed, 0);
    mutex_unlock(&ostd->suspend_lock);
    return true;
}
/* Asks the target thread to terminate itself.  Returns whether the
 * notification signal was delivered (not whether the thread has exited).
 */
bool
os_thread_terminate(thread_record_t *tr)
{
    /* PR 297902: for NPTL sending SIGKILL will take down the whole group:
     * so instead we send SIGUSR2 and have a flag set telling
     * target thread to execute SYS_exit
     */
    os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field;
    ASSERT(ostd != NULL);
    /* the signal handler checks this flag and exits instead of suspending */
    ostd->terminate = true;
    /* Even if the thread is currently suspended, it's simpler to send it
     * another signal than to resume it.
     */
    return known_thread_signal(tr, SUSPEND_SIGNAL);
}
/* Returns whether the thread has marked itself as fully terminated. */
bool
is_thread_terminated(dcontext_t *dcontext)
{
    os_thread_data_t *tdata = (os_thread_data_t *) dcontext->os_field;
    ASSERT(tdata != NULL);
    return (ksynch_get_value(&tdata->terminated) == 1);
}
/* Blocks until *var becomes non-zero, using a futex-style wait with a
 * yield fallback to avoid spinning on spurious wakeups.
 */
static void
os_wait_thread_futex(KSYNCH_TYPE *var)
{
    for (;;) {
        if (ksynch_get_value(var) != 0)
            break;
        /* On Linux this only blocks while *var is still 0; the return
         * value is irrelevant since we re-check the flag ourselves.
         */
        ksynch_wait(var, 0);
        if (ksynch_get_value(var) == 0) {
            /* Still not set: give up the cpu rather than spin. */
            os_thread_yield();
        }
    }
}
/* Blocks until the given thread has signaled its termination. */
void
os_wait_thread_terminated(dcontext_t *dcontext)
{
    os_thread_data_t *tdata = (os_thread_data_t *) dcontext->os_field;
    ASSERT(tdata != NULL);
    os_wait_thread_futex(&tdata->terminated);
}
/* Blocks until the given thread has signaled that it is detached. */
void
os_wait_thread_detached(dcontext_t *dcontext)
{
    os_thread_data_t *tdata = (os_thread_data_t *) dcontext->os_field;
    ASSERT(tdata != NULL);
    os_wait_thread_futex(&tdata->detached);
}
/* Flags the given thread so that it will detach itself. */
void
os_signal_thread_detach(dcontext_t *dcontext)
{
    os_thread_data_t *tdata = (os_thread_data_t *) dcontext->os_field;
    ASSERT(tdata != NULL);
    tdata->do_detach = true;
}
/* Copies the target thread's machine context out of its saved signal
 * context.  Valid only while we hold the thread suspended.
 */
bool
thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
    /* PR 212090: only works when target is suspended by us, and
     * we then take the signal context
     */
    os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field;
    ASSERT(ostd != NULL);
    ASSERT(ostd->suspend_count > 0);
    if (ostd->suspend_count == 0)
        return false;
    ASSERT(ostd->suspended_sigcxt != NULL);
    sigcontext_to_mcontext(mc, ostd->suspended_sigcxt);
    return true;
}
/* Writes a machine context into the target thread's saved signal context,
 * so it takes effect when the thread resumes.  Valid only while we hold
 * the thread suspended.
 */
bool
thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
    /* PR 212090: only works when target is suspended by us, and
     * we then replace the signal context
     */
    os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field;
    ASSERT(ostd != NULL);
    ASSERT(ostd->suspend_count > 0);
    if (ostd->suspend_count == 0)
        return false;
    ASSERT(ostd->suspended_sigcxt != NULL);
    mcontext_to_sigcontext(ostd->suspended_sigcxt, mc);
    return true;
}
/* Returns whether the thread is currently running natively (either never
 * under our control, or stopped via the start/stop interface).
 */
bool
is_thread_currently_native(thread_record_t *tr)
{
    if (!tr->under_dynamo_control)
        return true;
    /* start/stop doesn't change under_dynamo_control and has its own field */
    return (tr->dcontext != NULL && tr->dcontext->currently_stopped);
}
#ifdef CLIENT_SIDELINE /* PR 222812: tied to sideline usage */
# ifdef LINUX /* XXX i#58: just until we have Mac support */
/* Entry point for client threads created by dr_create_client_thread():
 * initializes DR for the new thread, pulls the client's function and
 * argument out of the clone record, runs the function, and terminates
 * the thread (cleaning up its stack) when it returns.
 */
static void
client_thread_run(void)
{
    void (*func)(void *param);
    dcontext_t *dcontext;
    byte *xsp;
    GET_STACK_PTR(xsp);
    /* the parent stashed a clone record reachable from our stack pointer */
    void *crec = get_clone_record((reg_t)xsp);
    IF_DEBUG(int rc = )
        dynamo_thread_init(get_clone_record_dstack(crec), NULL, true);
    ASSERT(rc != -1); /* this better be a new thread */
    dcontext = get_thread_private_dcontext();
    ASSERT(dcontext != NULL);
    LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d *****\n\n",
        get_thread_id());
    /* We stored the func and args in particular clone record fields */
    func = (void (*)(void *param)) signal_thread_inherit(dcontext, crec);
    void *arg = (void *) get_clone_record_app_xsp(crec);
    LOG(THREAD, LOG_ALL, 1, "func="PFX", arg="PFX"\n", func, arg);
    (*func)(arg);
    LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d EXITING *****\n\n",
        get_thread_id());
    /* auto-terminate on return from the run function */
    cleanup_and_terminate(dcontext, SYS_exit, 0, 0, false/*just thread*/,
                          IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
}
# endif
/* i#41/PR 222812: client threads
 * * thread must have dcontext since many API routines require one and we
 *   don't expose GLOBAL_DCONTEXT (xref PR 243008, PR 216936, PR 536058)
 * * reversed the old design of not using dstack (partly b/c want dcontext)
 *   and I'm using the same parent-creates-dstack and clone_record_t design
 *   to create linux threads: dstack should be big enough for client threads
 *   (xref PR 202669)
 * * reversed the old design of explicit dr_terminate_client_thread(): now
 *   the thread is auto-terminated and stack cleaned up on return from run
 *   function
 */
/* Creates a client thread that starts in client_thread_run() and invokes
 * func(arg).  Returns whether the clone succeeded.  Linux-only (FIXME i#58).
 */
DR_API bool
dr_create_client_thread(void (*func)(void *param), void *arg)
{
#ifdef LINUX
    dcontext_t *dcontext = get_thread_private_dcontext();
    byte *xsp;
    /* We do not pass SIGCHLD since don't want signal to parent and don't support
     * waiting on child.
     * We do not pass CLONE_THREAD so that the new thread is in its own thread
     * group, allowing it to have private itimers and not receive any signals
     * sent to the app's thread groups.  It also makes the thread not show up in
     * the thread list for the app, making it more invisible.
     */
    uint flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
        IF_NOT_X64(| CLONE_SETTLS)
        /* CLONE_THREAD required.  Signals and itimers are private anyway. */
        IF_VMX86(| (os_in_vmkernel_userworld() ? CLONE_THREAD : 0));
    pre_second_thread();
    /* need to share signal handler table, prior to creating clone record */
    handle_clone(dcontext, flags);
    void *crec = create_clone_record(dcontext, (reg_t*)&xsp);
    /* make sure client_thread_run can get the func and arg, and that
     * signal_thread_inherit gets the right syscall info
     */
    set_clone_record_fields(crec, (reg_t) arg, (app_pc) func, SYS_clone, flags);
    /* i#501 switch to app's tls before creating client thread */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
        os_switch_lib_tls(dcontext, true/*to app*/);
# if defined(X86) && !defined(X64)
    /* For the TCB we simply share the parent's.  On Linux we could just inherit
     * the same selector but not for VMX86_SERVER so we specify for both for
     * 32-bit.  Most of the fields are pthreads-specific and we assume the ones
     * that will be used (such as tcbhead_t.sysinfo @0x10) are read-only.
     */
    our_modify_ldt_t desc;
    /* if get_segment_base() returned size too we could use it */
    uint index = tls_priv_lib_index();
    ASSERT(index != -1);
    if (!tls_get_descriptor(index, &desc)) {
        LOG(THREAD, LOG_ALL, 1,
            "%s: client thread tls get entry %d failed\n", __FUNCTION__, index);
        return false;
    }
# endif
    LOG(THREAD, LOG_ALL, 1, "dr_create_client_thread xsp="PFX" dstack="PFX"\n",
        xsp, get_clone_record_dstack(crec));
    thread_id_t newpid = dynamorio_clone(flags, xsp, NULL,
                                         IF_X86_ELSE(IF_X64_ELSE(NULL, &desc), NULL),
                                         NULL, client_thread_run);
    /* i#501: switch back to DR's tls now that the clone has been issued */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
        os_switch_lib_tls(dcontext, false/*to dr*/);
    if (newpid < 0) {
        LOG(THREAD, LOG_ALL, 1, "client thread creation failed: %d\n", newpid);
        return false;
    } else if (newpid == 0) {
        /* dynamorio_clone() should have called client_thread_run directly */
        ASSERT_NOT_REACHED();
        return false;
    }
    return true;
#else
    ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: implement on Mac */
    return false;
#endif
}
#endif /* CLIENT_SIDELINE PR 222812: tied to sideline usage */
/* Returns the number of processors, computing it once and caching the
 * result in a function-local static thereafter.
 */
int
get_num_processors(void)
{
    static uint num_cpu = 0;         /* cached value */
    if (!num_cpu) {
#ifdef MACOS
        DEBUG_DECLARE(bool ok =)
            sysctl_query(CTL_HW, HW_NCPU, &num_cpu, sizeof(num_cpu));
        ASSERT(ok);
#else
        /* We used to use get_nprocs_conf, but that's in libc, so now we just
         * look at the /sys filesystem ourselves, which is what glibc does.
         */
        uint local_num_cpus = 0;
        file_t cpu_dir = os_open_directory("/sys/devices/system/cpu",
                                           OS_OPEN_READ);
        dir_iterator_t iter;
        ASSERT(cpu_dir != INVALID_FILE &&
               "/sys must be mounted: mount -t sysfs sysfs /sys");
        os_dir_iterator_start(&iter, cpu_dir);
        /* count the cpuN entries */
        while (os_dir_iterator_next(&iter)) {
            int dummy_num;
            if (sscanf(iter.name, "cpu%d", &dummy_num) == 1)
                local_num_cpus++;
        }
        os_close(cpu_dir);
        num_cpu = local_num_cpus;
#endif
        ASSERT(num_cpu);
    }
    return num_cpu;
}
/* i#46: To support -no_private_loader, we have to call the dlfcn family of
* routines in libdl.so. When we do early injection, there is no loader to
* resolve these imports, so they will crash. Early injection is incompatible
* with -no_private_loader, so this should never happen.
*/
#if defined(CLIENT_INTERFACE) || defined(HOT_PATCHING_INTERFACE)
/* Loads a shared library for DR's own use, preferring the private loader;
 * falls back to dlopen only for static-library/MacOS builds.  "reachable"
 * asks the private loader for a rip-reachable placement.
 */
shlib_handle_t
load_shared_library(const char *name, bool reachable)
{
# ifdef STATIC_LIBRARY
    if (os_files_same(name, get_application_name())) {
        /* The private loader falls back to dlsym() and friends for modules it
         * doesn't recognize, so this works without disabling the private loader.
         */
        return dlopen(NULL, RTLD_LAZY); /* Gets a handle to the exe. */
    }
# endif
    /* We call locate_and_load_private_library() to support searching for
     * a pathless name.
     */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
        return (shlib_handle_t) locate_and_load_private_library(name, reachable);
# if defined(STATIC_LIBRARY) || defined(MACOS)
    ASSERT(!DYNAMO_OPTION(early_inject));
    return dlopen(name, RTLD_LAZY);
# else
    /* -no_private_loader is no longer supported in our default builds.
     * If we want it for hybrid mode we should add a new build param and include
     * the libdl calls here under that param.
     */
    ASSERT_NOT_REACHED();
    return NULL;
# endif
}
#endif
#if defined(CLIENT_INTERFACE)
/* Looks up an exported routine in a library handle returned by
 * load_shared_library(), via the private loader when enabled.
 */
shlib_routine_ptr_t
lookup_library_routine(shlib_handle_t lib, const char *name)
{
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        return (shlib_routine_ptr_t)
            get_private_library_address((app_pc)lib, name);
    }
# if defined(STATIC_LIBRARY) || defined(MACOS)
    ASSERT(!DYNAMO_OPTION(early_inject));
    return dlsym(lib, name);
# else
    ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
    return NULL;
# endif
}
/* Unloads a library loaded by load_shared_library().  Honors the
 * -avoid_dlclose option in the dlopen fallback path.
 */
void
unload_shared_library(shlib_handle_t lib)
{
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        unload_private_library(lib);
    } else {
# if defined(STATIC_LIBRARY) || defined(MACOS)
        ASSERT(!DYNAMO_OPTION(early_inject));
        if (!DYNAMO_OPTION(avoid_dlclose)) {
            dlclose(lib);
        }
# else
        ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
# endif
    }
}
/* Copies the most recent library-loading error message into buf,
 * always NUL-terminating it.
 * NOTE(review): assumes maxlen >= 1; maxlen == 0 would index buf[-1] --
 * confirm all callers pass a real buffer size.
 */
void
shared_library_error(char *buf, int maxlen)
{
    const char *err;
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        /* the private loader does not record detailed error strings */
        err = "error in private loader";
    } else {
# if defined(STATIC_LIBRARY) || defined(MACOS)
        ASSERT(!DYNAMO_OPTION(early_inject));
        err = dlerror();
        if (err == NULL) {
            err = "dlerror returned NULL";
        }
# else
        ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported */
        err = "unknown error";
# endif
    }
    strncpy(buf, err, maxlen-1);
    buf[maxlen-1] = '\0'; /* strncpy won't put on trailing null if maxes out */
}
/* addr is any pointer known to lie within the library.
 * for linux, one of addr or name is needed; for windows, neither is needed.
 * Writes the library's [start, end) bounds and returns true on success.
 * Checks the private-loader module list first, then falls back to a
 * memory-map query.
 */
bool
shared_library_bounds(IN shlib_handle_t lib, IN byte *addr,
                      IN const char *name,
                      OUT byte **start, OUT byte **end)
{
    ASSERT(start != NULL && end != NULL);
    /* PR 366195: dlopen() handle truly is opaque, so we have to use either
     * addr or name
     */
    ASSERT(addr != NULL || name != NULL);
    *start = addr;
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        privmod_t *mod;
        /* look for private library first */
        acquire_recursive_lock(&privload_lock);
        /* private-loader handles are module base addresses */
        mod = privload_lookup_by_base((app_pc)lib);
        if (name != NULL && mod == NULL)
            mod = privload_lookup(name);
        if (mod != NULL && !mod->externally_loaded) {
            *start = mod->base;
            /* NOTE(review): end is re-checked for NULL here despite the
             * ASSERT above, which is compiled out in release builds. */
            if (end != NULL)
                *end = mod->base + mod->size;
            release_recursive_lock(&privload_lock);
            return true;
        }
        release_recursive_lock(&privload_lock);
    }
    /* fall back to scanning the OS memory map */
    return (memquery_library_bounds(name, start, end, NULL, 0) > 0);
}
#endif /* defined(CLIENT_INTERFACE) */
#endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */
/* FIXME - not available in 2.0 or earlier kernels, not really an issue since no one
 * should be running anything that old. */
/* Seeks fd to "offset" relative to "origin" (SEEK_SET/CUR/END), storing the
 * resulting absolute position in *result.  Returns 0 on success or the
 * negative errno value on failure.
 */
int
llseek_syscall(int fd, int64 offset, int origin, int64 *result)
{
#if defined(X64) || defined(MACOS)
# ifndef X64
    /* 2 slots for 64-bit arg */
    *result = dynamorio_syscall(SYS_lseek, 4, fd, (uint)(offset & 0xFFFFFFFF),
                                (uint)((offset >> 32) & 0xFFFFFFFF), origin);
# else
    *result = dynamorio_syscall(SYS_lseek, 3, fd, offset, origin);
# endif
    /* lseek returns the new offset (>= 0) or a negative errno */
    return ((*result > 0) ? 0 : (int)*result);
#else
    /* 32-bit Linux: SYS__llseek splits the offset into two 32-bit halves
     * and writes the result through a pointer.
     */
    return dynamorio_syscall(SYS__llseek, 5, fd, (uint)((offset >> 32) & 0xFFFFFFFF),
                             (uint)(offset & 0xFFFFFFFF), result, origin);
#endif
}
/* Raw stat on a path, using SYS_fstatat(AT_FDCWD) on platforms that lack a
 * direct stat syscall number.  Returns 0 or a negative errno value.
 */
static ptr_int_t
dynamorio_syscall_stat(const char *fname, struct stat64 *st)
{
#ifdef SYSNUM_STAT
    return dynamorio_syscall(SYSNUM_STAT, 2, fname, st);
#else
    return dynamorio_syscall(SYS_fstatat, 4, AT_FDCWD, fname, st, 0);
#endif
}
/* Returns true if fname exists; when is_dir is set, additionally requires
 * that it name a directory.
 */
bool
os_file_exists(const char *fname, bool is_dir)
{
    /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */
    struct stat64 st;
    ptr_int_t rc = dynamorio_syscall_stat(fname, &st);
    if (rc != 0) {
        LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, rc);
        return false;
    }
    if (!is_dir)
        return true;
    return S_ISDIR(st.st_mode);
}
/* Returns true if two paths point to the same file. Follows symlinks.
 */
bool
os_files_same(const char *path1, const char *path2)
{
    struct stat64 st1, st2;
    ptr_int_t res = dynamorio_syscall_stat(path1, &st1);
    if (res != 0) {
        LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res);
        return false;
    }
    res = dynamorio_syscall_stat(path2, &st2);
    if (res != 0) {
        LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res);
        return false;
    }
    /* An inode number is only unique within one filesystem, so compare the
     * device id as well to avoid false positives across mount points.
     */
    return st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino;
}
/* Retrieves the size in bytes of the file at path "file" into *size.
 * Returns false (and logs) if the stat fails.
 */
bool
os_get_file_size(const char *file, uint64 *size)
{
    /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */
    struct stat64 st;
    ptr_int_t rc = dynamorio_syscall_stat(file, &st);
    if (rc != 0) {
        LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, rc);
        return false;
    }
    ASSERT(size != NULL);
    *size = st.st_size;
    return true;
}
/* Retrieves the size in bytes of the already-open file fd into *size.
 * Returns false (and logs) if the fstat fails.
 */
bool
os_get_file_size_by_handle(file_t fd, uint64 *size)
{
    /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */
    struct stat64 st;
    ptr_int_t rc = dynamorio_syscall(SYSNUM_FSTAT, 2, fd, &st);
    if (rc != 0) {
        LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, rc);
        return false;
    }
    ASSERT(size != NULL);
    *size = st.st_size;
    return true;
}
/* created directory will be owned by effective uid,
 * Note a symbolic link will never be followed.
 * Returns true if the directory was created, or already existed and the
 * caller passed CREATE_DIR_ALLOW_EXISTING.
 */
bool
os_create_dir(const char *fname, create_directory_flags_t create_dir_flags)
{
    bool require_new = TEST(CREATE_DIR_REQUIRE_NEW, create_dir_flags);
    int rc;
    /* Validate the flags precondition before issuing the syscall rather
     * than after the flags have already been consumed.
     */
    ASSERT(create_dir_flags == CREATE_DIR_REQUIRE_NEW ||
           create_dir_flags == CREATE_DIR_ALLOW_EXISTING);
#ifdef SYS_mkdir
    rc = dynamorio_syscall(SYS_mkdir, 2, fname, S_IRWXU|S_IRWXG);
#else
    rc = dynamorio_syscall(SYS_mkdirat, 3, AT_FDCWD, fname, S_IRWXU|S_IRWXG);
#endif
    /* -EEXIST is acceptable unless the caller required a fresh directory */
    return (rc == 0 || (!require_new && rc == -EEXIST));
}
/* Removes an (empty) directory; returns true on success.  Uses
 * SYS_unlinkat(AT_REMOVEDIR) on platforms without SYS_rmdir.
 */
bool
os_delete_dir(const char *name)
{
#ifdef SYS_rmdir
    return (dynamorio_syscall(SYS_rmdir, 1, name) == 0);
#else
    return (dynamorio_syscall(SYS_unlinkat, 3,
                              AT_FDCWD, name, AT_REMOVEDIR) == 0);
#endif
}
/* Raw open(2)/openat(2) wrapper; returns the new fd or a negative errno.
 * SYSNUM_NO_CANCEL selects the non-cancellable variant where applicable.
 */
int
open_syscall(const char *file, int flags, int mode)
{
    ASSERT(file != NULL);
#ifdef SYS_open
    return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_open), 3, file, flags, mode);
#else
    return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_openat), 4,
                             AT_FDCWD, file, flags, mode);
#endif
}
/* Raw close(2) wrapper; returns 0 or a negative errno. */
int
close_syscall(int fd)
{
    return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_close), 1, fd);
}
/* Raw dup(2) wrapper; returns the new fd or a negative errno. */
int
dup_syscall(int fd)
{
    return dynamorio_syscall(SYS_dup, 1, fd);
}
/* Raw read(2) wrapper; returns bytes read or a negative errno. */
ssize_t
read_syscall(int fd, void *buf, size_t nbytes)
{
    return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_read), 3, fd, buf, nbytes);
}
/* Raw write(2) wrapper; returns bytes written or a negative errno. */
ssize_t
write_syscall(int fd, const void *buf, size_t nbytes)
{
    return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_write), 3, fd, buf, nbytes);
}
#ifndef NOT_DYNAMORIO_CORE_PROPER
/* Raw fcntl(2) wrapper; returns the command-specific result or a negative
 * errno.
 */
static int
fcntl_syscall(int fd, int cmd, long arg)
{
    return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_fcntl), 3, fd, cmd, arg);
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER */
/* not easily accessible in header files */
/* Fallback definition of O_LARGEFILE for toolchains whose headers omit it. */
#ifndef O_LARGEFILE
# ifdef X64
/* not needed */
# define O_LARGEFILE 0
# else
# define O_LARGEFILE 0100000
# endif
#endif
/* we assume that opening for writing wants to create file.
 * we also assume that nobody calling this is creating a persistent
 * file: for that, use os_open_protected() to avoid leaking on exec
 * and to separate from the app's files.
 * Translates OS_OPEN_* flags into open(2) flags and returns the new
 * fd, or INVALID_FILE on failure.
 */
file_t
os_open(const char *fname, int os_open_flags)
{
    int res;
    int flags = 0;
    if (TEST(OS_OPEN_ALLOW_LARGE, os_open_flags))
        flags |= O_LARGEFILE;
    if (TEST(OS_OPEN_WRITE_ONLY, os_open_flags)) {
        res = open_syscall(fname, flags|O_WRONLY, 0);
    } else if (!TEST(OS_OPEN_WRITE, os_open_flags)) {
        res = open_syscall(fname, flags|O_RDONLY, 0);
    } else {
        /* Currently we only support either appending
         * or truncating, just like Windows and the client
         * interface. If we end up w/ a use case that wants
         * neither it could open append and then seek; if we do
         * add OS_TRUNCATE or sthg we'll need to add it to
         * any current writers who don't set OS_OPEN_REQUIRE_NEW.
         */
        flags |= O_RDWR | O_CREAT;
        flags |= TEST(OS_OPEN_APPEND, os_open_flags) ? O_APPEND : O_TRUNC;
        if (TEST(OS_OPEN_REQUIRE_NEW, os_open_flags))
            flags |= O_EXCL;
        res = open_syscall(fname, flags, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP);
    }
    return (res < 0) ? INVALID_FILE : res;
}
/* Opens a directory; on this platform a directory fd needs no special
 * open flags, so this is a plain os_open().
 */
file_t
os_open_directory(const char *fname, int os_open_flags)
{
    /* no special handling */
    return os_open(fname, os_open_flags);
}
/* Closes a file opened via os_open(); any error from close is ignored. */
void
os_close(file_t f)
{
    close_syscall(f);
}
#ifndef NOT_DYNAMORIO_CORE_PROPER
/* dups curfd to a private fd.
 * returns -1 if unsuccessful.
 * "Private" means at or above min_dr_fd, the range DR steals from the app
 * (controlled by -steal_fds) so app fd manipulation won't collide with ours.
 */
file_t
fd_priv_dup(file_t curfd)
{
    file_t newfd = -1;
    if (DYNAMO_OPTION(steal_fds) > 0) {
        /* RLIMIT_NOFILES is 1 greater than max and F_DUPFD starts at given value */
        /* XXX: if > linux 2.6.24, can use F_DUPFD_CLOEXEC to avoid later call:
         * so how do we tell if the flag is supported?  try calling once at init?
         */
        newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd);
        if (newfd < 0) {
            /* We probably ran out of fds, esp if debug build and there are
             * lots of threads.  Should we track how many we've given out to
             * avoid a failed syscall every time after?
             */
            SYSLOG_INTERNAL_WARNING_ONCE("ran out of stolen fd space");
            /* Try again but this time in the app space, somewhere high up
             * to avoid issues like tcsh assuming it can own fds 3-5 for
             * piping std{in,out,err} (xref the old -open_tcsh_fds option).
             */
            newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd/2);
        }
    }
    return newfd;
}
/* Sets FD_CLOEXEC on fd so it does not leak across execve.
 * Returns false (after a warning) if the fcntl fails.
 */
bool
fd_mark_close_on_exec(file_t fd)
{
    /* we assume FD_CLOEXEC is the only flag and don't bother w/ F_GETFD */
    bool ok = (fcntl_syscall(fd, F_SETFD, FD_CLOEXEC) == 0);
    if (!ok)
        SYSLOG_INTERNAL_WARNING("unable to mark file %d as close-on-exec", fd);
    return ok;
}
/* Records a DR-owned fd (and its OS_OPEN_* flags) in the global fd table
 * so we can distinguish our fds from the app's.  Called before the table
 * exists only for the main logfile during os_init().
 */
void
fd_table_add(file_t fd, uint flags)
{
    if (fd_table != NULL) {
        TABLE_RWLOCK(fd_table, write, lock);
        DODEBUG({
            /* i#1010: If the fd is already in the table, chances are it's a
             * stale logfile fd left behind by a vforked or cloned child that
             * called execve.  Avoid an assert if that happens.
             */
            bool present = generic_hash_remove(GLOBAL_DCONTEXT, fd_table,
                                              (ptr_uint_t)fd);
            ASSERT_CURIOSITY_ONCE(!present && "stale fd not cleaned up");
        });
        generic_hash_add(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd,
                         /* store the flags, w/ a set bit to ensure not 0 */
                         (void *)(ptr_uint_t)(flags|OS_OPEN_RESERVED));
        TABLE_RWLOCK(fd_table, write, unlock);
    } else {
#ifdef DEBUG
        num_fd_add_pre_heap++;
        /* we add main_logfile in os_init() */
        ASSERT(num_fd_add_pre_heap == 1 && "only main_logfile should come here");
#endif
    }
}
/* Returns true if fd was registered in the fd table via fd_table_add(),
 * i.e. it belongs to DR rather than the app.  A zero lookup result means
 * "absent" because stored flags always include OS_OPEN_RESERVED.
 */
static bool
fd_is_dr_owned(file_t fd)
{
    ptr_uint_t flags;
    ASSERT(fd_table != NULL);
    TABLE_RWLOCK(fd_table, read, lock);
    flags = (ptr_uint_t) generic_hash_lookup(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
    TABLE_RWLOCK(fd_table, read, unlock);
    return (flags != 0);
}
/* Returns true if fd lies in the numeric range DR steals for its own fds
 * (>= min_dr_fd, when -steal_fds is enabled).
 */
static bool
fd_is_in_private_range(file_t fd)
{
    return (DYNAMO_OPTION(steal_fds) > 0 && min_dr_fd > 0 && fd >= min_dr_fd);
}
/* Like os_open() but for persistent DR-owned files: moves the fd into DR's
 * private range, marks it close-on-exec, and registers it in the fd table.
 */
file_t
os_open_protected(const char *fname, int os_open_flags)
{
    file_t dup;
    file_t res = os_open(fname, os_open_flags);
    if (res < 0)
        return res;
    /* we could have os_open() always switch to a private fd but it's probably
     * not worth the extra syscall for temporary open/close sequences so we
     * only use it for persistent files
     */
    dup = fd_priv_dup(res);
    if (dup >= 0) {
        close_syscall(res);
        res = dup;
        fd_mark_close_on_exec(res);
    } /* else just keep original */
    /* ditto here, plus for things like config.c opening files we can't handle
     * grabbing locks and often don't have heap available so no fd_table
     */
    fd_table_add(res, os_open_flags);
    return res;
}
/* Closes an fd opened via os_open_protected(), removing it from the fd
 * table first (the table may already be gone during process exit).
 */
void
os_close_protected(file_t f)
{
    ASSERT(fd_table != NULL || dynamo_exited);
    if (fd_table != NULL) {
        TABLE_RWLOCK(fd_table, write, lock);
        generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)f);
        TABLE_RWLOCK(fd_table, write, unlock);
    }
    os_close(f);
}
/* Writes the current working directory into buf (bufsz bytes, always
 * null-terminated on success).  Returns false on failure or truncation.
 * On MacOS there is no getcwd syscall so we open "." and use F_GETPATH.
 */
bool
os_get_current_dir(char *buf, size_t bufsz)
{
# ifdef MACOS
    static char noheap_buf[MAXPATHLEN];
    bool res = false;
    file_t fd = os_open(".", OS_OPEN_READ);
    int len;
    /* F_GETPATH assumes a buffer of size MAXPATHLEN */
    char *fcntl_buf;
    if (dynamo_heap_initialized)
        fcntl_buf = global_heap_alloc(MAXPATHLEN HEAPACCT(ACCT_OTHER));
    else
        fcntl_buf = noheap_buf;
    if (fd == INVALID_FILE)
        goto cwd_cleanup;
    if (fcntl_syscall(fd, F_GETPATH, (long)fcntl_buf) != 0)
        goto cwd_cleanup;
    len = snprintf(buf, bufsz, "%s", fcntl_buf);
    buf[bufsz-1] = '\0';
    res = (len > 0 && len < bufsz);
    /* Fall through to the common cleanup so the success path does not leak
     * the heap buffer or the fd (previously it returned early and did).
     */
 cwd_cleanup:
    if (dynamo_heap_initialized)
        global_heap_free(fcntl_buf, MAXPATHLEN HEAPACCT(ACCT_OTHER));
    /* don't try to close an fd we never opened */
    if (fd != INVALID_FILE)
        os_close(fd);
    return res;
# else
    return (dynamorio_syscall(SYS_getcwd, 2, buf, bufsz) > 0);
# endif
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER */
#ifndef NOT_DYNAMORIO_CORE_PROPER /* so drinject can use drdecode's copy */
/* Writes count bytes from buf to f; returns bytes written or negative errno. */
ssize_t
os_write(file_t f, const void *buf, size_t count)
{
    return write_syscall(f, buf, count);
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER */
/* Reads up to count bytes from f into buf; returns bytes read or negative errno. */
ssize_t
os_read(file_t f, void *buf, size_t count)
{
    return read_syscall(f, buf, count);
}
/* Flushes buffered output for f: a no-op here since we write via raw
 * syscalls with no user-space buffering.
 */
void
os_flush(file_t f)
{
    /* we're not using FILE*, so there is no buffering */
}
/* seek the current file position to offset bytes from origin, return true if successful */
bool
os_seek(file_t f, int64 offset, int origin)
{
    /* the resulting absolute position is not needed by callers */
    int64 unused;
    return (llseek_syscall(f, offset, origin, &unused) == 0);
}
/* return the current file position, -1 on failure */
int64
os_tell(file_t f)
{
    int64 position = -1;
    if (llseek_syscall(f, 0, SEEK_CUR, &position) != 0)
        return -1;
    return position;
}
/* Unlinks the named file; returns true on success.  Uses SYS_unlinkat
 * on platforms without SYS_unlink.
 */
bool
os_delete_file(const char *name)
{
#ifdef SYS_unlink
    return (dynamorio_syscall(SYS_unlink, 1, name) == 0);
#else
    return (dynamorio_syscall(SYS_unlinkat, 3, AT_FDCWD, name, 0) == 0);
#endif
}
/* Renames orig_name to new_name.  When !replace, fails if new_name already
 * exists (checked via a racy pre-stat since SYS_rename always replaces).
 * Returns true on success.
 */
bool
os_rename_file(const char *orig_name, const char *new_name, bool replace)
{
    ptr_int_t res;
    if (!replace) {
        /* SYS_rename replaces so we must test beforehand => could have race */
        /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */
        struct stat64 st;
        /* use the outer res rather than a shadowing local declaration */
        res = dynamorio_syscall_stat(new_name, &st);
        if (res == 0)
            return false; /* target exists and replace was not requested */
        else if (res != -ENOENT) {
            LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s stat failed: "PIFX"\n", __func__, res);
            return false;
        }
    }
#ifdef SYS_rename
    res = dynamorio_syscall(SYS_rename, 2, orig_name, new_name);
#else
    res = dynamorio_syscall(SYS_renameat, 4,
                            AT_FDCWD, orig_name, AT_FDCWD, new_name);
#endif
    if (res != 0)
        LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s \"%s\" to \"%s\" failed: "PIFX"\n",
            __func__, orig_name, new_name, res);
    return (res == 0);
}
/* Deletes a file that may currently be mapped: on this OS unlink works
 * regardless of existing mappings, so no special handling is needed.
 */
bool
os_delete_mapped_file(const char *filename)
{
    return os_delete_file(filename);
}
/* Maps *size bytes of file f at offset offs, preferably at addr, with the
 * given memory protection.  f == -1 requests anonymous memory instead of a
 * file mapping.  MAP_FILE_FIXED forces addr; MAP_FILE_REACHABLE (x64 core
 * builds) retries within the vmcode-reachable region.  Returns the map
 * address or NULL on failure; *size is used but not updated.
 */
byte *
os_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
            map_flags_t map_flags)
{
    int flags;
    byte *map;
#if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER)
    bool loop = false;
    uint iters = 0;
#   define MAX_MMAP_LOOP_ITERS 100
    byte *region_start = NULL, *region_end = NULL;
#else
    /* 32-bit mmap2 takes the offset in pages, so convert (and check) it */
    uint pg_offs;
    ASSERT_TRUNCATE(pg_offs, uint, offs / PAGE_SIZE);
    pg_offs = (uint) (offs / PAGE_SIZE);
#endif
#ifdef VMX86_SERVER
    flags = MAP_PRIVATE; /* MAP_SHARED not supported yet */
#else
    flags = TEST(MAP_FILE_COPY_ON_WRITE, map_flags) ? MAP_PRIVATE : MAP_SHARED;
#endif
#if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER)
    /* Allocate memory from reachable range for image: or anything (pcache
     * in particular): for low 4GB, easiest to just pass MAP_32BIT (which is
     * low 2GB, but good enough).
     */
    if (DYNAMO_OPTION(heap_in_lower_4GB) && !TEST(MAP_FILE_FIXED, map_flags))
        flags |= MAP_32BIT;
#endif
    /* Allows memory request instead of mapping a file,
     * so we can request memory from a particular address with fixed argument */
    if (f == -1)
        flags |= MAP_ANONYMOUS;
    if (TEST(MAP_FILE_FIXED, map_flags))
        flags |= MAP_FIXED;
    /* Reachability is not supported for drinjectlib */
#if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER)
    if (!TEST(MAP_32BIT, flags) && TEST(MAP_FILE_REACHABLE, map_flags)) {
        vmcode_get_reachable_region(®ion_start, ®ion_end);
        /* addr need not be NULL: we'll use it if it's in the region */
        ASSERT(!TEST(MAP_FILE_FIXED, map_flags));
        /* Loop to handle races */
        loop = true;
    }
    /* When looping, each iteration either reuses a still-in-region addr
     * hint or picks a fresh free slot inside the reachable region.
     */
    while (!loop ||
           (addr != NULL && addr >= region_start && addr+*size <= region_end) ||
           find_free_memory_in_region(region_start, region_end, *size, &addr, NULL)) {
#endif
        map = mmap_syscall(addr, *size, memprot_to_osprot(prot),
                           flags, f,
                           /* x86 Linux mmap uses offset in pages */
                           IF_LINUX_ELSE(IF_X64_ELSE(offs, pg_offs), offs));
        if (!mmap_syscall_succeeded(map)) {
            LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n",
                __func__, map);
            map = NULL;
        }
#if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER)
        else if (loop && (map < region_start || map+*size > region_end)) {
            /* Try again: probably a race.  Hopefully our notion of "there's a free
             * region big enough" matches the kernel's, else we'll loop forever
             * (which we try to catch w/ a max iters count).
             */
            munmap_syscall(map, *size);
            map = NULL;
        } else
            break;
        if (!loop)
            break;
        if (++iters > MAX_MMAP_LOOP_ITERS) {
            ASSERT_NOT_REACHED();
            map = NULL;
            break;
        }
        addr = NULL; /* pick a new one */
    }
#endif
    return map;
}
/* Unmaps a region previously mapped by os_map_file(); true on success. */
bool
os_unmap_file(byte *map, size_t size)
{
    return (munmap_syscall(map, size) == 0);
}
/* around most of file, to exclude preload */
#if !defined(NOT_DYNAMORIO_CORE_PROPER) || defined(STANDALONE_UNIT_TEST)
/* Reports free-space figures for the filesystem containing file_handle.
 * Quotas are not supported, so the quota outputs reuse the plain
 * free-block counts.
 */
bool
os_get_disk_free_space(/*IN*/ file_t file_handle,
                       /*OUT*/ uint64 *AvailableQuotaBytes /*OPTIONAL*/,
                       /*OUT*/ uint64 *TotalQuotaBytes /*OPTIONAL*/,
                       /*OUT*/ uint64 *TotalVolumeBytes /*OPTIONAL*/)
{
    /* libc struct seems to match kernel's */
    struct statfs stat;
    ptr_int_t res = dynamorio_syscall(SYS_fstatfs, 2, file_handle, &stat);
    if (res != 0) {
        LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res);
        return false;
    }
    LOG(GLOBAL, LOG_STATS, 3,
        "os_get_disk_free_space: avail="SZFMT", free="SZFMT", bsize="SZFMT"\n",
        stat.f_bavail, stat.f_bfree, stat.f_bsize);
    /* f_bavail counts blocks available to unprivileged users */
    if (AvailableQuotaBytes != NULL)
        *AvailableQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
    /* no support for quotas */
    if (TotalQuotaBytes != NULL)
        *TotalQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
    if (TotalVolumeBytes != NULL) /* despite name this is how much is free */
        *TotalVolumeBytes = ((uint64)stat.f_bfree * stat.f_bsize);
    return true;
}
#ifdef LINUX
/* Returns true if path is a /proc exe symlink referring to this process. */
static bool
symlink_is_self_exe(const char *path)
{
    /* Look for "/proc/%d/exe" where %d exists in /proc/self/task/%d,
     * or "/proc/self/exe".  Rule out the exe link for another process
     * (though it could also be under DR we have no simple way to obtain
     * its actual app path).
     */
#   define SELF_LEN_LEADER 6  /* "/proc/" */
#   define SELF_LEN_TRAILER 4 /* "/exe" */
#   define SELF_LEN_MAX 18
    size_t len = strlen(path);
    if (strcmp(path, "/proc/self/exe") == 0)
        return true;
    if (len < SELF_LEN_MAX && /* /proc/nnnnnn/exe */
        strncmp(path, "/proc/", SELF_LEN_LEADER) == 0 &&
        strncmp(path + len - SELF_LEN_TRAILER, "/exe", SELF_LEN_TRAILER) == 0) {
        int pid;
        if (sscanf(path + SELF_LEN_LEADER, "%d", &pid) == 1) {
            char task[32];
            /* the pid belongs to us iff it appears under our own task dir */
            snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", pid);
            NULL_TERMINATE_BUFFER(task);
            return os_file_exists(task, true/*dir*/);
        }
    }
    return false;
}
#endif
/* Terminates the whole thread group with the given status; never returns.
 * Falls back to single-thread exit if the group-exit syscall is missing.
 */
void
exit_process_syscall(long status)
{
    /* We now assume SYS_exit_group is defined: not building on old machines,
     * but will execute there.  We try exit_group and if it fails we use exit.
     *
     * FIXME: if no exit_group, kill all other threads (==processes in same addr
     * space) manually?  Presumably we got here b/c at an unsafe point to do
     * full exit?  Or is that not true: what about dr_abort()?
     */
    dynamorio_syscall(SYSNUM_EXIT_PROCESS, 1, status);
    /* would assert that result is -ENOSYS but assert likely calls us => infinite loop */
    exit_thread_syscall(status);
    ASSERT_NOT_REACHED();
}
/* Terminates only the calling thread with the given status; never returns. */
void
exit_thread_syscall(long status)
{
#ifdef MACOS
    /* bsdthread_terminate wants the thread's mach port, not a status */
    mach_port_t thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
    /* FIXME i#1403: on MacOS we fail to free the app's stack: we need to pass it to
     * bsdthread_terminate.
     */
    dynamorio_syscall(SYSNUM_EXIT_THREAD, 4, 0, 0, thread_port, 0);
#else
    dynamorio_syscall(SYSNUM_EXIT_THREAD, 1, status);
#endif
}
/* FIXME: this one will not be easily internationalizable
   yet it is easier to have a syslog based Unix implementation with real strings.
 */
/* Intended to forward a DR syslog event to the system logger; currently
 * unimplemented on this platform (only the priority mapping exists).
 */
void
os_syslog(syslog_event_type_t priority, uint message_id,
          uint substitutions_num, va_list args) {
    int native_priority;
    switch (priority) {
    case SYSLOG_INFORMATION: native_priority = LOG_INFO; break;
    case SYSLOG_WARNING: native_priority = LOG_WARNING; break;
    case SYSLOG_CRITICAL: native_priority = LOG_CRIT; break;
    case SYSLOG_ERROR: native_priority = LOG_ERR; break;
    default:
        ASSERT_NOT_REACHED();
    }
    /* can amount to passing a format string (careful here) to vsyslog */
    /* Never let user controlled data in the format string! */
    ASSERT_NOT_IMPLEMENTED(false);
}
/* This is subject to races, but should only happen at init/attach when
 * there should only be one live thread.
 * Checks readability (via allmem or an OS query) before the memcpy, since
 * fault-based safe reads are not yet available.
 */
static bool
safe_read_via_query(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
    bool res = false;
    size_t num_read = 0;
    ASSERT(!fault_handling_initialized);
    /* XXX: in today's init ordering, allmem will never be initialized when we come
     * here, but we check it nevertheless to be general in case this routine is
     * ever called at some later time
     */
    if (IF_MEMQUERY_ELSE(false, memcache_initialized()))
        res = is_readable_without_exception_internal(base, size, false/*use allmem*/);
    else
        res = is_readable_without_exception_query_os((void *)base, size);
    if (res) {
        /* all-or-nothing: only copy once the whole range checked out */
        memcpy(out_buf, base, size);
        num_read = size;
    }
    if (bytes_read != NULL)
        *bytes_read = num_read;
    return res;
}
/* Copies size bytes from possibly-invalid memory at base into out_buf
 * without risking a crash.  Returns true and sets *bytes_read (optional)
 * on success.
 */
bool
safe_read_ex(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
    STATS_INC(num_safe_reads);
    /* XXX i#350: we'd like to always use safe_read_fast() and remove this extra
     * call layer, but safe_read_fast() requires fault handling to be set up.
     * We do set up an early signal handler in os_init(),
     * but there is still a window prior to that with no handler.
     */
    if (!fault_handling_initialized) {
        return safe_read_via_query(base, size, out_buf, bytes_read);
    } else {
        return safe_read_fast(base, size, out_buf, bytes_read);
    }
}
/* Like safe_read() but skips the safety net (plain memcpy) when fault
 * handling is not yet initialized, for callers that know the source is
 * valid during early init.
 */
bool
safe_read_if_fast(const void *base, size_t size, void *out_buf)
{
    if (!fault_handling_initialized) {
        memcpy(out_buf, base, size);
        return true;
    } else {
        return safe_read_ex(base, size, out_buf, NULL);
    }
}
/* FIXME - fold this together with safe_read_ex() (is a lot of places to update) */
/* Convenience wrapper for safe_read_ex() when the byte count isn't needed. */
bool
safe_read(const void *base, size_t size, void *out_buf)
{
    return safe_read_ex(base, size, out_buf, NULL);
}
/* Copies size bytes from in_buf to possibly-invalid memory at base without
 * risking a crash; delegates to the try/except-based implementation.
 */
bool
safe_write_ex(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
    return safe_write_try_except(base, size, in_buf, bytes_written);
}
/* is_readable_without_exception checks to see that all bytes with addresses
 * from pc to pc+size-1 are readable and that reading from there won't
 * generate an exception.  if 'query_os' is true, check what the os thinks
 * the prot bits are instead of using the all memory list.
 */
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os)
{
    uint prot = MEMPROT_NONE;
    byte *check_pc = (byte *) ALIGN_BACKWARD(pc, PAGE_SIZE);
    /* clamp the range so pc+size cannot wrap the address space */
    if (size > ((byte *)POINTER_MAX - pc))
        size = (byte *)POINTER_MAX - pc;
    do {
        bool rc = query_os ?
            get_memory_info_from_os(check_pc, NULL, NULL, &prot) :
            get_memory_info(check_pc, NULL, NULL, &prot);
        /* exec-only pages are still readable by a load on this arch's model */
        if (!rc || !TESTANY(MEMPROT_READ|MEMPROT_EXEC, prot))
            return false;
        if (POINTER_OVERFLOW_ON_ADD(check_pc, PAGE_SIZE))
            break;
        check_pc += PAGE_SIZE;
    } while (check_pc < pc+size);
    return true;
}
/* Returns true if [pc, pc+size) is readable; chooses between the cached
 * memory list and a live OS query based on build/options.
 */
bool
is_readable_without_exception(const byte *pc, size_t size)
{
    /* case 9745 / i#853: We've had problems with all_memory_areas not being
     * accurate in the past.  Parsing proc maps is too slow for some apps, so we
     * use a runtime option.
     */
    bool query_os = IF_MEMQUERY_ELSE(true, !DYNAMO_OPTION(use_all_memory_areas));
    return is_readable_without_exception_internal(pc, size, query_os);
}
/* Identical to is_readable_without_exception except that the os is queried
 * for info on the indicated region */
bool
is_readable_without_exception_query_os(byte *pc, size_t size)
{
    return is_readable_without_exception_internal(pc, size, true);
}
/* Should report whether pc lies in user (vs kernel) address space;
 * unimplemented on this platform and conservatively returns true.
 */
bool
is_user_address(byte *pc)
{
    /* FIXME: NYI */
    /* note returning true will always skip the case 9022 logic on Linux */
    return true;
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER */
/* change protections on memory region starting at pc of length length
 * this does not update the all memory area info
 * prot is a MEMPROT_* bitmask; the affected range is expanded to whole pages.
 */
bool
os_set_protection(byte *pc, size_t length, uint prot/*MEMPROT_*/)
{
    app_pc start_page = (app_pc) PAGE_START(pc);
    uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
    long res = 0;
    uint flags = memprot_to_osprot(prot);
#ifdef IA32_ON_IA64
    LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n");
    LOG(THREAD_GET, LOG_VMAREAS, 1, " attempted change_prot("PFX", "PIFX", %s) => "
        "mprotect("PFX", "PIFX")==%d pages\n",
        pc, length, memprot_string(prot), start_page, num_bytes,
        num_bytes / PAGE_SIZE);
#else
    DOSTATS({
        /* once on each side of prot, to get on right side of writability */
        if (!TEST(PROT_WRITE, flags)) {
            STATS_INC(protection_change_calls);
            STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
        }
    });
    res = mprotect_syscall((void *) start_page, num_bytes, flags);
    if (res != 0)
        return false;
    LOG(THREAD_GET, LOG_VMAREAS, 3, "change_prot("PFX", "PIFX", %s) => "
        "mprotect("PFX", "PIFX", %d)==%d pages\n",
        pc, length, memprot_string(prot), start_page, num_bytes, flags,
        num_bytes / PAGE_SIZE);
#endif
    DOSTATS({
        /* once on each side of prot, to get on right side of writability */
        if (TEST(PROT_WRITE, flags)) {
            STATS_INC(protection_change_calls);
            STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
        }
    });
    return true;
}
#ifndef NOT_DYNAMORIO_CORE_PROPER
/* change protections on memory region starting at pc of length length
 * Unlike os_set_protection(), also updates the cached memory-area info
 * when that cache is in use.
 */
bool
set_protection(byte *pc, size_t length, uint prot/*MEMPROT_*/)
{
    if (os_set_protection(pc, length, prot) == false)
        return false;
#ifndef HAVE_MEMINFO_QUERY
    else {
        app_pc start_page = (app_pc) PAGE_START(pc);
        uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
        memcache_update_locked(start_page, start_page + num_bytes,
                               prot, -1/*type unchanged*/, true/*exists*/);
    }
#endif
    return true;
}
/* change protections on memory region starting at pc of length length */
bool
change_protection(byte *pc, size_t length, bool writable)
{
    uint flags = MEMPROT_READ;
    if (writable)
        flags |= MEMPROT_WRITE;
    return set_protection(pc, length, flags);
}
/* make pc's page writable
 * size == 0 means a single page; pc must be page-aligned and size
 * page-multiple.  Adds PROT_WRITE while preserving existing read/exec bits
 * where they can be queried.
 */
bool
make_writable(byte *pc, size_t size)
{
    long res;
    app_pc start_page = (app_pc) PAGE_START(pc);
    size_t prot_size = (size == 0) ? PAGE_SIZE : size;
    uint prot = PROT_EXEC|PROT_READ|PROT_WRITE;
    /* if can get current protection then keep old read/exec flags.
     * this is crucial on modern linux kernels which refuse to mark stack +x.
     */
    if (!is_in_dynamo_dll(pc)/*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
        /* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
         * but we can't call get_memory_info() until allmem is initialized.  Our
         * uses before then are for patching x86.asm, which is OK.
         */
        IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
        get_memory_info(pc, NULL, NULL, &prot))
        prot |= PROT_WRITE;
    ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
#ifdef IA32_ON_IA64
    LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n");
    LOG(THREAD_GET, LOG_VMAREAS, 3,
        "attempted make_writable: pc "PFX" -> "PFX"-"PFX"\n",
        pc, start_page, start_page + prot_size);
#else
    res = mprotect_syscall((void *) start_page, prot_size, prot);
    LOG(THREAD_GET, LOG_VMAREAS, 3, "make_writable: pc "PFX" -> "PFX"-"PFX" %d\n",
        pc, start_page, start_page + prot_size, res);
    ASSERT(res == 0);
    if (res != 0)
        return false;
#endif
    STATS_INC(protection_change_calls);
    STATS_ADD(protection_change_pages, size / PAGE_SIZE);
#ifndef HAVE_MEMINFO_QUERY
    /* update all_memory_areas list with the protection change */
    if (memcache_initialized()) {
        memcache_update_locked(start_page, start_page + prot_size,
                               osprot_to_memprot(prot),
                               -1/*type unchanged*/, true/*exists*/);
    }
#endif
    return true;
}
/* like make_writable but adds COW */
bool make_copy_on_writable(byte *pc, size_t size)
{
    /* FIXME: for current usage this should be fine */
    return make_writable(pc, size);
}
/* make pc's page unwritable
 * size == 0 means a single page; pc must be page-aligned and size
 * page-multiple.  Removes PROT_WRITE while preserving existing read/exec
 * bits where they can be queried.
 */
void
make_unwritable(byte *pc, size_t size)
{
    long res;
    app_pc start_page = (app_pc) PAGE_START(pc);
    size_t prot_size = (size == 0) ? PAGE_SIZE : size;
    uint prot = PROT_EXEC|PROT_READ;
    /* if can get current protection then keep old read/exec flags.
     * this is crucial on modern linux kernels which refuse to mark stack +x.
     */
    if (!is_in_dynamo_dll(pc)/*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
        /* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
         * but we can't call get_memory_info() until allmem is initialized.  Our
         * uses before then are for patching x86.asm, which is OK.
         */
        IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
        get_memory_info(pc, NULL, NULL, &prot))
        prot &= ~PROT_WRITE;
    ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
    /* inc stats before making unwritable, in case messing w/ data segment */
    STATS_INC(protection_change_calls);
    STATS_ADD(protection_change_pages, size / PAGE_SIZE);
#ifdef IA32_ON_IA64
    LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n");
    /* fixed copy-paste from make_writable: log the actual operation name */
    LOG(THREAD_GET, LOG_VMAREAS, 3,
        "attempted make_unwritable: pc "PFX" -> "PFX"-"PFX"\n",
        pc, start_page, start_page + prot_size);
#else
    res = mprotect_syscall((void *) start_page, prot_size, prot);
    LOG(THREAD_GET, LOG_VMAREAS, 3, "make_unwritable: pc "PFX" -> "PFX"-"PFX"\n",
        pc, start_page, start_page + prot_size);
    ASSERT(res == 0);
# ifndef HAVE_MEMINFO_QUERY
    /* update all_memory_areas list with the protection change */
    if (memcache_initialized()) {
        memcache_update_locked(start_page, start_page + prot_size,
                               osprot_to_memprot(prot), -1/*type unchanged*/,
                               false/*!exists*/);
    }
# endif
#endif
}
/****************************************************************************/
/* SYSTEM CALLS */
/* SYS_ defines are in /usr/include/bits/syscall.h
* numbers used by libc are in /usr/include/asm/unistd.h
* kernel defines are in /usr/src/linux-2.4/include/asm-i386/unistd.h
* kernel function names are in /usr/src/linux/arch/i386/kernel/entry.S
*
* For now, we've copied the SYS/NR defines from syscall.h and unistd.h
* and put them in our own local syscall.h.
*/
/* num_raw should be the xax register value.
 * For a live system call, dcontext_live should be passed (for examining
 * the dcontext->last_exit and exit_reason flags); otherwise, gateway should
 * be passed.
 * Returns a normalized syscall number: on MacOS the BSD/Mach/Machdep class
 * is folded into marker bits; elsewhere the raw number is returned as-is.
 */
int
os_normalized_sysnum(int num_raw, instr_t *gateway, dcontext_t *dcontext)
{
#ifdef MACOS
    /* The x64 encoding indicates the syscall type in the top 8 bits.
     * We drop the 0x2000000 for BSD so we can use the SYS_ enum constants.
     * That leaves 0x1000000 for Mach and 0x3000000 for Machdep.
     * On 32-bit, a different encoding is used: we transform that
     * to the x64 encoding minus BSD.
     */
    int interrupt = 0;
    int num = 0;
    if (gateway != NULL) {
        /* static case: read the gateway instruction directly */
        if (instr_is_interrupt(gateway))
            interrupt = instr_get_interrupt_number(gateway);
    } else {
        /* live case: recover the int number from the recorded exit reason */
        ASSERT(dcontext != NULL);
        if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags)) {
            if (dcontext->upcontext.upcontext.exit_reason ==
                EXIT_REASON_NI_SYSCALL_INT_0x81)
                interrupt = 0x81;
            else {
                ASSERT(dcontext->upcontext.upcontext.exit_reason ==
                       EXIT_REASON_NI_SYSCALL_INT_0x82);
                interrupt = 0x82;
            }
        }
    }
# ifdef X64
    if (num_raw >> 24 == 0x2)
        return (int)(num_raw & 0xffffff); /* Drop BSD bit */
    else
        num = (int) num_raw; /* Keep Mach and Machdep bits */
# else
    if ((ptr_int_t)num_raw < 0) /* Mach syscall */
        return (SYSCALL_NUM_MARKER_MACH | -(int)num_raw);
    else {
        /* Bottom 16 bits are the number, top are arg size. */
        num = (int)(num_raw & 0xffff);
    }
# endif
    /* int 0x81 = Mach, int 0x82 = Machdep gateway on 32-bit */
    if (interrupt == 0x81)
        num |= SYSCALL_NUM_MARKER_MACH;
    else if (interrupt == 0x82)
        num |= SYSCALL_NUM_MARKER_MACHDEP;
    return num;
#else
    return num_raw;
#endif
}
/* Returns false for system calls DR must intercept (process/thread lifetime,
 * memory mappings and protections, signal state, fd management, segment/TLS
 * registers, etc.); true for calls that need no special handling.  "num"
 * must already be normalized via os_normalized_sysnum().
 */
static bool
ignorable_system_call_normalized(int num)
{
    switch (num) {
#if defined(SYS_exit_group)
    case SYS_exit_group:
#endif
    case SYS_exit:
#ifdef MACOS
    case SYS_bsdthread_terminate:
#endif
#ifdef LINUX
    case SYS_brk:
# ifdef SYS_uselib
    case SYS_uselib:
# endif
#endif
#if defined(X64) || !defined(ARM)
    case SYS_mmap:
#endif
#if !defined(X64) && !defined(MACOS)
    case SYS_mmap2:
#endif
    case SYS_munmap:
#ifdef LINUX
    case SYS_mremap:
#endif
    case SYS_mprotect:
#ifdef ANDROID
    case SYS_prctl:
#endif
    case SYS_execve:
#ifdef LINUX
    case SYS_clone:
#elif defined(MACOS)
    case SYS_bsdthread_create:
    case SYS_posix_spawn:
#endif
#ifdef SYS_fork
    case SYS_fork:
#endif
#ifdef SYS_vfork
    case SYS_vfork:
#endif
    case SYS_kill:
#if defined(SYS_tkill)
    case SYS_tkill:
#endif
#if defined(SYS_tgkill)
    case SYS_tgkill:
#endif
#if defined(LINUX) && !defined(X64) && !defined(ARM)
    case SYS_signal:
#endif
#ifdef MACOS
    case SYS_sigsuspend_nocancel:
#endif
#if !defined(X64) || defined(MACOS)
    case SYS_sigaction:
    case SYS_sigsuspend:
    case SYS_sigpending:
    case SYS_sigreturn:
    case SYS_sigprocmask:
#endif
#ifdef LINUX
    case SYS_rt_sigreturn:
    case SYS_rt_sigaction:
    case SYS_rt_sigprocmask:
    case SYS_rt_sigpending:
    case SYS_rt_sigtimedwait:
    case SYS_rt_sigqueueinfo:
    case SYS_rt_sigsuspend:
#ifdef SYS_signalfd
    case SYS_signalfd:
#endif
    case SYS_signalfd4:
#endif
    case SYS_sigaltstack:
#if defined(LINUX) && !defined(X64) && !defined(ARM)
    case SYS_sgetmask:
    case SYS_ssetmask:
#endif
    case SYS_setitimer:
    case SYS_getitimer:
#ifdef MACOS
    case SYS_close_nocancel:
#endif
    case SYS_close:
#ifdef SYS_dup2
    case SYS_dup2:
#endif
#ifdef LINUX
    case SYS_dup3:
#endif
#ifdef MACOS
    case SYS_fcntl_nocancel:
#endif
    case SYS_fcntl:
#if defined(X64) || !defined(ARM)
    case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
    case SYS_ugetrlimit:
#endif
    case SYS_setrlimit:
#ifdef LINUX
    case SYS_prlimit64:
#endif
#if defined(LINUX) && defined(X86)
    /* i#784: app may have behavior relying on SIGALRM */
    case SYS_alarm:
#endif
    /* i#107: syscall might change/query app's seg memory
     * need stop app from clobbering our GDT slot.
     */
#if defined(LINUX) && defined(X86) && defined(X64)
    case SYS_arch_prctl:
#endif
#if defined(LINUX) && defined(X86)
    case SYS_set_thread_area:
    case SYS_get_thread_area:
    /* FIXME: we might add SYS_modify_ldt later. */
#endif
#if defined(LINUX) && defined(ARM)
    /* syscall changes app's thread register */
    case SYS_set_tls:
    case SYS_cacheflush:
#endif
        return false;
#ifdef LINUX
# ifdef SYS_readlink
    case SYS_readlink:
# endif
    case SYS_readlinkat:
        /* NOTE(review): only non-ignorable under -early_inject — presumably
         * to mediate reads of /proc exe links; confirm against the handler. */
        return !DYNAMO_OPTION(early_inject);
#endif
    default:
#ifdef VMX86_SERVER
        if (is_vmkuw_sysnum(num))
            return vmkuw_ignorable_system_call(num);
#endif
        return true;
    }
}
/* Convenience wrapper around ignorable_system_call_normalized(): first
 * normalizes the raw syscall number (which on Mac encodes the syscall class)
 * via os_normalized_sysnum(), then performs the ignorability check.
 */
bool
ignorable_system_call(int num_raw, instr_t *gateway, dcontext_t *dcontext_live)
{
    int num = os_normalized_sysnum(num_raw, gateway, dcontext_live);
    return ignorable_system_call_normalized(num);
}
/* Argument block for the old single-struct mmap interface, where the syscall
 * takes a pointer to this struct instead of six register arguments.
 * NOTE(review): field layout appears to mirror the Linux kernel's
 * mmap_arg_struct — confirm against the kernel headers if modified.
 */
typedef struct {
    unsigned long addr;   /* requested mapping address (hint) */
    unsigned long len;    /* length of the mapping in bytes */
    unsigned long prot;   /* PROT_* protection flags */
    unsigned long flags;  /* MAP_* flags */
    unsigned long fd;     /* file descriptor backing the mapping */
    unsigned long offset; /* offset into the file */
} mmap_arg_struct_t;
#endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */
/* Registers used to pass syscall parameters, in parameter order, for each
 * architecture's kernel syscall ABI.  Indexed by parameter number
 * (0 .. MAX_SYSCALL_ARGS-1).
 */
const reg_id_t syscall_regparms[MAX_SYSCALL_ARGS] = {
#ifdef X86
# ifdef X64
    DR_REG_RDI,
    DR_REG_RSI,
    DR_REG_RDX,
    DR_REG_R10, /* RCX goes here in normal x64 calling convention; the kernel
                 * uses R10 since the syscall instr clobbers RCX. */
    DR_REG_R8,
    DR_REG_R9
# else
    DR_REG_EBX,
    DR_REG_ECX,
    DR_REG_EDX,
    DR_REG_ESI,
    DR_REG_EDI,
    DR_REG_EBP
# endif /* 64/32-bit */
#elif defined(AARCHXX)
    DR_REG_R0,
    DR_REG_R1,
    DR_REG_R2,
    DR_REG_R3,
    DR_REG_R4,
    DR_REG_R5,
#endif /* X86/ARM */
};
#ifndef NOT_DYNAMORIO_CORE_PROPER
/* Returns the address within the app's saved machine context (or, for 32-bit
 * Mac, within the app stack) where syscall parameter #num lives, so callers
 * can read or overwrite it in place.  num is 0-based; an out-of-range num
 * triggers a CLIENT_ASSERT and falls through to return 0 (NULL).
 */
static inline reg_t *
sys_param_addr(dcontext_t *dcontext, int num)
{
    /* we force-inline get_mcontext() and so don't take it as a param */
    priv_mcontext_t *mc = get_mcontext(dcontext);
#if defined(X86) && defined(X64)
    switch (num) {
    case 0: return &mc->xdi;
    case 1: return &mc->xsi;
    case 2: return &mc->xdx;
    case 3: return &mc->r10; /* since rcx holds retaddr for syscall instr */
    case 4: return &mc->r8;
    case 5: return &mc->r9;
    default: CLIENT_ASSERT(false, "invalid system call parameter number");
    }
#else
# ifdef MACOS
    /* XXX: if we don't end up using dcontext->sys_was_int here, we could
     * make that field Linux-only.
     */
    /* For 32-bit, the args are passed on the stack, above a retaddr slot
     * (regardless of whether using a sysenter or int gateway).
     */
    return ((reg_t *)mc->esp) + 1/*retaddr*/ + num;
# endif
    /* even for vsyscall where ecx (syscall) or esp (sysenter) are saved into
     * ebp, the original parameter registers are not yet changed pre-syscall,
     * except for ebp, which is pushed on the stack:
     * 0xffffe400 55 push %ebp %esp -> %esp (%esp)
     * 0xffffe401 89 cd mov %ecx -> %ebp
     * 0xffffe403 0f 05 syscall -> %ecx
     *
     * 0xffffe400 51 push %ecx %esp -> %esp (%esp)
     * 0xffffe401 52 push %edx %esp -> %esp (%esp)
     * 0xffffe402 55 push %ebp %esp -> %esp (%esp)
     * 0xffffe403 89 e5 mov %esp -> %ebp
     * 0xffffe405 0f 34 sysenter -> %esp
     */
    switch (num) {
    case 0: return &mc->IF_X86_ELSE(xbx, r0);
    case 1: return &mc->IF_X86_ELSE(xcx, r1);
    case 2: return &mc->IF_X86_ELSE(xdx, r2);
    case 3: return &mc->IF_X86_ELSE(xsi, r3);
    case 4: return &mc->IF_X86_ELSE(xdi, r4);
    /* FIXME: do a safe_read: but what about performance?
     * See the #if 0 below, as well. */
    case 5: return IF_X86_ELSE((dcontext->sys_was_int ? &mc->xbp : ((reg_t*)mc->xsp)),
                               &mc->r5);
# ifdef ARM
    /* AArch32 supposedly has 7 args in some cases. */
    case 6: return &mc->r6;
# endif
    default: CLIENT_ASSERT(false, "invalid system call parameter number");
    }
#endif
    return 0;
}
/* Reads the value of syscall parameter #num from the app's machine context. */
static inline reg_t
sys_param(dcontext_t *dcontext, int num)
{
    reg_t *slot = sys_param_addr(dcontext, num);
    return *slot;
}
/* Overwrites syscall parameter #param_num in the app's machine context with
 * new_value, so the kernel will see the replacement value.
 */
void
set_syscall_param(dcontext_t *dcontext, int param_num, reg_t new_value)
{
    reg_t *slot = sys_param_addr(dcontext, param_num);
    *slot = new_value;
}
/* Returns whether the just-completed syscall (result already in mc) succeeded.
 * On Mac, BSD syscalls report failure via the carry flag; Mach syscalls are
 * heuristically treated as succeeding when the result is non-negative.
 * Elsewhere, mmap-family results need a dedicated check since valid addresses
 * can look negative; all other syscalls fail iff the result is negative
 * (i.e., -errno).
 */
static inline bool
syscall_successful(priv_mcontext_t *mc, int normalized_sysnum)
{
#ifdef MACOS
    if (TEST(SYSCALL_NUM_MARKER_MACH, normalized_sysnum)) {
        /* XXX: Mach syscalls vary (for some KERN_SUCCESS=0 is success,
         * for others that return mach_port_t 0 is failure (I think?).
         * We defer to drsyscall.
         */
        return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
    } else
        return !TEST(EFLAGS_CF, mc->eflags);
#else
    if (normalized_sysnum == IF_X64_ELSE(SYS_mmap, SYS_mmap2) ||
# if !defined(ARM) && !defined(X64)
        normalized_sysnum == SYS_mmap ||
# endif
        normalized_sysnum == SYS_mremap)
        return mmap_syscall_succeeded((byte *)MCXT_SYSCALL_RES(mc));
    return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
#endif
}
/* Stores val as the syscall's return value in the app's machine context.
 * For non-Mac, this does nothing to indicate "success": you can pass -errno.
 * For Mac, this clears CF and just sets xax. To return a 64-bit value in
 * 32-bit mode, the caller must explicitly set xdx as well (we don't always
 * do so b/c syscalls that just return 32-bit values do not touch xdx).
 */
static inline void
set_success_return_val(dcontext_t *dcontext, reg_t val)
{
    /* since always coming from dispatch now, only need to set mcontext */
    priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
    /* On MacOS, success is determined by CF, except for Mach syscalls, but
     * there it doesn't hurt to set CF.
     */
    mc->eflags &= ~(EFLAGS_CF);
#endif
    /* The result register (xax on x86) holds the return value. */
    MCXT_SYSCALL_RES(mc) = val;
}
/* Marks the syscall as failed with the given errno.
 * Always pass a positive value for errno: on Mac the kernel convention is a
 * positive errno with CF set, while on Linux the convention is -errno in the
 * result register, and the negation happens here.
 */
static inline void
set_failure_return_val(dcontext_t *dcontext, uint errno_val)
{
    priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
    /* On MacOS, success is determined by CF, and errno is positive */
    mc->eflags |= EFLAGS_CF;
    MCXT_SYSCALL_RES(mc) = errno_val;
#else
    MCXT_SYSCALL_RES(mc) = -(int)errno_val;
#endif
}
#ifdef CLIENT_INTERFACE
DR_API
/* Client API: reads syscall parameter #param_num.  Only valid during the
 * pre-syscall event, before the kernel may have clobbered the registers.
 */
reg_t
dr_syscall_get_param(void *drcontext, int param_num)
{
    dcontext_t *dc = (dcontext_t *) drcontext;
    CLIENT_ASSERT(dc->client_data->in_pre_syscall,
                  "dr_syscall_get_param() can only be called from pre-syscall event");
    return sys_param(dc, param_num);
}
DR_API
/* Client API: overwrites syscall parameter #param_num with new_value.
 * Valid from either the pre- or post-syscall event (the latter is useful
 * together with dr_syscall_invoke_another()).
 */
void
dr_syscall_set_param(void *drcontext, int param_num, reg_t new_value)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
                  dcontext->client_data->in_post_syscall,
                  "dr_syscall_set_param() can only be called from a syscall event");
    /* Route through the shared setter instead of duplicating the
     * sys_param_addr() store, for consistency with other callers.
     */
    set_syscall_param(dcontext, param_num, new_value);
}
DR_API
/* Client API: returns the raw syscall result value.  Only valid during the
 * post-syscall event.
 */
reg_t
dr_syscall_get_result(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    /* Fixed copy-paste bug: the assert message named dr_syscall_get_param(). */
    CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
                  "dr_syscall_get_result() can only be called from post-syscall event");
    return MCXT_SYSCALL_RES(get_mcontext(dcontext));
}
DR_API
/* Client API: fills in *info with the syscall's result value, success flag,
 * and (optionally, per info->use_high / info->use_errno) the high 32 bits and
 * the errno value.  Only valid during the post-syscall event.  Returns false
 * if info->size does not match (version mismatch).
 */
bool
dr_syscall_get_result_ex(void *drcontext, dr_syscall_result_info_t *info INOUT)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    priv_mcontext_t *mc = get_mcontext(dcontext);
    /* Fixed copy-paste bug: the assert message named dr_syscall_get_param_ex(). */
    CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
                  "only call dr_syscall_get_result_ex() from post-syscall event");
    CLIENT_ASSERT(info != NULL, "invalid parameter");
    CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
    if (info->size != sizeof(*info))
        return false;
    info->value = MCXT_SYSCALL_RES(mc);
    info->succeeded = syscall_successful(mc, dcontext->sys_num);
    if (info->use_high) {
        /* MacOS has some 32-bit syscalls that return 64-bit values in
         * xdx:xax, but the other syscalls don't clear xdx, so we can't easily
         * return a 64-bit value all the time.
         */
        IF_X86_ELSE({
            info->high = mc->xdx;
        }, {
            ASSERT_NOT_REACHED();
        });
    }
    if (info->use_errno) {
        if (info->succeeded)
            info->errno_value = 0;
        else {
            /* On Linux the raw result is -errno; report it as positive. */
            info->errno_value = (uint)IF_LINUX(-(int))MCXT_SYSCALL_RES(mc);
        }
    }
    return true;
}
DR_API
/* Client API: sets the syscall's return value (marking it successful on Mac;
 * elsewhere the caller may pass -errno to indicate failure).  Valid from the
 * pre- or post-syscall event.
 */
void
dr_syscall_set_result(void *drcontext, reg_t value)
{
    dcontext_t *dc = (dcontext_t *) drcontext;
    CLIENT_ASSERT(dc->client_data->in_pre_syscall ||
                  dc->client_data->in_post_syscall,
                  "dr_syscall_set_result() can only be called from a syscall event");
    /* For non-Mac, the caller can still pass -errno and this will work */
    set_success_return_val(dc, value);
}
DR_API
/* Client API: sets the syscall result from *info, honoring the success flag,
 * optional errno, and optional high 32 bits.  Valid from the pre- or
 * post-syscall event.  Returns false if info->size does not match.
 */
bool
dr_syscall_set_result_ex(void *drcontext, dr_syscall_result_info_t *info)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    priv_mcontext_t *mc = get_mcontext(dcontext);
    /* Fixed copy-paste bug: the assert message named dr_syscall_set_result(). */
    CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
                  dcontext->client_data->in_post_syscall,
                  "dr_syscall_set_result_ex() can only be called from a syscall event");
    /* Added for consistency with dr_syscall_get_result_ex(). */
    CLIENT_ASSERT(info != NULL, "invalid parameter");
    CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
    if (info->size != sizeof(*info))
        return false;
    if (info->use_errno) {
        if (info->succeeded) {
            /* a weird case but we let the user combine these */
            set_success_return_val(dcontext, info->errno_value);
        } else
            set_failure_return_val(dcontext, info->errno_value);
    } else {
        if (info->succeeded)
            set_success_return_val(dcontext, info->value);
        else {
            /* use this to set CF, even though it might negate the value */
            set_failure_return_val(dcontext, (uint)info->value);
            /* now set the value, overriding set_failure_return_val() */
            MCXT_SYSCALL_RES(mc) = info->value;
        }
        if (info->use_high) {
            /* MacOS has some 32-bit syscalls that return 64-bit values in
             * xdx:xax.
             */
            IF_X86_ELSE({
                mc->xdx = info->high;
            }, {
                ASSERT_NOT_REACHED();
            });
        }
    }
    return true;
}
DR_API
void
dr_syscall_set_sysnum(void *drcontext, int new_num)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_sysnum() can only be called from a syscall event");
MCXT_SYSNUM_REG(mc) = new_num;
}
DR_API
/* Client API: requests that another syscall be executed immediately after the
 * current post-syscall event, using whatever number/params the client has set
 * via dr_syscall_set_sysnum()/dr_syscall_set_param().  Only valid during the
 * post-syscall event; the actual re-execution is driven by dispatch checking
 * the invoke_another_syscall flag set here.
 */
void
dr_syscall_invoke_another(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
                  "dr_syscall_invoke_another() can only be called from post-syscall event");
    LOG(THREAD, LOG_SYSCALLS, 2, "invoking additional syscall on client request\n");
    dcontext->client_data->invoke_another_syscall = true;
# ifdef X86
    if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
        priv_mcontext_t *mc = get_mcontext(dcontext);
        /* restore xbp to xsp: the sysenter gateway saved xsp into xbp (see
         * sys_param_addr() comments), so redo that for the repeat syscall */
        mc->xbp = mc->xsp;
    }
# endif /* X86 */
    /* for x64 we don't need to copy xcx into r10 b/c we use r10 as our param */
}
#endif /* CLIENT_INTERFACE */
/* Returns whether (sysnum, flags) denotes creation of a new thread sharing
 * this address space (as opposed to a new process): vfork always shares
 * memory with the parent, and Linux clone does iff CLONE_VM is set.
 * flags is only examined for SYS_clone.
 */
static inline bool
is_thread_create_syscall_helper(ptr_uint_t sysnum, ptr_uint_t flags)
{
#ifdef MACOS
    /* XXX i#1403: we need earlier injection to intercept
     * bsdthread_register in order to capture workqueue threads.
     */
    return (sysnum == SYS_bsdthread_create || sysnum == SYS_vfork);
#else
# ifdef SYS_vfork
    if (sysnum == SYS_vfork)
        return true;
# endif
# ifdef LINUX
    if (sysnum == SYS_clone && TEST(CLONE_VM, flags))
        return true;
# endif
    return false;
#endif
}
/* Pre-syscall check: is the about-to-execute syscall a thread creation?
 * Reads the syscall number and flags directly from the app mcontext.
 */
bool
is_thread_create_syscall(dcontext_t *dcontext)
{
    priv_mcontext_t *state = get_mcontext(dcontext);
    ptr_uint_t sysnum = MCXT_SYSNUM_REG(state);
    ptr_uint_t flags = sys_param(dcontext, 0);
    return is_thread_create_syscall_helper(sysnum, flags);
}
/* Post-syscall check: was the just-executed syscall a thread creation?
 * Uses the number and param0 (clone flags) recorded at pre-syscall time,
 * since the registers may have been clobbered by the kernel.
 */
bool
was_thread_create_syscall(dcontext_t *dcontext)
{
    ptr_uint_t flags = dcontext->sys_param0; /* flags were in param0 */
    return is_thread_create_syscall_helper(dcontext->sys_num, flags);
}
/* Returns whether sysnum is a signal-return syscall: SYS_sigreturn on Mac,
 * SYS_rt_sigreturn (plus legacy SYS_sigreturn on 32-bit) elsewhere.
 */
static inline bool
is_sigreturn_syscall_helper(int sysnum)
{
#ifdef MACOS
    return sysnum == SYS_sigreturn;
#else
    return (IF_NOT_X64(sysnum == SYS_sigreturn ||) sysnum == SYS_rt_sigreturn);
#endif
}
/* Pre-syscall check: is the about-to-execute syscall a sigreturn? */
bool
is_sigreturn_syscall(dcontext_t *dcontext)
{
    priv_mcontext_t *state = get_mcontext(dcontext);
    int sysnum = (int) MCXT_SYSNUM_REG(state);
    return is_sigreturn_syscall_helper(sysnum);
}
/* Post-syscall check: was the just-executed syscall a sigreturn?  Uses the
 * syscall number recorded at pre-syscall time.
 */
bool
was_sigreturn_syscall(dcontext_t *dcontext)
{
    int recorded_num = dcontext->sys_num;
    return is_sigreturn_syscall_helper(recorded_num);
}
/* process a signal this process/thread is sending to itself: currently only
 * SIGABRT (without -intercept_all_signals) is handled specially, by
 * terminating the thread cleanly rather than letting the kill proceed.
 */
static void
handle_self_signal(dcontext_t *dcontext, uint sig)
{
    /* FIXME PR 297903: watch for all DEFAULT_TERMINATE signals,
     * and for any thread in the group, not just self.
     *
     * FIXME PR 297033: watch for SIGSTOP and SIGCONT.
     *
     * With -intercept_all_signals, we only need to watch for SIGKILL
     * and SIGSTOP here, and we avoid the FIXMEs below. If it's fine
     * for DR not to clean up on a SIGKILL, then SIGSTOP is all that's
     * left (at least once we have PR 297033 and are intercepting the
     * various STOP variations and CONT).
     */
    if (sig == SIGABRT && !DYNAMO_OPTION(intercept_all_signals)) {
        LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 1,
            "thread "TIDFMT" sending itself a SIGABRT\n", get_thread_id());
        KSTOP(num_exits_dir_syscall);
        /* FIXME: need to check whether app has a handler for SIGABRT! */
        /* FIXME PR 211180/6723: this will do SYS_exit rather than the SIGABRT.
         * Should do set_default_signal_action(SIGABRT) (and set a flag so
         * no races w/ another thread re-installing?) and then SYS_kill.
         */
        cleanup_and_terminate(dcontext, SYSNUM_EXIT_THREAD, -1, 0,
                              (is_last_app_thread() && !dynamo_exited),
                              IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
        ASSERT_NOT_REACHED();
    }
}
/***************************************************************************
* EXECVE
*/
/* Indices into env_to_propagate[] below; the two must stay in sync.
 * when adding here, also add to the switch in handle_execve if necessary */
enum {
    ENV_PROP_RUNUNDER,      /* DYNAMORIO_VAR_RUNUNDER */
    ENV_PROP_OPTIONS,       /* DYNAMORIO_VAR_OPTIONS */
    ENV_PROP_EXECVE_LOGDIR, /* DYNAMORIO_VAR_EXECVE_LOGDIR */
    ENV_PROP_EXE_PATH,      /* DYNAMORIO_VAR_EXE_PATH (early injection) */
    ENV_PROP_CONFIGDIR,     /* DYNAMORIO_VAR_CONFIGDIR (only if set) */
};
/* Env var names DR propagates across execve into the child process.
 * Order must match the ENV_PROP_* enum above.
 */
static const char * const env_to_propagate[] = {
    /* these must line up with the enum */
    DYNAMORIO_VAR_RUNUNDER,
    DYNAMORIO_VAR_OPTIONS,
    /* DYNAMORIO_VAR_EXECVE_LOGDIR is different from DYNAMORIO_VAR_LOGDIR:
     * - DYNAMORIO_VAR_LOGDIR: a parent dir inside which a new dir will be created;
     * - DYNAMORIO_VAR_EXECVE_LOGDIR: the same subdir with the pre-execve process.
     * Xref comment in create_log_dir about their precedence.
     */
    DYNAMORIO_VAR_EXECVE_LOGDIR,
    /* i#909: needed for early injection */
    DYNAMORIO_VAR_EXE_PATH,
    /* these will only be propagated if they exist */
    DYNAMORIO_VAR_CONFIGDIR,
};
/* Number of entries in env_to_propagate[] (== number of ENV_PROP_* values). */
#define NUM_ENV_TO_PROPAGATE (sizeof(env_to_propagate)/sizeof(env_to_propagate[0]))
/* Called at pre-SYS_execve to append DR vars in the target process env vars list.
 * For late injection via libdrpreload, we call this for *all children, because
 * even if -no_follow_children is specified, a whitelist will still ask for takeover
 * and it's libdrpreload who checks the whitelist.
 * For -early, however, we check the config ahead of time and only call this routine
 * if we in fact want to inject.
 * XXX i#1679: these parent vs child differences bring up corner cases of which
 * config dir takes precedence (if the child clears the HOME env var, e.g.).
 *
 * Builds a freshly heap-allocated envp (replacing/augmenting LD_PRELOAD,
 * LD_LIBRARY_PATH, and the DR propagation vars) and installs it as syscall
 * param 2; the old and new pointers are stashed in sys_param0/sys_param1 so
 * handle_execve_post() can restore and free them if the execve fails.
 */
static void
add_dr_env_vars(dcontext_t *dcontext, char *inject_library_path, const char *app_path)
{
    char **envp = (char **) sys_param(dcontext, 2);
    int idx, j, preload = -1, ldpath = -1;
    int num_old, num_new, sz;
    bool need_var[NUM_ENV_TO_PROPAGATE];
    int prop_idx[NUM_ENV_TO_PROPAGATE];
    bool ldpath_us = false, preload_us = false;
    char **new_envp, *var, *old;
    /* check if any var needs to be propagated */
    for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
        prop_idx[j] = -1;
        if (get_config_val(env_to_propagate[j]) == NULL)
            need_var[j] = false;
        else
            need_var[j] = true;
    }
    /* Special handling for DYNAMORIO_VAR_EXECVE_LOGDIR:
     * we only need it if follow_children is true and PROCESS_DIR exists.
     */
    if (DYNAMO_OPTION(follow_children) && get_log_dir(PROCESS_DIR, NULL, NULL))
        need_var[ENV_PROP_EXECVE_LOGDIR] = true;
    else
        need_var[ENV_PROP_EXECVE_LOGDIR] = false;
    if (DYNAMO_OPTION(early_inject))
        need_var[ENV_PROP_EXE_PATH] = true;
    /* iterate the env in target process */
    if (envp == NULL) {
        LOG(THREAD, LOG_SYSCALLS, 3, "\tenv is NULL\n");
        idx = 0;
    } else {
        for (idx = 0; envp[idx] != NULL; idx++) {
            /* execve env vars should never be set here */
            ASSERT(strstr(envp[idx], DYNAMORIO_VAR_EXECVE) != envp[idx]);
            for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
                if (strstr(envp[idx], env_to_propagate[j]) == envp[idx]) {
                    /* If conflict between env and cfg, we assume those env vars
                     * are for DR usage only, and replace them with cfg value.
                     */
                    prop_idx[j] = idx; /* remember the index for replacing later */
                    break;
                }
            }
            if (!DYNAMO_OPTION(early_inject) &&
                strstr(envp[idx], "LD_LIBRARY_PATH=") == envp[idx]) {
                ldpath = idx;
                if (strstr(envp[idx], inject_library_path) != NULL)
                    ldpath_us = true;
            }
            if (!DYNAMO_OPTION(early_inject) &&
                strstr(envp[idx], "LD_PRELOAD=") == envp[idx]) {
                preload = idx;
                if (strstr(envp[idx], DYNAMORIO_PRELOAD_NAME) != NULL &&
                    strstr(envp[idx], DYNAMORIO_LIBRARY_NAME) != NULL) {
                    preload_us = true;
                }
            }
            LOG(THREAD, LOG_SYSCALLS, 3, "\tenv %d: %s\n", idx, envp[idx]);
        }
    }
    /* We want to add new env vars, so we create a new envp
     * array. We have to deallocate them and restore the old
     * envp if execve fails; if execve succeeds, the address
     * space is reset so we don't need to do anything.
     */
    num_old = idx;
    /* how many new env vars we need add */
    num_new =
        2 + /* execve indicator var plus final NULL */
        (DYNAMO_OPTION(early_inject) ? 0 :
         (((preload<0) ? 1 : 0) +
          ((ldpath<0) ? 1 : 0)));
    for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
        /* A var that already exists in envp is replaced in place and does
         * not consume a new slot. */
        if ((DYNAMO_OPTION(follow_children) || j == ENV_PROP_EXE_PATH) &&
            need_var[j] && prop_idx[j] < 0)
            num_new++;
    }
    /* setup new envp */
    new_envp = heap_alloc(dcontext, sizeof(char*)*(num_old+num_new)
                          HEAPACCT(ACCT_OTHER));
    /* copy old envp */
    memcpy(new_envp, envp, sizeof(char*)*num_old);
    /* change/add preload and ldpath if necessary */
    if (!DYNAMO_OPTION(early_inject) && !preload_us) {
        int idx_preload;
        LOG(THREAD, LOG_SYSCALLS, 1,
            "WARNING: execve env does NOT preload DynamoRIO, forcing it!\n");
        if (preload >= 0) {
            /* replace the existing preload */
            sz = strlen(envp[preload]) + strlen(DYNAMORIO_PRELOAD_NAME)+
                strlen(DYNAMORIO_LIBRARY_NAME) + 3;
            var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER));
            old = envp[preload] + strlen("LD_PRELOAD=");
            snprintf(var, sz, "LD_PRELOAD=%s %s %s",
                     DYNAMORIO_PRELOAD_NAME, DYNAMORIO_LIBRARY_NAME, old);
            idx_preload = preload;
        } else {
            /* add new preload */
            sz = strlen("LD_PRELOAD=") + strlen(DYNAMORIO_PRELOAD_NAME) +
                strlen(DYNAMORIO_LIBRARY_NAME) + 2;
            var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER));
            snprintf(var, sz, "LD_PRELOAD=%s %s",
                     DYNAMORIO_PRELOAD_NAME, DYNAMORIO_LIBRARY_NAME);
            idx_preload = idx++;
        }
        *(var+sz-1) = '\0'; /* null terminate */
        new_envp[idx_preload] = var;
        LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n",
            idx_preload, new_envp[idx_preload]);
    }
    if (!DYNAMO_OPTION(early_inject) && !ldpath_us) {
        int idx_ldpath;
        if (ldpath >= 0) {
            /* prepend our lib dir to the existing LD_LIBRARY_PATH */
            sz = strlen(envp[ldpath]) + strlen(inject_library_path) + 2;
            var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER));
            old = envp[ldpath] + strlen("LD_LIBRARY_PATH=");
            snprintf(var, sz, "LD_LIBRARY_PATH=%s:%s", inject_library_path, old);
            idx_ldpath = ldpath;
        } else {
            sz = strlen("LD_LIBRARY_PATH=") + strlen(inject_library_path) + 1;
            var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER));
            snprintf(var, sz, "LD_LIBRARY_PATH=%s", inject_library_path);
            idx_ldpath = idx++;
        }
        *(var+sz-1) = '\0'; /* null terminate */
        new_envp[idx_ldpath] = var;
        LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n",
            idx_ldpath, new_envp[idx_ldpath]);
    }
    /* propagating DR env vars */
    for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
        const char *val = "";
        if (!need_var[j])
            continue;
        if (!DYNAMO_OPTION(follow_children) && j != ENV_PROP_EXE_PATH)
            continue;
        switch (j) {
        case ENV_PROP_RUNUNDER:
            ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_RUNUNDER) == 0);
            /* Must pass RUNUNDER_ALL to get child injected if has no app config.
             * If rununder var is already set we assume it's set to 1.
             */
            ASSERT((RUNUNDER_ON | RUNUNDER_ALL) == 0x3); /* else, update "3" */
            val = "3";
            break;
        case ENV_PROP_OPTIONS:
            ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_OPTIONS) == 0);
            val = option_string;
            break;
        case ENV_PROP_EXECVE_LOGDIR:
            /* we use PROCESS_DIR for DYNAMORIO_VAR_EXECVE_LOGDIR */
            ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXECVE_LOGDIR) == 0);
            ASSERT(get_log_dir(PROCESS_DIR, NULL, NULL));
            break;
        case ENV_PROP_EXE_PATH:
            ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXE_PATH) == 0);
            val = app_path;
            break;
        default:
            val = getenv(env_to_propagate[j]);
            if (val == NULL)
                val = "";
            break;
        }
        if (j == ENV_PROP_EXECVE_LOGDIR) {
            /* value is fetched via get_log_dir() rather than val above */
            uint logdir_length;
            get_log_dir(PROCESS_DIR, NULL, &logdir_length);
            /* logdir_length includes the terminating NULL */
            sz = strlen(DYNAMORIO_VAR_EXECVE_LOGDIR) + logdir_length + 1/* '=' */;
            var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER));
            snprintf(var, sz, "%s=", DYNAMORIO_VAR_EXECVE_LOGDIR);
            get_log_dir(PROCESS_DIR, var+strlen(var), &logdir_length);
        } else {
            sz = strlen(env_to_propagate[j]) + strlen(val) + 2 /* '=' + null */;
            var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER));
            snprintf(var, sz, "%s=%s", env_to_propagate[j], val);
        }
        *(var+sz-1) = '\0'; /* null terminate */
        prop_idx[j] = (prop_idx[j] >= 0) ? prop_idx[j] : idx++;
        new_envp[prop_idx[j]] = var;
        LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n",
            prop_idx[j], new_envp[prop_idx[j]]);
    }
    if (!DYNAMO_OPTION(follow_children) && !DYNAMO_OPTION(early_inject)) {
        if (prop_idx[ENV_PROP_RUNUNDER] >= 0) {
            /* disable auto-following of this execve, yet still allow preload
             * on other side to inject if config file exists.
             * kind of hacky mangle here:
             */
            ASSERT(!need_var[ENV_PROP_RUNUNDER]);
            ASSERT(new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] == 'D');
            new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] = 'X';
        }
    }
    sz = strlen(DYNAMORIO_VAR_EXECVE) + 4;
    /* we always pass this var to indicate "post-execve" */
    var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER));
    /* PR 458917: we overload this to also pass our gdt index */
    ASSERT(os_tls_get_gdt_index(dcontext) < 100 &&
           os_tls_get_gdt_index(dcontext) >= -1); /* only 2 chars allocated */
    snprintf(var, sz, "%s=%02d", DYNAMORIO_VAR_EXECVE, os_tls_get_gdt_index(dcontext));
    *(var+sz-1) = '\0'; /* null terminate */
    new_envp[idx++] = var;
    LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx-1, new_envp[idx-1]);
    /* must end with NULL */
    new_envp[idx++] = NULL;
    ASSERT((num_new + num_old) == idx);
    /* update syscall param */
    *sys_param_addr(dcontext, 2) = (reg_t) new_envp; /* OUT */
    /* store for reset in case execve fails, and for cleanup if
     * this is a vfork thread
     */
    dcontext->sys_param0 = (reg_t) envp;
    dcontext->sys_param1 = (reg_t) new_envp;
}
/* Reader callback for find_script_interpreter(): reads up to count bytes of
 * pathname into buf.  Returns the number of bytes read, or -1 if the file
 * cannot be opened (or if the read itself fails).
 */
static ssize_t
script_file_reader(const char *pathname, void *buf, size_t count)
{
    /* FIXME i#2090: Check file is executable. */
    file_t file = os_open(pathname, OS_OPEN_READ);
    /* Fixed: len was declared size_t, silently converting os_read's signed
     * result; keep it signed so a read error propagates as a negative value.
     */
    ssize_t len;
    if (file == INVALID_FILE)
        return -1;
    len = os_read(file, buf, count);
    os_close(file);
    return len;
}
/* For early injection, recognise when the executable is a script ("#!") and
* modify the syscall parameters to invoke a script interpreter instead. In
* this case we will have allocated memory here but we expect the caller to
* do a non-failing execve of libdynamorio.so and therefore not to have to
* free the memory. That is one reason for checking that the (final) script
* interpreter really is an executable binary.
* We recognise one error case here and return the non-zero error code (ELOOP)
* but in other cases we leave it up to the caller to detect the error, which
* it may do by attempting to exec the path natively, expecting this to fail,
* though there is the obvious danger that the file might have been modified
* just before the exec.
* We do not, and cannot easily, handle a file that is executable but not
* readable. Currently such files will be executed without DynamoRIO though
* in some situations it would be more helpful to stop with an error.
*
* XXX: There is a minor transparency bug with misformed binaries. For example,
* execve can return EINVAL if the ELF executable has more than one PT_INTERP
* segment but we do not check this and so under DynamoRIO the error would be
* detected only after the exec, if we are following the child.
*
* FIXME i#2091: There is a memory leak if a script is recognised, and it is
* later decided not to inject (see where should_inject is set), and the exec
* fails, because in this case there is no mechanism for freeing the memory
* allocated in this function. This function should return sufficient information
* for the caller to free the memory, which it can do so before the exec if it
* reverts to the original syscall arguments and execs the script.
*/
/* Rewrites the pre-execve syscall args when the target is a "#!" script:
 * param 0 becomes the (final) interpreter path and param 1 a new argv with
 * the interpreter's args prepended.  Returns 0 on success or when the target
 * is not a script (leaving args untouched); returns ELOOP on interpreter
 * recursion.  See the block comment above for the ownership/leak caveats of
 * the memory allocated here (xref FIXME i#2091).
 */
static int
handle_execve_script(dcontext_t *dcontext)
{
    char *fname = (char *)sys_param(dcontext, 0);
    char **orig_argv = (char **)sys_param(dcontext, 1);
    script_interpreter_t *script;
    int ret = 0;
    script = global_heap_alloc(sizeof(*script) HEAPACCT(ACCT_OTHER));
    if (!find_script_interpreter(script, fname, script_file_reader))
        goto free_and_return;
    if (script->argc == 0) {
        /* argc == 0 signals interpreter recursion beyond the supported depth */
        ret = ELOOP;
        goto free_and_return;
    }
    /* Check that the final interpreter is an executable binary. */
    {
        file_t file = os_open(script->argv[0], OS_OPEN_READ);
        bool is64;
        if (file == INVALID_FILE)
            goto free_and_return;
        if (!module_file_is_module64(file, &is64, NULL)) {
            os_close(file);
            goto free_and_return;
        }
    }
    {
        size_t i, orig_argc = 0;
        char **new_argv;
        /* Concatenate new arguments and original arguments. */
        while (orig_argv[orig_argc] != NULL)
            ++orig_argc;
        if (orig_argc == 0)
            orig_argc = 1; /* pretend there was an argv[0] slot to replace */
        new_argv = global_heap_alloc((script->argc + orig_argc + 1) * sizeof(char *)
                                     HEAPACCT(ACCT_OTHER));
        for (i = 0; i < script->argc; i++)
            new_argv[i] = script->argv[i];
        new_argv[script->argc] = fname; /* replaces orig_argv[0] */
        for (i = 1; i < orig_argc; i++)
            new_argv[script->argc + i] = orig_argv[i];
        new_argv[script->argc + orig_argc] = NULL;
        /* Modify syscall parameters. */
        *sys_param_addr(dcontext, 0) = (reg_t)new_argv[0];
        *sys_param_addr(dcontext, 1) = (reg_t)new_argv;
    }
    return 0;
 free_and_return:
    global_heap_free(script, sizeof(*script) HEAPACCT(ACCT_OTHER));
    return ret;
}
/* Pre-syscall handler for SYS_execve: arranges for DR to regain control in
 * the new image.  Depending on configuration this (a) rewrites script
 * targets (handle_execve_script), (b) injects DR env vars into the child's
 * envp (add_dr_env_vars), and (c) for -early_inject redirects the exec
 * target to libdynamorio.so.  Saved state for handle_execve_post():
 * sys_param0/1 = old/new envp, sys_param3 = argv to restore (or 0),
 * sys_param4 = original fname (or 0).  Returns 0 or an errno to fail with.
 */
static int
handle_execve(dcontext_t *dcontext)
{
    /* in /usr/src/linux/arch/i386/kernel/process.c:
     *   asmlinkage int sys_execve(struct pt_regs regs) { ...
     *     error = do_execve(filename, (char **) regs.xcx, (char **) regs.xdx, ®s);
     * in fs/exec.c:
     *   int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
     */
    /* We need to make sure we get injected into the new image:
     * we simply make sure LD_PRELOAD contains us, and that our directory
     * is on LD_LIBRARY_PATH (seems not to work to put absolute paths in
     * LD_PRELOAD).
     * FIXME: this doesn't work for setuid programs
     *
     * For -follow_children we also pass the current DYNAMORIO_RUNUNDER and
     * DYNAMORIO_OPTIONS and logdir to the new image to support a simple
     * run-all-children model without bothering w/ setting up config files for
     * children, and to support injecting across execve that does not
     * preserve $HOME.
     * FIXME i#287/PR 546544: we'll need to propagate DYNAMORIO_AUTOINJECT too
     * once we use it in preload
     */
    /* FIXME i#191: supposed to preserve things like pending signal
     * set across execve: going to ignore for now
     */
    char *fname;
    bool x64 = IF_X64_ELSE(true, false);
    bool expect_to_fail = false;
    bool should_inject;
    file_t file;
    char *inject_library_path;
    char rununder_buf[16]; /* just an integer printed in ascii */
    bool app_specific, from_env, rununder_on;
#if defined(LINUX) || defined(DEBUG)
    const char **argv;
#endif
    if (DYNAMO_OPTION(follow_children) && DYNAMO_OPTION(early_inject)) {
        int ret = handle_execve_script(dcontext);
        if (ret != 0)
            return ret;
    }
    fname = (char *)sys_param(dcontext, 0);
#if defined(LINUX) || defined(DEBUG)
    argv = (const char **)sys_param(dcontext, 1);
#endif
#ifdef LINUX
    if (DYNAMO_OPTION(early_inject) && symlink_is_self_exe(fname)) {
        /* i#907: /proc/self/exe points at libdynamorio.so. Make sure we run
         * the right thing here.
         */
        fname = get_application_name();
    }
#endif
    LOG(GLOBAL, LOG_ALL, 1, "\n---------------------------------------------------------------------------\n");
    LOG(THREAD, LOG_ALL, 1, "\n---------------------------------------------------------------------------\n");
    DODEBUG({
        int i;
        SYSLOG_INTERNAL_INFO("-- execve %s --", fname);
        LOG(THREAD, LOG_SYSCALLS, 1, "syscall: execve %s\n", fname);
        LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 1, "execve %s\n", fname);
        if (stats->loglevel >= 3) {
            if (argv == NULL) {
                LOG(THREAD, LOG_SYSCALLS, 3, "\targs are NULL\n");
            } else {
                for (i = 0; argv[i] != NULL; i++) {
                    LOG(THREAD, LOG_SYSCALLS, 2, "\targ %d: len=%d\n",
                        i, strlen(argv[i]));
                    LOG(THREAD, LOG_SYSCALLS, 3, "\targ %d: %s\n",
                        i, argv[i]);
                }
            }
        }
    });
    /* i#237/PR 498284: if we're a vfork "thread" we're really in a different
     * process and if we exec then the parent process will still be alive. We
     * can't easily clean our own state (dcontext, dstack, etc.) up in our
     * parent process: we need it to invoke the syscall and the syscall might
     * fail. We could expand cleanup_and_terminate to also be able to invoke
     * SYS_execve: but execve seems more likely to fail than termination
     * syscalls. Our solution is to mark this thread as "execve" and hide it
     * from regular thread queries; we clean it up in the process-exiting
     * synch_with_thread(), or if the same parent thread performs another vfork
     * (to prevent heap accumulation from repeated vfork+execve). Since vfork
     * on linux suspends the parent, there cannot be any races with the execve
     * syscall completing: there can't even be peer vfork threads, so we could
     * set a flag and clean up in dispatch, but that seems overkill. (If vfork
     * didn't suspend the parent we'd need to touch a marker file or something
     * to know the execve was finished.)
     */
    mark_thread_execve(dcontext->thread_record, true);
#ifdef STATIC_LIBRARY
    /* no way we can inject, we just lose control */
    SYSLOG_INTERNAL_WARNING("WARNING: static DynamoRIO library, losing control on execve");
    return 0;
#endif
    /* Issue 20: handle cross-architecture execve */
    /* Xref alternate solution i#145: use dual paths on
     * LD_LIBRARY_PATH to solve cross-arch execve
     */
    file = os_open(fname, OS_OPEN_READ);
    if (file != INVALID_FILE) {
        if (!module_file_is_module64(file, &x64, NULL/*only care about primary==execve*/))
            expect_to_fail = true;
        os_close(file);
    } else
        expect_to_fail = true;
    /* pick same-arch vs alternate-arch DR library dir for the child */
    inject_library_path = IF_X64_ELSE(x64, !x64) ? dynamorio_library_path :
        dynamorio_alt_arch_path;
    should_inject = DYNAMO_OPTION(follow_children);
    /* per-app config can override the follow_children default */
    if (get_config_val_other_app(get_short_name(fname), get_process_id(),
                                 x64 ? DR_PLATFORM_64BIT : DR_PLATFORM_32BIT,
                                 DYNAMORIO_VAR_RUNUNDER,
                                 rununder_buf, BUFFER_SIZE_ELEMENTS(rununder_buf),
                                 &app_specific, &from_env, NULL /* 1config is ok */)) {
        if (should_inject_from_rununder(rununder_buf, app_specific, from_env,
                                        &rununder_on))
            should_inject = rununder_on;
    }
    if (should_inject)
        add_dr_env_vars(dcontext, inject_library_path, fname);
    else {
        dcontext->sys_param0 = 0;
        dcontext->sys_param1 = 0;
    }
#ifdef LINUX
    /* We have to be accurate with expect_to_fail as we cannot come back
     * and fail the syscall once the kernel execs DR!
     */
    if (should_inject && DYNAMO_OPTION(early_inject) && !expect_to_fail) {
        /* i#909: change the target image to libdynamorio.so */
        const char *drpath = IF_X64_ELSE(x64, !x64) ? dynamorio_library_filepath :
            dynamorio_alt_arch_filepath;
        TRY_EXCEPT(dcontext, /* try */ {
            if (symlink_is_self_exe(argv[0])) {
                /* we're out of sys_param entries so we assume argv[0] == fname */
                dcontext->sys_param3 = (reg_t) argv;
                argv[0] = fname; /* XXX: handle readable but not writable! */
            } else
                dcontext->sys_param3 = 0; /* no restore in post */
            dcontext->sys_param4 = (reg_t) fname; /* store for restore in post */
            *sys_param_addr(dcontext, 0) = (reg_t) drpath;
            LOG(THREAD, LOG_SYSCALLS, 2, "actual execve on: %s\n",
                (char *)sys_param(dcontext, 0));
        }, /* except */ {
            dcontext->sys_param3 = 0; /* no restore in post */
            dcontext->sys_param4 = 0; /* no restore in post */
            LOG(THREAD, LOG_SYSCALLS, 2, "argv is unreadable, expect execve to fail\n");
        });
    } else {
        dcontext->sys_param3 = 0; /* no restore in post */
        dcontext->sys_param4 = 0; /* no restore in post */
    }
#endif
    /* we need to clean up the .1config file here. if the execve fails,
     * we'll just live w/o dynamic option re-read.
     */
    config_exit();
    return 0;
}
/* Post-syscall handler for execve: undoes the environment and /proc-exe
 * changes made by the pre-execve handler and frees the heap we allocated
 * for the replacement envp.  Reads the values stashed in sys_param0-4 by
 * the pre handler.
 */
static void
handle_execve_post(dcontext_t *dcontext)
{
    /* if we get here it means execve failed (doesn't return on success),
     * or we did an execve from a vfork and its memory changes are visible
     * in the parent process.
     * we have to restore env to how it was and free the allocated heap.
     */
    char **old_envp = (char **) dcontext->sys_param0;
    char **new_envp = (char **) dcontext->sys_param1;
#ifdef STATIC_LIBRARY
    /* nothing to clean up */
    return;
#endif
#ifdef LINUX
    if (dcontext->sys_param4 != 0) {
        /* restore original /proc/.../exe */
        *sys_param_addr(dcontext, 0) = dcontext->sys_param4;
        if (dcontext->sys_param3 != 0) {
            /* restore original argv[0] */
            const char **argv = (const char **) dcontext->sys_param3;
            argv[0] = (const char *) dcontext->sys_param4;
        }
    }
#endif
    if (new_envp != NULL) {
        int i;
        LOG(THREAD, LOG_SYSCALLS, 2, "\tcleaning up our env vars\n");
        /* we replaced existing ones and/or added new ones.
         * we can't compare to old_envp b/c it may have changed by now.
         */
        /* Free only the strings we allocated ourselves: entries that live in
         * DR's heap.  App-owned strings we merely pointed at are left alone.
         */
        for (i=0; new_envp[i] != NULL; i++) {
            if (is_dynamo_address((byte *)new_envp[i])) {
                heap_free(dcontext, new_envp[i],
                          sizeof(char)*(strlen(new_envp[i])+1)
                          HEAPACCT(ACCT_OTHER));
            }
        }
        i++; /* need to de-allocate final null slot too */
        heap_free(dcontext, new_envp, sizeof(char*)*i HEAPACCT(ACCT_OTHER));
        /* restore prev envp if we're post-syscall */
        if (!dcontext->thread_record->execve)
            *sys_param_addr(dcontext, 2) = (reg_t) old_envp;
    }
}
/* i#237/PR 498284: a vfork child that invoked execve leaves thread state
 * behind; we clean any such threads up here so that at most one is ever
 * outstanding.  The same cleanup also runs at process exit and before
 * thread creation; doing it in dispatch would cost a flag check for an
 * event that is too rare to justify one.
 */
static void
cleanup_after_vfork_execve(dcontext_t *dcontext)
{
    thread_record_t **trecs;
    int count;
    int idx;
    /* Fast path: nothing to do when no execve threads are outstanding. */
    if (num_execve_threads == 0)
        return;
    mutex_lock(&thread_initexit_lock);
    get_list_of_threads_ex(&trecs, &count, true/*include execve*/);
    idx = 0;
    while (idx < count) {
        thread_record_t *tr = trecs[idx];
        if (tr->execve) {
            LOG(THREAD, LOG_SYSCALLS, 2, "cleaning up earlier vfork thread "TIDFMT"\n",
                tr->id);
            dynamo_other_thread_exit(tr);
        }
        idx++;
    }
    mutex_unlock(&thread_initexit_lock);
    global_heap_free(trecs, count*sizeof(thread_record_t*)
                     HEAPACCT(ACCT_THREAD_MGT));
}
/* returns whether to execute syscall */
/* Pre-syscall handler for SYS_close: refuses to let the app close
 * DR-owned descriptors, and (per the dup_*_on_close options) duplicates
 * stdout/stderr/stdin before the app closes them so DR and clients can
 * keep using them for logging.
 */
static bool
handle_close_pre(dcontext_t *dcontext)
{
    /* in fs/open.c: asmlinkage long sys_close(unsigned int fd) */
    uint fd = (uint) sys_param(dcontext, 0);
    LOG(THREAD, LOG_SYSCALLS, 3, "syscall: close fd %d\n", fd);
    /* prevent app from closing our files */
    if (fd_is_dr_owned(fd)) {
        SYSLOG_INTERNAL_WARNING_ONCE("app trying to close DR file(s)");
        LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
            "WARNING: app trying to close DR file %d!  Not allowing it.\n", fd);
        if (DYNAMO_OPTION(fail_on_stolen_fds)) {
            set_failure_return_val(dcontext, EBADF);
            DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
        } else
            set_success_return_val(dcontext, 0);
        return false; /* do not execute syscall */
    }
    /* Xref PR 258731 - duplicate STDOUT/STDERR when app closes them so we (or
     * a client) can continue to use them for logging. */
    if (DYNAMO_OPTION(dup_stdout_on_close) && fd == STDOUT) {
        our_stdout = fd_priv_dup(fd);
        if (our_stdout < 0) /* no private fd available */
            our_stdout = dup_syscall(fd);
        if (our_stdout >= 0)
            fd_mark_close_on_exec(our_stdout);
        /* Track the dup so later closes of it are also protected. */
        fd_table_add(our_stdout, 0);
        LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
            "WARNING: app is closing stdout=%d - duplicating descriptor for "
            "DynamoRIO usage got %d.\n", fd, our_stdout);
        if (privmod_stdout != NULL &&
            IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
            /* update the privately loaded libc's stdout _fileno. */
            (*privmod_stdout)->STDFILE_FILENO = our_stdout;
        }
    }
    if (DYNAMO_OPTION(dup_stderr_on_close) && fd == STDERR) {
        our_stderr = fd_priv_dup(fd);
        if (our_stderr < 0) /* no private fd available */
            our_stderr = dup_syscall(fd);
        if (our_stderr >= 0)
            fd_mark_close_on_exec(our_stderr);
        fd_table_add(our_stderr, 0);
        LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
            "WARNING: app is closing stderr=%d - duplicating descriptor for "
            "DynamoRIO usage got %d.\n", fd, our_stderr);
        if (privmod_stderr != NULL &&
            IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
            /* update the privately loaded libc's stderr _fileno. */
            (*privmod_stderr)->STDFILE_FILENO = our_stderr;
        }
    }
    if (DYNAMO_OPTION(dup_stdin_on_close) && fd == STDIN) {
        our_stdin = fd_priv_dup(fd);
        if (our_stdin < 0) /* no private fd available */
            our_stdin = dup_syscall(fd);
        if (our_stdin >= 0)
            fd_mark_close_on_exec(our_stdin);
        fd_table_add(our_stdin, 0);
        LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
            "WARNING: app is closing stdin=%d - duplicating descriptor for "
            "DynamoRIO usage got %d.\n", fd, our_stdin);
        if (privmod_stdin != NULL &&
            IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
            /* update the privately loaded libc's stdin _fileno.
             * (Comment previously said "stdout" -- copy-paste slip; the code
             * clearly updates privmod_stdin.)
             */
            (*privmod_stdin)->STDFILE_FILENO = our_stdin;
        }
    }
    return true;
}
/***************************************************************************/
/* Used to obtain the pc of the syscall instr itself when the dcontext dc
 * is currently in a syscall handler.
 * Alternatively for sysenter we could set app_sysenter_instr_addr for Linux.
 */
/* For int/syscall methods the syscall instr length equals INT_LENGTH, so we
 * back up from the post-syscall pc; for sysenter we rely on the fixed layout
 * of the vsyscall page instead.
 */
#define SYSCALL_PC(dc) \
 ((get_syscall_method() == SYSCALL_METHOD_INT ||     \
   get_syscall_method() == SYSCALL_METHOD_SYSCALL) ? \
  (ASSERT(SYSCALL_LENGTH == INT_LENGTH),             \
   POST_SYSCALL_PC(dc) - INT_LENGTH) :               \
  (vsyscall_syscall_end_pc - SYSENTER_LENGTH))
/* Handles SYS_exit / SYS_exit_group (SYSNUM_EXIT_PROCESS): decides whether
 * this exit ends just the calling thread, its thread group, or the whole
 * process under DR; synchronizes with and cleans up sibling threads as
 * needed; and finishes in cleanup_and_terminate (does not return).
 */
static void
handle_exit(dcontext_t *dcontext)
{
    priv_mcontext_t *mc = get_mcontext(dcontext);
    bool exit_process = false;
    if (dcontext->sys_num == SYSNUM_EXIT_PROCESS) {
        /* We can have multiple thread groups within the same address space.
         * We need to know whether this is the only group left.
         * FIXME: we can have races where new threads are created after our
         * check: we'll live with that for now, but the right approach is to
         * suspend all threads via synch_with_all_threads(), do the check,
         * and if exit_process then exit w/o resuming: though have to
         * coordinate lock access w/ cleanup_and_terminate.
         * Xref i#94.  Xref PR 541760.
         */
        process_id_t mypid = get_process_id();
        thread_record_t **threads;
        int num_threads, i;
        exit_process = true;
        mutex_lock(&thread_initexit_lock);
        get_list_of_threads(&threads, &num_threads);
        /* Any thread from a different pid (another thread group in this
         * address space) means this exit_group does not end the process.
         */
        for (i=0; i<num_threads; i++) {
            if (threads[i]->pid != mypid && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
                exit_process = false;
                break;
            }
        }
        if (!exit_process) {
            /* We need to clean up the other threads in our group here. */
            thread_id_t myid = get_thread_id();
            priv_mcontext_t mcontext;
            DEBUG_DECLARE(thread_synch_result_t synch_res;)
            LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
                "SYS_exit_group %d not final group: %d cleaning up just "
                "threads in group\n", get_process_id(), get_thread_id());
            /* Set where we are to handle reciprocal syncs */
            copy_mcontext(mc, &mcontext);
            mc->pc = SYSCALL_PC(dcontext);
            for (i=0; i<num_threads; i++) {
                if (threads[i]->id != myid && threads[i]->pid == mypid) {
                    /* See comments in dynamo_process_exit_cleanup(): we terminate
                     * to make cleanup easier, but may want to switch to shifting
                     * the target thread to a stack-free loop.
                     */
                    DEBUG_DECLARE(synch_res =)
                        synch_with_thread(threads[i]->id, true/*block*/,
                                          true/*have initexit lock*/,
                                          THREAD_SYNCH_VALID_MCONTEXT,
                                          THREAD_SYNCH_TERMINATED_AND_CLEANED,
                                          THREAD_SYNCH_SUSPEND_FAILURE_IGNORE);
                    /* initexit lock may be released and re-acquired in course of
                     * doing the synch so we may have races where the thread
                     * exits on its own (or new threads appear): we'll live
                     * with those for now.
                     */
                    ASSERT(synch_res == THREAD_SYNCH_RESULT_SUCCESS);
                }
            }
            /* Restore the mcontext we temporarily repointed at the syscall pc. */
            copy_mcontext(&mcontext, mc);
        }
        mutex_unlock(&thread_initexit_lock);
        global_heap_free(threads, num_threads*sizeof(thread_record_t*)
                         HEAPACCT(ACCT_THREAD_MGT));
    }
    if (is_last_app_thread() && !dynamo_exited) {
        LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
            "SYS_exit%s(%d) in final thread "TIDFMT" of "PIDFMT" => exiting DynamoRIO\n",
            (dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
            MCXT_SYSNUM_REG(mc),
            get_thread_id(), get_process_id());
        /* we want to clean up even if not automatic startup! */
        automatic_startup = true;
        exit_process = true;
    } else {
        LOG(THREAD, LOG_TOP|LOG_THREADS|LOG_SYSCALLS, 1,
            "SYS_exit%s(%d) in thread "TIDFMT" of "PIDFMT" => cleaning up %s\n",
            (dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
            MCXT_SYSNUM_REG(mc), get_thread_id(), get_process_id(),
            exit_process ? "process" : "thread");
    }
    KSTOP(num_exits_dir_syscall);
    cleanup_and_terminate(dcontext, MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0),
                          sys_param(dcontext, 1), exit_process,
                          /* SYS_bsdthread_terminate has 2 more args */
                          sys_param(dcontext, 2), sys_param(dcontext, 3));
}
#if defined(LINUX) && defined(X86) /* XXX i#58: just until we have Mac support */
/* Maintains our per-thread shadow copy of the app's GDT TLS areas
 * (ostd->app_thread_areas) in response to the app's set_thread_area.
 * For a not-present descriptor we allocate a free shadow slot; otherwise we
 * update the slot named by user_desc->entry_number, handing out the GDT
 * entry stolen for private-library TLS on the app's first -1 request after
 * early injection.
 * NOTE(review): the precise contract of the return value depends on the
 * SYS_set_thread_area caller (not visible here): false is returned both for
 * invalid requests and when the entry matches one of DR's own TLS selectors
 * -- confirm against the pre_system_call dispatch.
 */
static bool
os_set_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
#ifdef X86
    int i;
    /* Explicit cast added for consistency with os_get_app_thread_area
     * (os_field is an opaque pointer).
     */
    os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
    our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
    if (user_desc->seg_not_present == 1) {
        /* find an empty one to update */
        for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
            if (desc[i].seg_not_present == 1)
                break;
        }
        if (i < GDT_NUM_TLS_SLOTS) {
            user_desc->entry_number = GDT_SELECTOR(i + tls_min_index());
            memcpy(&desc[i], user_desc, sizeof(*user_desc));
        } else
            return false;
    } else {
        /* If we used early injection, this might be ld.so trying to set up TLS.  We
         * direct the app to use the GDT entry we already set up for our private
         * libraries, but only the first time it requests TLS.
         */
        if (user_desc->entry_number == -1 && return_stolen_lib_tls_gdt) {
            /* Double-checked under the lock since the flag is a one-shot. */
            mutex_lock(&set_thread_area_lock);
            if (return_stolen_lib_tls_gdt) {
                uint selector = read_thread_register(LIB_SEG_TLS);
                uint index = SELECTOR_INDEX(selector);
                SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
                return_stolen_lib_tls_gdt = false;
                SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
                user_desc->entry_number = index;
                LOG(GLOBAL, LOG_THREADS, 2, "%s: directing app to use "
                    "selector 0x%x for first call to set_thread_area\n",
                    __FUNCTION__, selector);
            }
            mutex_unlock(&set_thread_area_lock);
        }
        /* update the specific one */
        i = user_desc->entry_number - tls_min_index();
        if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
            return false;
        LOG(GLOBAL, LOG_THREADS, 2,
            "%s: change selector 0x%x base from "PFX" to "PFX"\n",
            __FUNCTION__, GDT_SELECTOR(user_desc->entry_number),
            desc[i].base_addr, user_desc->base_addr);
        memcpy(&desc[i], user_desc, sizeof(*user_desc));
    }
    /* if not conflict with dr's tls, perform the syscall */
    if (IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true) &&
        GDT_SELECTOR(user_desc->entry_number) != read_thread_register(SEG_TLS) &&
        GDT_SELECTOR(user_desc->entry_number) != read_thread_register(LIB_SEG_TLS))
        return false;
#elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
    return true;
}
/* Looks up the app's shadow GDT TLS area for user_desc->entry_number.
 * Returns false when the entry number is outside our shadow range or the
 * corresponding slot holds no present descriptor; true otherwise.
 */
static bool
os_get_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
#ifdef X86
    os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
    our_modify_ldt_t *areas = (our_modify_ldt_t *)ostd->app_thread_areas;
    int slot = user_desc->entry_number - tls_min_index();
    /* Reject out-of-range slots and slots with nothing installed. */
    if (slot < 0 || slot >= GDT_NUM_TLS_SLOTS || areas[slot].seg_not_present == 1)
        return false;
#elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
    return true;
}
#endif
/* This function is used for switch lib tls segment on creating thread.
 * We switch to app's lib tls seg before thread creation system call, i.e.
 * clone and vfork, and switch back to dr's lib tls seg after the system call.
 * They are only called on parent thread, not the child thread.
 * The child thread's tls is setup in os_tls_app_seg_init.
 */
/* XXX: It looks like the Linux kernel has some dependency on the segment
 * descriptor. If using dr's segment descriptor, the created thread will have
 * access violation for tls not being setup. However, it works fine if we switch
 * the descriptor to app's segment descriptor before creating the thread.
 * We should be able to remove this function later if we find the problem.
 */
/* Returns whether the segment switch succeeded (thin wrapper around
 * os_switch_seg_to_context for LIB_SEG_TLS).
 */
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app)
{
    return os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
}
#ifdef X86
/* Points the given TLS segment register (DR's seg or the lib TLS seg) at
 * base for the executing thread, using whichever mechanism this thread's
 * TLS was set up with (arch_prctl, GDT via set_thread_area, or LDT).
 * Returns whether the switch succeeded.
 * NOTE: an older comment here claimed dcontext could be NULL when !to_app,
 * but every path below uses dcontext (LOG, os_field) and we assert it is
 * non-NULL; callers must pass a valid dcontext.
 */
static bool
os_switch_seg_to_base(dcontext_t *dcontext, os_local_state_t *os_tls, reg_id_t seg,
                      bool to_app, app_pc base)
{
    bool res = false;
    ASSERT(dcontext != NULL);
    ASSERT(IF_X86_ELSE((seg == SEG_FS || seg == SEG_GS),
                       (seg == DR_REG_TPIDRURW || DR_REG_TPIDRURO)));
    switch (os_tls->tls_type) {
# ifdef X64
    case TLS_TYPE_ARCH_PRCTL: {
        res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, base, NULL);
        ASSERT(res);
        LOG(GLOBAL, LOG_THREADS, 2,
            "%s %s: arch_prctl successful for thread "TIDFMT" base "PFX"\n",
            __FUNCTION__, to_app ? "to app" : "to DR", get_thread_id(), base);
        if (seg == SEG_TLS && base == NULL) {
            /* Set the selector to 0 so we don't think TLS is available. */
            /* FIXME i#107: Still assumes app isn't using SEG_TLS. */
            reg_t zero = 0;
            WRITE_DR_SEG(zero);
        }
        break;
    }
# endif
    case TLS_TYPE_GDT: {
        our_modify_ldt_t desc;
        uint index;
        uint selector;
        if (to_app) {
            selector = os_tls->app_lib_tls_reg;
            index = SELECTOR_INDEX(selector);
        } else {
            index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
            ASSERT(index != -1 && "TLS indices not initialized");
            selector = GDT_SELECTOR(index);
        }
        if (selector != 0) {
            if (to_app) {
                our_modify_ldt_t *areas =
                    ((os_thread_data_t *)dcontext->os_field)->app_thread_areas;
                /* Valid shadow slots are [0, GDT_NUM_TLS_SLOTS), matching the
                 * bounds checks in os_set/get_app_thread_area, so the upper
                 * bound must be strict: the previous "<=" permitted a
                 * one-past-the-end read of app_thread_areas below.
                 */
                ASSERT((index >= tls_min_index()) &&
                       ((index - tls_min_index()) < GDT_NUM_TLS_SLOTS));
                desc = areas[index - tls_min_index()];
            } else {
                tls_init_descriptor(&desc, base, GDT_NO_SIZE_LIMIT, index);
            }
            res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, NULL, &desc);
            ASSERT(res);
        } else {
            /* For a selector of zero, we just reset the segment to zero.  We
             * don't need to call set_thread_area.
             */
            res = true;  /* Indicate success. */
        }
        /* XXX i#2098: it is unsafe to call LOG here in between GDT and register changes */
        /* i558 update lib seg reg to enforce the segment changes */
        if (seg == SEG_TLS)
            WRITE_DR_SEG(selector);
        else
            WRITE_LIB_SEG(selector);
        LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
            __FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
        LOG(THREAD, LOG_LOADER, 2,
            "%s %s: set_thread_area successful for thread "TIDFMT" base "PFX"\n",
            __FUNCTION__, to_app ? "to app" : "to DR", get_thread_id(), base);
        break;
    }
    case TLS_TYPE_LDT: {
        uint index;
        uint selector;
        /* XXX i#1285: added for MacOS private loader, but we don't
         * have enough other code to test this yet.
         */
        ASSERT_NOT_TESTED();
        if (to_app) {
            selector = os_tls->app_lib_tls_reg;
            index = SELECTOR_INDEX(selector);
        } else {
            index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
            ASSERT(index != -1 && "TLS indices not initialized");
            selector = LDT_SELECTOR(index);
        }
        LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
            __FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
        if (seg == SEG_TLS)
            WRITE_DR_SEG(selector);
        else
            WRITE_LIB_SEG(selector);
        LOG(THREAD, LOG_LOADER, 2,
            "%s %s: ldt selector swap successful for thread "TIDFMT"\n",
            __FUNCTION__, to_app ? "to app" : "to DR", get_thread_id());
        break;
    }
    default:
        ASSERT_NOT_REACHED();
        return false;
    }
    ASSERT((!to_app && seg == SEG_TLS) ||
           BOOLS_MATCH(to_app, os_using_app_state(dcontext)));
    return res;
}
/* Points DR's own TLS segment (SEG_TLS) at base for the executing thread.
 * The tls arg may be NULL, in which case it is looked up from dcontext.
 */
static bool
os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base)
{
    os_local_state_t *os_tls = tls;
    if (os_tls == NULL) {
        ASSERT(dcontext != NULL);
        os_tls = get_os_tls_from_dc(dcontext);
    }
    return os_switch_seg_to_base(dcontext, os_tls, SEG_TLS, false/*to DR*/, base);
}
#endif /* X86 */
/* Switches the given TLS segment between the app's and DR's context for the
 * currently executing thread.  On x86 this resolves the target base and
 * delegates to os_switch_seg_to_base; on AArch32 it swaps DR's TLS-base
 * stash between the app's and the private lib's TLS slots and rewrites the
 * thread register.  Returns whether the switch succeeded.
 */
static bool
os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app)
{
    os_local_state_t *os_tls = get_os_tls_from_dc(dcontext);
#ifdef X86
    app_pc base;
    /* we can only update the executing thread's segment (i#920) */
    ASSERT_MESSAGE(CHKLVL_ASSERTS+1/*expensive*/, "can only act on executing thread",
                   /* i#2089: a clone syscall, or when native, temporarily puts in
                    * invalid TLS, so we don't check get_thread_private_dcontext().
                    */
                   is_thread_tls_allocated() &&
                   dcontext->owning_thread == get_sys_thread_id());
    if (to_app) {
        base = os_get_app_tls_base(dcontext, seg);
    } else {
        base = os_get_priv_tls_base(dcontext, seg);
    }
    return os_switch_seg_to_base(dcontext, os_tls, seg, to_app, base);
#elif defined(AARCHXX)
    bool res = false;
    os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
    ASSERT(INTERNAL_OPTION(private_loader));
    if (to_app) {
        /* On switching to app's TLS, we need put DR's TLS base into app's TLS
         * at the same offset so it can be loaded on entering code cache.
         * Otherwise, the context switch code on entering fcache will fault on
         * accessing DR's TLS.
         * The app's TLS slot value is stored into privlib's TLS slot for
         * later restore on switching back to privlib's TLS.
         */
        byte **priv_lib_tls_swap_slot = (byte **)
            (ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
        byte **app_lib_tls_swap_slot = (byte **)
            (os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
        LOG(THREAD, LOG_LOADER, 3,
            "%s: switching to app: app slot=&"PFX" *"PFX", priv slot=&"PFX" *"PFX"\n",
            __FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
            priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
        byte *dr_tls_base = *priv_lib_tls_swap_slot;
        *priv_lib_tls_swap_slot = *app_lib_tls_swap_slot;
        *app_lib_tls_swap_slot = dr_tls_base;
        LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
            __FUNCTION__, (to_app ? "app" : "dr"), os_tls->app_lib_tls_base);
        res = write_thread_register(os_tls->app_lib_tls_base);
    } else {
        /* Restore the app's TLS slot that we used for storing DR's TLS base,
         * and put DR's TLS base back to privlib's TLS slot.
         */
        byte **priv_lib_tls_swap_slot = (byte **)
            (ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
        byte **app_lib_tls_swap_slot = (byte **)
            (os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
        byte *dr_tls_base = *app_lib_tls_swap_slot;
        LOG(THREAD, LOG_LOADER, 3,
            "%s: switching to DR: app slot=&"PFX" *"PFX", priv slot=&"PFX" *"PFX"\n",
            __FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
            priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
        *app_lib_tls_swap_slot = *priv_lib_tls_swap_slot;
        *priv_lib_tls_swap_slot = dr_tls_base;
        LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
            __FUNCTION__, (to_app ? "app" : "dr"),
            ostd->priv_lib_tls_base);
        res = write_thread_register(ostd->priv_lib_tls_base);
    }
    LOG(THREAD, LOG_LOADER, 2,
        "%s %s: set_tls swap success=%d for thread "TIDFMT"\n",
        __FUNCTION__, to_app ? "to app" : "to DR", res, get_thread_id());
    return res;
#elif defined(AARCH64)
    /* NOTE(review): if AARCHXX is defined for AArch64 builds (as its name
     * suggests), the branch above shadows this one and this arm is dead
     * code -- confirm the macro definitions.
     */
    (void)os_tls;
    ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
    return false;
#endif /* X86/ARM/AARCH64 */
}
/* System call interception: put any special handling here
* Arguments come from the pusha right before the call
*/
/* WARNING: flush_fragments_and_remove_region assumes that pre and post system
* call handlers do not examine or modify fcache or its fragments in any
* way except for calling flush_fragments_and_remove_region!
*/
/* WARNING: All registers are IN values, but NOT OUT values --
* must set mcontext's register for that.
*/
/* Returns false if system call should NOT be executed (in which case,
* post_system_call() will *not* be called!).
* Returns true if system call should go ahead
*/
/* XXX: split out specific handlers into separate routines
*/
bool
pre_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool execute_syscall = true;
where_am_i_t old_whereami = dcontext->whereami;
dcontext->whereami = WHERE_SYSCALL_HANDLER;
/* FIXME We haven't yet done the work to detect which syscalls we
* can determine a priori will fail. Once we do, we will set the
* expect_last_syscall_to_fail to true for those case, and can
* confirm in post_system_call() that the syscall failed as
* expected.
*/
DODEBUG(dcontext->expect_last_syscall_to_fail = false;);
/* save key register values for post_system_call (they get clobbered
* in syscall itself)
*/
dcontext->sys_num = os_normalized_sysnum((int)MCXT_SYSNUM_REG(mc), NULL, dcontext);
RSTATS_INC(pre_syscall);
DOSTATS({
if (ignorable_system_call_normalized(dcontext->sys_num))
STATS_INC(pre_syscall_ignorable);
});
LOG(THREAD, LOG_SYSCALLS, 2, "system call %d\n", dcontext->sys_num);
#if defined(LINUX) && defined(X86)
/* PR 313715: If we fail to hook the vsyscall page (xref PR 212570, PR 288330)
* we fall back on int, but we have to tweak syscall param #5 (ebp)
* Once we have PR 288330 we can remove this.
*/
if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
dcontext->sys_xbp = mc->xbp;
/* not using SAFE_READ due to performance concerns (we do this for
* every single system call on systems where we can't hook vsyscall!)
*/
TRY_EXCEPT(dcontext, /* try */ {
mc->xbp = *(reg_t*)mc->xsp;
}, /* except */ {
ASSERT_NOT_REACHED();
mc->xbp = 0;
});
}
#endif
switch (dcontext->sys_num) {
case SYSNUM_EXIT_PROCESS:
# if defined(LINUX) && VMX86_SERVER
if (os_in_vmkernel_32bit()) {
/* on esx 3.5 => ENOSYS, so wait for SYS_exit */
LOG(THREAD, LOG_SYSCALLS, 2, "on esx35 => ignoring exitgroup\n");
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
#endif
/* fall-through */
case SYSNUM_EXIT_THREAD: {
handle_exit(dcontext);
break;
}
/****************************************************************************/
/* MEMORY REGIONS */
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_mmap: {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage int old_mmap(struct mmap_arg_struct_t *arg)
*/
mmap_arg_struct_t *arg = (mmap_arg_struct_t *) sys_param(dcontext, 0);
mmap_arg_struct_t arg_buf;
if (safe_read(arg, sizeof(mmap_arg_struct_t), &arg_buf)) {
void *addr = (void *) arg->addr;
size_t len = (size_t) arg->len;
uint prot = (uint) arg->prot;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mmap addr="PFX" size="PIFX" prot=0x%x"
" flags="PIFX" offset="PIFX" fd=%d\n",
addr, len, prot, arg->flags, arg->offset, arg->fd);
/* Check for overlap with existing code or patch-proof regions */
if (addr != NULL &&
!app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
!TEST(MAP_FIXED, arg->flags))) {
/* Rather than failing or skipping the syscall we'd like to just
* remove the hint -- but we don't want to write to app memory, so
* we do fail. We could set up our own mmap_arg_struct_t but
* we'd need dedicate per-thread storage, and SYS_mmap is obsolete.
*/
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t) arg;
break;
}
#endif
case IF_MACOS_ELSE(SYS_mmap,IF_X64_ELSE(SYS_mmap,SYS_mmap2)): {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
*/
void *addr = (void *) sys_param(dcontext, 0);
size_t len = (size_t) sys_param(dcontext, 1);
uint prot = (uint) sys_param(dcontext, 2);
uint flags = (uint) sys_param(dcontext, 3);
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mmap2 addr="PFX" size="PIFX" prot=0x%x"
" flags="PIFX" offset="PIFX" fd=%d\n",
addr, len, prot, flags,
sys_param(dcontext, 5), sys_param(dcontext, 4));
/* Check for overlap with existing code or patch-proof regions */
if (addr != NULL &&
!app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
!TEST(MAP_FIXED, flags))) {
if (!TEST(MAP_FIXED, flags)) {
/* Rather than failing or skipping the syscall we just remove
* the hint which should eliminate any overlap.
*/
*sys_param_addr(dcontext, 0) = 0;
} else {
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t) addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
dcontext->sys_param3 = flags;
break;
}
/* must flush stale fragments when we see munmap/mremap */
case SYS_munmap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage long sys_munmap(unsigned long addr, uint len)
*/
app_pc addr = (void *) sys_param(dcontext, 0);
size_t len = (size_t) sys_param(dcontext, 1);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: munmap addr="PFX" size="PFX"\n",
addr, len);
RSTATS_INC(num_app_munmaps);
/* FIXME addr is supposed to be on a page boundary so we
* could detect that condition here and set
* expect_last_syscall_to_fail.
*/
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t) addr;
dcontext->sys_param1 = len;
/* We assume that the unmap will succeed and so are conservative
* and remove the region from exec areas and flush all fragments
* prior to issuing the syscall. If the unmap fails, we try to
* recover in post_system_call() by re-adding the region. This
* approach has its shortcomings -- see comments below in
* post_system_call().
*/
/* Check for unmapping a module. */
os_get_module_info_lock();
if (module_overlaps(addr, len)) {
/* FIXME - handle unmapping more than one module at once, or only unmapping
* part of a module (for which case should adjust view size? or treat as full
* unmap?). Theoretical for now as we haven't seen this. */
module_area_t *ma = module_pc_lookup(addr);
ASSERT_CURIOSITY(ma != NULL);
ASSERT_CURIOSITY(addr == ma->start);
/* XREF 307599 on rounding module end to the next PAGE boundary */
ASSERT_CURIOSITY((app_pc)ALIGN_FORWARD(addr+len, PAGE_SIZE) == ma->end);
os_get_module_info_unlock();
/* i#210:
* we only think a module is removed if its first memory region
* is unloaded (unmapped).
* XREF i#160 to fix the real problem of handling module splitting.
*/
if (ma != NULL && ma->start == addr)
module_list_remove(addr, ALIGN_FORWARD(len, PAGE_SIZE));
} else
os_get_module_info_unlock();
app_memory_deallocation(dcontext, (app_pc)addr, len,
false /* don't own thread_initexit_lock */,
true /* image, FIXME: though not necessarily */);
/* FIXME: case 4983 use is_elf_so_header() */
#ifndef HAVE_MEMINFO_QUERY
memcache_lock();
memcache_remove(addr, addr + len);
memcache_unlock();
#endif
break;
}
#ifdef LINUX
case SYS_mremap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage unsigned long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
*/
dr_mem_info_t info;
app_pc addr = (void *) sys_param(dcontext, 0);
size_t old_len = (size_t) sys_param(dcontext, 1);
size_t new_len = (size_t) sys_param(dcontext, 2);
DEBUG_DECLARE(bool ok;)
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: mremap addr="PFX" size="PFX"\n",
addr, old_len);
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t) addr;
dcontext->sys_param1 = old_len;
dcontext->sys_param2 = new_len;
/* i#173
* we need memory type and prot to set the
* new memory region in the post_system_call
*/
DEBUG_DECLARE(ok =)
query_memory_ex(addr, &info);
ASSERT(ok);
dcontext->sys_param3 = info.prot;
dcontext->sys_param4 = info.type;
DOCHECK(1, {
/* we don't expect to see remappings of modules */
os_get_module_info_lock();
ASSERT_CURIOSITY(!module_overlaps(addr, old_len));
os_get_module_info_unlock();
});
break;
}
#endif
case SYS_mprotect: {
/* in /usr/src/linux/mm/mprotect.c:
asmlinkage long sys_mprotect(unsigned long start, uint len,
unsigned long prot)
*/
uint res;
DEBUG_DECLARE(size_t size;)
app_pc addr = (void *) sys_param(dcontext, 0);
size_t len = (size_t) sys_param(dcontext, 1);
uint prot = (uint) sys_param(dcontext, 2);
uint old_memprot = MEMPROT_NONE, new_memprot;
bool exists = true;
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t) addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mprotect addr="PFX" size="PFX" prot=%s\n",
addr, len, memprot_string(osprot_to_memprot(prot)));
if (!get_memory_info(addr, NULL, IF_DEBUG_ELSE(&size, NULL), &old_memprot)) {
exists = false;
/* Xref PR 413109, PR 410921: if the start, or any page, is not mapped,
* this should fail with ENOMEM. We used to force-fail it to avoid
* asserts in our own allmem update code, but there are cases where a
* seemingly unmapped page succeeds (i#1912: next page of grows-down
* initial stack). Thus we let it go through.
*/
LOG(THREAD, LOG_SYSCALLS, 2,
"\t"PFX" isn't mapped: probably mprotect will fail\n", addr);
} else {
/* If mprotect region spans beyond the end of the vmarea then it
* spans 2 or more vmareas with dissimilar protection (xref
* PR 410921) or has unallocated regions in between (PR 413109).
*/
DOCHECK(1, dcontext->mprot_multi_areas = len > size ? true : false;);
}
new_memprot = osprot_to_memprot(prot) |
/* mprotect won't change meta flags */
(old_memprot & MEMPROT_META_FLAGS);
res = app_memory_protection_change(dcontext, addr, len, new_memprot,
&new_memprot, NULL);
if (res != DO_APP_MEM_PROT_CHANGE) {
if (res == FAIL_APP_MEM_PROT_CHANGE) {
ASSERT_NOT_IMPLEMENTED(false); /* return code? */
} else {
ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
ASSERT_NOT_REACHED();
}
execute_syscall = false;
}
else {
/* FIXME Store state for undo if the syscall fails. */
IF_NO_MEMQUERY(memcache_update_locked(addr, addr + len, new_memprot,
-1/*type unchanged*/, exists));
}
break;
}
#ifdef ANDROID
case SYS_prctl:
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
dcontext->sys_param4 = sys_param(dcontext, 4);
break;
#endif
#ifdef LINUX
case SYS_brk: {
if (DYNAMO_OPTION(emulate_brk)) {
/* i#1004: emulate brk via a separate mmap */
byte *new_val = (byte *) sys_param(dcontext, 0);
byte *res = emulate_app_brk(dcontext, new_val);
execute_syscall = false;
/* SYS_brk returns old brk on failure */
set_success_return_val(dcontext, (reg_t)res);
} else {
/* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
* We store the old break in the param1 slot.
*/
DODEBUG(dcontext->sys_param0 = (reg_t) sys_param(dcontext, 0););
dcontext->sys_param1 = dynamorio_syscall(SYS_brk, 1, 0);
}
break;
}
# ifdef SYS_uselib
case SYS_uselib: {
/* Used to get the kernel to load a share library (legacy system call).
* Was primarily used when statically linking to dynamically loaded shared
* libraries that were loaded at known locations. Shouldn't be used by
* applications using the dynamic loader (ld) which is currently the only
* way we can inject so we don't expect to see this. PR 307621. */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
# endif
#endif
/****************************************************************************/
/* SPAWNING */
#ifdef LINUX
case SYS_clone: {
/* in /usr/src/linux/arch/i386/kernel/process.c
* 32-bit params: flags, newsp, ptid, tls, ctid
* 64-bit params: should be the same yet tls (for ARCH_SET_FS) is in r8?!?
* I don't see how sys_clone gets its special args: shouldn't it
* just get pt_regs as a "special system call"?
* sys_clone(unsigned long clone_flags, unsigned long newsp,
* void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
*/
uint flags = (uint) sys_param(dcontext, 0);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone with flags = "PFX"\n", flags);
LOG(THREAD, LOG_SYSCALLS, 2, "args: "PFX", "PFX", "PFX", "PFX", "PFX"\n",
sys_param(dcontext, 0), sys_param(dcontext, 1), sys_param(dcontext, 2),
sys_param(dcontext, 3), sys_param(dcontext, 4));
handle_clone(dcontext, flags);
if ((flags & CLONE_VM) == 0) {
LOG(THREAD, LOG_SYSCALLS, 1, "\tWARNING: CLONE_VM not set!\n");
}
/* save for post_system_call */
dcontext->sys_param0 = (reg_t) flags;
/* i#1010: If we have private fds open (usually logfiles), we should
* clean those up before they get reused by a new thread.
* XXX: Ideally we'd do this in fd_table_add(), but we can't acquire
* thread_initexit_lock there.
*/
cleanup_after_vfork_execve(dcontext);
/* For thread creation clone syscalls a clone_record_t structure
* containing the pc after the app's syscall instr and other data
* (see i#27) is placed at the bottom of the dstack (which is allocated
* by create_clone_record() - it also saves app stack and switches
* to dstack). xref i#149/PR 403015.
* Note: This must be done after sys_param0 is set.
*/
if (is_thread_create_syscall(dcontext)) {
create_clone_record(dcontext, sys_param_addr(dcontext, 1) /*newsp*/);
os_clone_pre(dcontext);
} else /* This is really a fork. */
os_fork_pre(dcontext);
break;
}
#elif defined(MACOS)
case SYS_bsdthread_create: {
/* XXX i#1403: we need earlier injection to intercept
* bsdthread_register in order to capture workqueue threads.
* For now we settle for intercepting bsd threads at the user thread func.
* We miss a little user-mode code but this is enough to get started.
*/
app_pc func = (app_pc) sys_param(dcontext, 0);
void *func_arg = (void *) sys_param(dcontext, 1);
void *clone_rec;
LOG(THREAD, LOG_SYSCALLS, 1, "bsdthread_create: thread func "PFX", arg "PFX"\n",
func, func_arg);
handle_clone(dcontext, CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD);
clone_rec = create_clone_record(dcontext, NULL, func, func_arg);
dcontext->sys_param0 = (reg_t) func;
dcontext->sys_param1 = (reg_t) func_arg;
*sys_param_addr(dcontext, 0) = (reg_t) new_bsdthread_intercept;
*sys_param_addr(dcontext, 1) = (reg_t) clone_rec;
break;
}
case SYS_posix_spawn: {
/* FIXME i#1644: monitor this call which can be fork or exec */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
#endif
#ifdef SYS_vfork
case SYS_vfork: {
/* treat as if sys_clone with flags just as sys_vfork does */
/* in /usr/src/linux/arch/i386/kernel/process.c */
uint flags = CLONE_VFORK | CLONE_VM | SIGCHLD;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork\n");
handle_clone(dcontext, flags);
cleanup_after_vfork_execve(dcontext);
/* save for post_system_call, treated as if SYS_clone */
dcontext->sys_param0 = (reg_t) flags;
/* vfork has the same needs as clone. Pass info via a clone_record_t
* structure to child. See SYS_clone for info about i#149/PR 403015.
*/
IF_LINUX(ASSERT(is_thread_create_syscall(dcontext)));
dcontext->sys_param1 = mc->xsp; /* for restoring in parent */
# ifdef MACOS
create_clone_record(dcontext, (reg_t *)&mc->xsp, NULL, NULL);
# else
create_clone_record(dcontext, (reg_t *)&mc->xsp /*child uses parent sp*/);
# endif
os_clone_pre(dcontext);
break;
}
#endif
#ifdef SYS_fork
case SYS_fork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork\n");
os_fork_pre(dcontext);
break;
}
#endif
case SYS_execve: {
int ret = handle_execve(dcontext);
if (ret != 0) {
execute_syscall = false;
set_failure_return_val(dcontext, ret);
}
break;
}
/****************************************************************************/
/* SIGNALS */
case IF_MACOS_ELSE(SYS_sigaction,SYS_rt_sigaction): { /* 174 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act,
struct sigaction *oact, size_t sigsetsize)
*/
int sig = (int) sys_param(dcontext, 0);
const kernel_sigaction_t *act = (const kernel_sigaction_t *)
sys_param(dcontext, 1);
prev_sigaction_t *oact = (prev_sigaction_t *) sys_param(dcontext, 2);
size_t sigsetsize = (size_t)
/* On Mac there is no size arg (but it doesn't use old sigaction, so
* closer to rt_ than non-rt_ below).
*/
IF_MACOS_ELSE(sizeof(kernel_sigset_t), sys_param(dcontext, 3));
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction %d "PFX" "PFX" %d\n",
IF_MACOS_ELSE("","rt_"), sig, act, oact, sigsetsize);
/* post_syscall does some work as well */
dcontext->sys_param0 = (reg_t) sig;
dcontext->sys_param1 = (reg_t) act;
dcontext->sys_param2 = (reg_t) oact;
dcontext->sys_param3 = (reg_t) sigsetsize;
execute_syscall = handle_sigaction(dcontext, sig, act, oact, sigsetsize, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigaction: { /* 67 */
/* sys_sigaction(int sig, const struct old_sigaction *act,
* struct old_sigaction *oact)
*/
int sig = (int) sys_param(dcontext, 0);
const old_sigaction_t *act = (const old_sigaction_t *) sys_param(dcontext, 1);
old_sigaction_t *oact = (old_sigaction_t *) sys_param(dcontext, 2);
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction %d "PFX" "PFX"\n",
sig, act, oact);
dcontext->sys_param0 = (reg_t) sig;
dcontext->sys_param1 = (reg_t) act;
dcontext->sys_param2 = (reg_t) oact;
execute_syscall = handle_old_sigaction(dcontext, sig, act, oact, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigreturn: { /* 119 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, false);
/* app will not expect syscall to return, so when handle_sigreturn
* returns false it always redirects the context, and thus no
* need to set return val here.
*/
break;
}
#endif
#ifdef LINUX
case SYS_rt_sigreturn: { /* 173 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_rt_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, true);
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
#ifdef MACOS
case SYS_sigreturn: {
/* int sigreturn(struct ucontext *uctx, int infostyle) */
execute_syscall = handle_sigreturn(dcontext, (void *) sys_param(dcontext, 0),
(int) sys_param(dcontext, 1));
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
case SYS_sigaltstack: { /* 186 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
*/
const stack_t *uss = (const stack_t *) sys_param(dcontext, 0);
stack_t *uoss = (stack_t *) sys_param(dcontext, 1);
execute_syscall =
handle_sigaltstack(dcontext, uss, uoss);
if (!execute_syscall) {
set_success_return_val(dcontext, 0);
}
break;
}
case IF_MACOS_ELSE(SYS_sigprocmask,SYS_rt_sigprocmask): { /* 175 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize)
*/
/* we also need access to the params in post_system_call */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
execute_syscall =
handle_sigprocmask(dcontext, (int) sys_param(dcontext, 0),
(kernel_sigset_t *) sys_param(dcontext, 1),
(kernel_sigset_t *) sys_param(dcontext, 2),
(size_t) sys_param(dcontext, 3));
if (!execute_syscall)
set_success_return_val(dcontext, 0);
break;
}
#ifdef MACOS
case SYS_sigsuspend_nocancel:
#endif
case IF_MACOS_ELSE(SYS_sigsuspend,SYS_rt_sigsuspend): { /* 179 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
*/
handle_sigsuspend(dcontext, (kernel_sigset_t *) sys_param(dcontext, 0),
(size_t) sys_param(dcontext, 1));
break;
}
#ifdef LINUX
# ifdef SYS_signalfd
case SYS_signalfd: /* 282/321 */
# endif
case SYS_signalfd4: { /* 289 */
/* int signalfd (int fd, const sigset_t *mask, size_t sizemask) */
/* int signalfd4(int fd, const sigset_t *mask, size_t sizemask, int flags) */
ptr_int_t new_result;
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
# ifdef SYS_signalfd
if (dcontext->sys_num == SYS_signalfd)
dcontext->sys_param3 = 0;
else
# endif
dcontext->sys_param3 = sys_param(dcontext, 3);
new_result =
handle_pre_signalfd(dcontext, (int) dcontext->sys_param0,
(kernel_sigset_t *) dcontext->sys_param1,
(size_t) dcontext->sys_param2,
(int) dcontext->sys_param3);
execute_syscall = false;
/* since non-Mac, we can use this even if the call failed */
set_success_return_val(dcontext, new_result);
break;
}
#endif
case SYS_kill: { /* 37 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_kill(int pid, int sig)
*/
pid_t pid = (pid_t) sys_param(dcontext, 0);
uint sig = (uint) sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 2,
"thread "TIDFMT" sending signal %d to pid "PIDFMT"\n",
get_thread_id(), sig, pid);
/* We check whether targeting this process or this process group */
if (pid == get_process_id() || pid == 0 || pid == -get_process_group_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#if defined(SYS_tkill)
case SYS_tkill: { /* 238 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tkill(int pid, int sig)
*/
pid_t tid = (pid_t) sys_param(dcontext, 0);
uint sig = (uint) sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 2,
"thread "TIDFMT" sending signal %d to tid %d\n",
get_thread_id(), sig, tid);
if (tid == get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
#if defined(SYS_tgkill)
case SYS_tgkill: { /* 270 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tgkill(int tgid, int pid, int sig)
*/
pid_t tgid = (pid_t) sys_param(dcontext, 0);
pid_t tid = (pid_t) sys_param(dcontext, 1);
uint sig = (uint) sys_param(dcontext, 2);
LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 2,
"thread "TIDFMT" sending signal %d to tid %d tgid %d\n",
get_thread_id(), sig, tid, tgid);
/* some kernels support -1 values:
+ tgkill(-1, tid, sig) == tkill(tid, sig)
* tgkill(tgid, -1, sig) == kill(tgid, sig)
* the 2nd was proposed but is not in 2.6.20 so I'm ignoring it, since
* I don't want to kill the thread when the signal is never sent!
* FIXME: the 1st is in my tkill manpage, but not my 2.6.20 kernel sources!
*/
if ((tgid == -1 || tgid == get_process_id()) &&
tid == get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
case SYS_setitimer: /* 104 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
handle_pre_setitimer(dcontext, (int) sys_param(dcontext, 0),
(const struct itimerval *) sys_param(dcontext, 1),
(struct itimerval *) sys_param(dcontext, 2));
break;
case SYS_getitimer: /* 105 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
#if defined(LINUX) && defined(X86)
case SYS_alarm: /* 27 on x86 and 37 on x64 */
dcontext->sys_param0 = sys_param(dcontext, 0);
handle_pre_alarm(dcontext, (unsigned int) dcontext->sys_param0);
break;
#endif
#if 0
# ifndef X64
case SYS_signal: { /* 48 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
*/
break;
}
case SYS_sigsuspend: { /* 72 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
*/
break;
}
case SYS_sigprocmask: { /* 126 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
*/
break;
}
# endif
#else
/* until we've implemented them, keep down here to get warning: */
# if defined(LINUX) && !defined(X64)
# ifndef ARM
case SYS_signal:
# endif
case SYS_sigsuspend:
case SYS_sigprocmask:
# endif
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigpending: /* 73 */
# ifndef ARM
case SYS_sgetmask: /* 68 */
case SYS_ssetmask: /* 69 */
# endif
#endif
#ifdef LINUX
case SYS_rt_sigtimedwait: /* 177 */
case SYS_rt_sigqueueinfo: /* 178 */
#endif
case IF_MACOS_ELSE(SYS_sigpending,SYS_rt_sigpending): { /* 176 */
/* FIXME i#92: handle all of these syscalls! */
LOG(THREAD, LOG_ASYNCH|LOG_SYSCALLS, 1,
"WARNING: unhandled signal system call %d\n", dcontext->sys_num);
SYSLOG_INTERNAL_WARNING_ONCE("unhandled signal system call %d",
dcontext->sys_num);
break;
}
/****************************************************************************/
/* FILES */
/* prevent app from closing our files or opening a new file in our fd space.
* it's not worth monitoring all syscalls that take in fds from affecting ours.
*/
#ifdef MACOS
case SYS_close_nocancel:
#endif
case SYS_close: {
execute_syscall = handle_close_pre(dcontext);
#ifdef LINUX
if (execute_syscall)
signal_handle_close(dcontext, (file_t) sys_param(dcontext, 0));
#endif
break;
}
#ifdef SYS_dup2
case SYS_dup2:
IF_LINUX(case SYS_dup3:) {
file_t newfd = (file_t) sys_param(dcontext, 1);
if (fd_is_dr_owned(newfd) || fd_is_in_private_range(newfd)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to dup-close DR file(s)");
LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
"WARNING: app trying to dup2/dup3 to %d. Disallowing.\n", newfd);
if (DYNAMO_OPTION(fail_on_stolen_fds)) {
set_failure_return_val(dcontext, EBADF);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else
set_success_return_val(dcontext, 0);
execute_syscall = false;
}
break;
}
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl: {
int cmd = (int) sys_param(dcontext, 1);
long arg = (long) sys_param(dcontext, 2);
/* we only check for asking for min in private space: not min below
* but actual will be above (see notes in os_file_init())
*/
if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC) && fd_is_in_private_range(arg)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to open private fd(s)");
LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
"WARNING: app trying to dup to >= %d. Disallowing.\n", arg);
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
} else {
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = cmd;
}
break;
}
#if defined(X64) || !defined(ARM) || defined(MACOS)
case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
case SYS_ugetrlimit:
#endif
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* resource */
dcontext->sys_param1 = sys_param(dcontext, 1); /* rlimit */
break;
case SYS_setrlimit: {
int resource = (int) sys_param(dcontext, 0);
if (resource == RLIMIT_NOFILE && DYNAMO_OPTION(steal_fds) > 0) {
# if !defined(ARM) && !defined(X64) && !defined(MACOS)
struct compat_rlimit rlim;
# else
struct rlimit rlim;
# endif
if (safe_read((void *)sys_param(dcontext, 1), sizeof(rlim), &rlim) &&
rlim.rlim_max <= min_dr_fd && rlim.rlim_cur <= rlim.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
} else {
/* don't let app raise limits as that would mess up our fd space */
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
execute_syscall = false;
}
break;
}
#ifdef LINUX
case SYS_prlimit64:
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* pid */
dcontext->sys_param1 = sys_param(dcontext, 1); /* resource */
dcontext->sys_param2 = sys_param(dcontext, 2); /* new rlimit */
dcontext->sys_param3 = sys_param(dcontext, 3); /* old rlimit */
if (/* XXX: how do we handle the case of setting rlimit.nofile on another
* process that is running with DynamoRIO?
*/
/* XXX: CLONE_FILES allows different processes to share the same file
* descriptor table, and different threads of the same process have
* separate file descriptor tables. POSIX specifies that rlimits are
* per-process, not per-thread, and Linux follows suit, so the threads
* with different descriptors will not matter, and the pids sharing
* descriptors turns into the hard-to-solve IPC problem.
*/
(dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id()) &&
dcontext->sys_param1 == RLIMIT_NOFILE &&
dcontext->sys_param2 != (reg_t)NULL && DYNAMO_OPTION(steal_fds) > 0) {
struct rlimit rlim;
if (safe_read((void *)(dcontext->sys_param2), sizeof(rlim), &rlim) &&
rlim.rlim_max <= min_dr_fd && rlim.rlim_cur <= rlim.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
/* set old rlimit if necessary */
if (dcontext->sys_param3 != (reg_t)NULL) {
safe_write_ex((void *)(dcontext->sys_param3), sizeof(rlim),
&app_rlimit_nofile, NULL);
}
} else {
/* don't let app raise limits as that would mess up our fd space */
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
execute_syscall = false;
}
break;
#endif
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat:
if (DYNAMO_OPTION(early_inject)) {
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
if (dcontext->sys_num == SYS_readlinkat)
dcontext->sys_param3 = sys_param(dcontext, 3);
}
break;
/* i#107 syscalls that might change/query app's segment */
# if defined(X86) && defined(X64)
case SYS_arch_prctl: {
/* we handle arch_prctl in post_syscall */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
}
# endif
# ifdef X86
case SYS_set_thread_area: {
our_modify_ldt_t desc;
if (INTERNAL_OPTION(mangle_app_seg) &&
safe_read((void *)sys_param(dcontext, 0),
sizeof(desc), &desc)) {
if (os_set_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0),
sizeof(desc), &desc, NULL)) {
/* check if the range is unlimited */
ASSERT_CURIOSITY(desc.limit == 0xfffff);
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
case SYS_get_thread_area: {
our_modify_ldt_t desc;
if (INTERNAL_OPTION(mangle_app_seg) &&
safe_read((const void *)sys_param(dcontext, 0),
sizeof(desc), &desc)) {
if (os_get_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0),
sizeof(desc), &desc, NULL)) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
# endif /* X86 */
# ifdef ARM
case SYS_set_tls: {
LOG(THREAD, LOG_VMAREAS|LOG_SYSCALLS, 2,
"syscall: set_tls "PFX"\n", sys_param(dcontext, 0));
if (os_set_app_tls_base(dcontext, TLS_REG_LIB, (void *)sys_param(dcontext, 0))) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
} else {
ASSERT_NOT_REACHED();
}
break;
}
case SYS_cacheflush: {
/* We assume we don't want to change the executable_areas list or change
* the selfmod status of this region: else we should call something
* that invokes handle_modified_code() in a way that handles a bigger
* region than a single write.
*/
app_pc start = (app_pc) sys_param(dcontext, 0);
app_pc end = (app_pc) sys_param(dcontext, 1);
LOG(THREAD, LOG_VMAREAS|LOG_SYSCALLS, 2,
"syscall: cacheflush "PFX"-"PFX"\n", start, end);
flush_fragments_from_region(dcontext, start, end - start,
/* An unlink flush should be fine: the app must
* use synch to ensure other threads see the
* new code.
*/
false/*don't force synchall*/);
break;
}
# endif /* ARM */
#elif defined(MACOS)
/* FIXME i#58: handle i386_{get,set}_ldt and thread_fast_set_cthread_self64 */
#endif
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: {
dcontext->sys_param0 = sys_param(dcontext, 0);
break;
}
# endif
#endif
default: {
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(dcontext->sys_num)) {
execute_syscall = vmkuw_pre_system_call(dcontext);
break;
}
#endif
}
} /* end switch */
dcontext->whereami = old_whereami;
return execute_syscall;
}
/* Acquires the lock protecting DR's cached view of app memory areas.
 * Only does real work when no OS memory-query interface is available
 * (IF_NO_MEMQUERY expands its argument only in that configuration);
 * otherwise this is a no-op.
 */
void
all_memory_areas_lock(void)
{
    IF_NO_MEMQUERY(memcache_lock());
}
/* Releases the lock acquired by all_memory_areas_lock().  A no-op unless
 * built without an OS memory-query interface (see IF_NO_MEMQUERY).
 */
void
all_memory_areas_unlock(void)
{
    IF_NO_MEMQUERY(memcache_unlock());
}
/* Records [start, end) with protection prot and area type in DR's memory
 * cache.  Forwards to memcache_update() only when no OS memory-query
 * interface exists (IF_NO_MEMQUERY); otherwise the OS is queried directly
 * elsewhere and this is a no-op.
 */
void
update_all_memory_areas(app_pc start, app_pc end, uint prot, int type)
{
    IF_NO_MEMQUERY(memcache_update(start, end, prot, type));
}
/* Removes [start, end) from DR's memory cache.  When built without an OS
 * memory-query interface, IF_NO_MEMQUERY expands to the early return with
 * memcache_remove()'s result; otherwise there is no cache to update and we
 * trivially succeed.
 */
bool
remove_from_all_memory_areas(app_pc start, app_pc end)
{
    IF_NO_MEMQUERY(return memcache_remove(start, end));
    return true;
}
/* We consider a module load to happen at the first mmap, so we check on later
 * overmaps to ensure things look consistent. */
/* Returns true iff [base, base+size) overlaps a module already on DR's module
 * list.  readable gates the ELF-header sanity check below; inode is the
 * backing file's inode (0 for anonymous portions such as .bss); at_map is
 * true when called at mmap time as opposed to a later scan of existing
 * memory (it selects which curiosity asserts apply and the log wording).
 */
static bool
mmap_check_for_module_overlap(app_pc base, size_t size, bool readable, uint64 inode,
                              bool at_map)
{
    module_area_t *ma;
    os_get_module_info_lock();
    ma = module_pc_lookup(base);
    if (ma != NULL) {
        /* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map from someone just mapping over part of a module? If
         * is the latter case need to adjust the view size or remove from module list. */
        LOG(GLOBAL, LOG_VMAREAS, 2, "%s mmap overlapping module area : \n"
            "\tmap : base="PFX" base+size="PFX" inode="UINT64_FORMAT_STRING"\n"
            "\tmod : start="PFX" end="PFX" inode="UINT64_FORMAT_STRING"\n",
            at_map ? "new" : "existing", base, base+size, inode,
            ma->start, ma->end, ma->names.inode);
        ASSERT_CURIOSITY(base >= ma->start);
        if (at_map) {
            /* A fresh overmap is expected to stay within the module's bounds. */
            ASSERT_CURIOSITY(base+size <= ma->end);
        } else {
            /* FIXME - I'm having problems with this check for existing maps.  I
             * haven't been able to get gdb to break in early enough to really get a good
             * look at the early loader behavior.  Two issues: One case is with our .so
             * for which the anonymous .bss mapping is one page larger than expected
             * (which might be some loader bug in the size calculation? or something? if
             * so should see it trigger the at_map curiosity on some dll and can address
             * then) and the other is that for a few executables the .bss mapping is much
             * larger (~0x20000 larger) then expected when running under DR (but not
             * running natively where it is instead the expected size).  Both could just
             * be the loader merging adjacent identically protected regions though I
             * can't explain the discrepancy between DR and native given that our vmmheap
             * is elsewhere in the address space (so who and how allocated that adjacent
             * memory). I've yet to see any issue with dynamically loaded modules so
             * it's probably the loader merging regions.  Still worth investigating. */
            ASSERT_CURIOSITY(inode == 0 /*see above comment*/||
                             module_contains_addr(ma, base+size-1));
        }
        /* inode == 0 covers anonymous overmaps such as .bss. */
        ASSERT_CURIOSITY(ma->names.inode == inode || inode == 0 /* for .bss */);
        DOCHECK(1, {
            if (readable && module_is_header(base, size)) {
                /* Case 8879: For really small modules, to save disk space, the same
                 * disk page could hold both RO and .data, occupying just 1 page of
                 * disk space, e.g. /usr/lib/httpd/modules/mod_auth_anon.so.  When
                 * such a module is mapped in, the os maps the same disk page twice,
                 * one readonly and one copy-on-write (see pg. 96, Sec 4.4 from
                 * Linkers and Loaders by John R. Levine).  This makes the data
                 * section also satisfy the elf_header check above.  So, if the new
                 * mmap overlaps an elf_area and it is also a header, then make sure
                 * the previous page (correcting for alignment) is also a elf_header.
                 * Note, if it is a header of a different module, then we'll not have
                 * an overlap, so we will not hit this case.
                 */
                ASSERT_CURIOSITY(ma->start + ma->os_data.alignment == base);
            }
        });
    }
    os_get_module_info_unlock();
#ifdef ANDROID
    /* i#1860: we need to keep looking for the segment with .dynamic as Android's
     * loader does not map the whole file up front.
     */
    if (ma != NULL && at_map && readable)
        os_module_update_dynamic_info(base, size, at_map);
#endif
    return ma != NULL;
}
/* Adds a newly seen app module mapped at [base, base+size) to DR's module
 * list.  at_map is true when called during mmap processing (size is then the
 * loader's initial all-segment-covering map size); when false, size is only
 * the first segment's size and the full module extent is recomputed from the
 * ELF program headers.  memprot is the mapping's protection (used here only
 * for logging).
 */
static void
os_add_new_app_module(dcontext_t *dcontext, bool at_map,
                      app_pc base, size_t size, uint memprot)
{
    memquery_iter_t iter;
    bool found_map = false;
    uint64 inode = 0;
    const char *filename = "";
    size_t mod_size = size;

    if (!at_map) {
        /* the size is the first seg size, get the whole module size instead */
        app_pc first_seg_base = NULL;
        app_pc first_seg_end = NULL;
        app_pc last_seg_end = NULL;
        if (module_walk_program_headers(base, size, at_map, false,
                                        &first_seg_base,
                                        &first_seg_end,
                                        &last_seg_end,
                                        NULL, NULL)) {
            /* vdso/vsyscall are special pages rather than normal first
             * segments, hence the extra disjuncts in the curiosity.
             */
            ASSERT_CURIOSITY(size == (ALIGN_FORWARD(first_seg_end, PAGE_SIZE) -
                                      (ptr_uint_t)first_seg_base) ||
                             base == vdso_page_start ||
                             base == vsyscall_page_start);
            mod_size = ALIGN_FORWARD(last_seg_end, PAGE_SIZE) -
                (ptr_uint_t)first_seg_base;
        }
    }
    LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 2, "dlopen "PFX"-"PFX"%s\n",
        base, base+mod_size, TEST(MEMPROT_EXEC, memprot) ? " +x": "");

    /* Mapping in a new module.  From what we've observed of the loader's
     * behavior, it first maps the file in with size equal to the final
     * memory image size (I'm not sure how it gets that size without reading
     * in the elf header and then walking through all the program headers to
     * get the largest virtual offset).  This is necessary to reserve all the
     * space that will be needed.  It then walks through the program headers
     * mapping over the the previously mapped space with the appropriate
     * permissions and offsets.  Note that the .bss portion is mapped over
     * as anonymous.  It may also, depending on the program headers, make some
     * areas read-only after fixing up their relocations etc. NOTE - at
     * no point are the section headers guaranteed to be mapped in so we can't
     * reliably walk sections (only segments) without looking to disk.
     */
    /* FIXME - when should we add the module to our list?  At the first map
     * seems to be the best choice as we know the bounds and it's difficult to
     * tell when the loader is finished.  The downside is that at the initial map
     * the memory layout isn't finalized (memory beyond the first segment will
     * be shifted for page alignment reasons), so we have to be careful and
     * make adjustments to read anything beyond the first segment until the
     * loader finishes. This goes for the client too as it gets notified when we
     * add to the list.  FIXME we could try to track the expected segment overmaps
     * and only notify the client after the last one (though that's still before
     * linking and relocation, but that's true on Windows too). */
    /* Get filename & inode for the list. */
    memquery_iterator_start(&iter, base, true /* plan to alloc a module_area_t */);
    while (memquery_iterator_next(&iter)) {
        if (iter.vm_start == base) {
            if (iter.vm_start == vsyscall_page_start) {
                /* vsyscall is not file-backed; we never add it as a module. */
                ASSERT_CURIOSITY(!at_map);
            } else {
                ASSERT_CURIOSITY(iter.inode != 0 || base == vdso_page_start);
                ASSERT_CURIOSITY(iter.offset == 0); /* first map shouldn't have offset */
                /* XREF 307599 on rounding module end to the next PAGE boundary */
                ASSERT_CURIOSITY((iter.vm_end - iter.vm_start ==
                                  ALIGN_FORWARD(size, PAGE_SIZE)));
                inode = iter.inode;
                /* Duplicated because iter memory is invalid after _stop(). */
                filename = dr_strdup(iter.comment HEAPACCT(ACCT_OTHER));
                found_map = true;
            }
            break;
        }
    }
    memquery_iterator_stop(&iter);
#ifdef HAVE_MEMINFO
    /* barring weird races we should find this map except [vdso] */
    ASSERT_CURIOSITY(found_map || base == vsyscall_page_start || base == vdso_page_start);
#else /* HAVE_MEMINFO */
    /* Without /proc/maps or other memory querying interface available at
     * library map time, there is no way to find out the name of the file
     * that was mapped, thus its inode isn't available either.
     *
     * Just module_list_add with no filename will still result in
     * library name being extracted from the .dynamic section and added
     * to the module list.  However, this name may not always exist, thus
     * we might have a library with no file name available at all!
     *
     * Note: visor implements vsi mem maps that give file info, but, no
     *       path, should be ok.  xref PR 401580.
     *
     * Once PR 235433 is implemented in visor then fix memquery_iterator*() to
     * use vsi to find out page protection info, file name & inode.
     */
#endif /* HAVE_MEMINFO */
    /* XREF 307599 on rounding module end to the next PAGE boundary */
    if (found_map) {
        module_list_add(base, ALIGN_FORWARD(mod_size, PAGE_SIZE),
                        at_map, filename, inode);
        dr_strfree(filename HEAPACCT(ACCT_OTHER));
    }
}
/* Checks whether pc lies inside an image that DR has not yet recorded on its
 * module list (e.g., one brought in by a custom loader) and, if so, adds the
 * missing module.
 */
void
os_check_new_app_module(dcontext_t *dcontext, app_pc pc)
{
    module_area_t *area;

    os_get_module_info_lock();
    area = module_pc_lookup(pc);
    /* A failed lookup can be dynamically generated code, or a module loaded
     * by a custom loader (i#1760, e.g. bionic libc) that was not detected by
     * DynamoRIO in process_mmap.
     */
    if (area == NULL) {
        dr_mem_info_t mem;
        bool missing_image = query_memory_ex_from_os(pc, &mem) &&
            mem.type == DR_MEMTYPE_IMAGE;
        if (missing_image) {
            /* Release the module-info lock around the add, then re-acquire
             * so the unlock below pairs with the lock above.
             */
            os_get_module_info_unlock();
            os_add_new_app_module(get_thread_private_dcontext(), false/*!at_map*/,
                                  mem.base_pc, mem.size, mem.prot);
            os_get_module_info_lock();
        }
    }
    os_get_module_info_unlock();
}
/* All processing for mmap and mmap2. */
/* base/size/prot/flags describe the new mapping (prot in OS mmap terms --
 * converted below via osprot_to_memprot); map_type is a debug-only string
 * for logging.  Decides whether the mapping is (part of) an ELF image,
 * updates the module list and memcache as needed, and registers the region
 * via app_memory_allocation().
 */
static void
process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot,
             uint flags _IF_DEBUG(const char *map_type))
{
    bool image = false;
    uint memprot = osprot_to_memprot(prot);
#ifdef ANDROID
    /* i#1861: avoid merging file-backed w/ anon regions */
    if (!TEST(MAP_ANONYMOUS, flags))
        memprot |= MEMPROT_HAS_COMMENT;
#endif

    LOG(THREAD, LOG_SYSCALLS, 4, "process_mmap("PFX","PFX",0x%x,%s,%s)\n",
        base, size, flags, memprot_string(memprot), map_type);
    /* Notes on how ELF SOs are mapped in.
     *
     * o The initial mmap for an ELF file specifies enough space for
     *   all segments (and their constituent sections) in the file.
     *   The protection bits for that section are used for the entire
     *   region, and subsequent mmaps for subsequent segments within
     *   the region modify their portion's protection bits as needed.
     *   So if the prot bits for the first segment are +x, the entire
     *   region is +x. ** Note that our primary concern is adjusting
     *   exec areas to reflect the prot bits of subsequent
     *   segments. ** The region is added to the all-memory areas
     *   and also to exec areas (as determined by app_memory_allocation()).
     *
     * o Any subsequent segment sub-mappings specify their own protection
     *   bits and therefore are added to the exec areas via normal
     *   processing.  They are also "naturally" added to the all-mems list.
     *   We do a little extra processing when mapping into a previously
     *   mapped region and the prot bits mismatch; if the new mapping is
     *   not +x, flushing needs to occur.
     */
    /* process_mmap can be called with PROT_NONE, so we need to check if we
     * can read the memory to see if it is a elf_header
     */
    /* XXX: get inode for check */
    if (TEST(MAP_ANONYMOUS, flags)) {
        /* not an ELF mmap */
        LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": anon\n", base);
    } else if (mmap_check_for_module_overlap(base, size,
                                             TEST(MEMPROT_READ, memprot), 0, true)) {
        /* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map from someone just mapping over part of a module? If
         * is the latter case need to adjust the view size or remove from module list. */
        image = true;
        DODEBUG({ map_type = "ELF SO"; });
        LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": overlaps image\n", base);
    } else if (TEST(MEMPROT_READ, memprot) &&
               /* i#727: We can still get SIGBUS on mmap'ed files that can't be
                * read, so pass size=0 to use a safe_read.
                */
               module_is_header(base, 0)) {
#ifdef ANDROID
        /* The Android loader's initial all-segment-covering mmap is anonymous */
        dr_mem_info_t info;
        if (query_memory_ex_from_os((byte *)ALIGN_FORWARD(base+size, PAGE_SIZE), &info) &&
            info.prot == MEMPROT_NONE && info.type == DR_MEMTYPE_DATA) {
            LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": Android elf\n", base);
            image = true;
            DODEBUG({ map_type = "ELF SO"; });
            os_add_new_app_module(dcontext, true/*at_map*/, base,
                                  /* pass segment size, not whole module size */
                                  size, memprot);
        } else
#endif
        if (module_is_partial_map(base, size, memprot)) {
            /* i#1240: App might read first page of ELF header using mmap, which
             * might accidentally be treated as a module load. Heuristically
             * distinguish this by saying that if this is the first mmap for an ELF
             * (i.e., it doesn't overlap with a previous map), and if it's small,
             * then don't treat it as a module load.
             */
            LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": partial\n", base);
        } else {
            LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": elf header\n", base);
            image = true;
            DODEBUG({ map_type = "ELF SO"; });
            os_add_new_app_module(dcontext, true/*at_map*/, base, size, memprot);
        }
    }

    IF_NO_MEMQUERY(memcache_handle_mmap(dcontext, base, size, memprot, image));

    /* app_memory_allocation() expects to not see an overlap -- exec areas
     * doesn't expect one. We have yet to see a +x mmap into a previously
     * mapped +x region, but we do check and handle in pre-syscall (i#1175).
     */
    LOG(THREAD, LOG_SYSCALLS, 4, "\t try app_mem_alloc\n");
    if (app_memory_allocation(dcontext, base, size, memprot, image _IF_DEBUG(map_type)))
        STATS_INC(num_app_code_modules);
    LOG(THREAD, LOG_SYSCALLS, 4, "\t app_mem_alloc -- DONE\n");
}
#ifdef LINUX
/* Call right after the system call.
 * i#173: old_prot and old_type should be from before the system call
 */
/* Post-syscall handling for the app's mremap: base is the syscall's return
 * value (new region start), size the requested new size; old_base/old_size
 * describe the original region and old_prot/old_type its pre-syscall
 * protection and memory type.  Returns whether the mremap succeeded; on a
 * move or shrink, flushes fragments from the old region and re-registers
 * the new one.
 */
static bool
handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size,
                  byte *old_base, size_t old_size, uint old_prot, uint old_type)
{
    if (!mmap_syscall_succeeded(base))
        return false;
    if (base != old_base || size < old_size) { /* take action only if
                                                * there was a change */
        DEBUG_DECLARE(bool ok;)
        /* fragments were shifted...don't try to fix them, just flush */
        app_memory_deallocation(dcontext, (app_pc)old_base, old_size,
                                false /* don't own thread_initexit_lock */,
                                false /* not image, FIXME: somewhat arbitrary */);
        DOCHECK(1, {
            /* we don't expect to see remappings of modules */
            os_get_module_info_lock();
            ASSERT_CURIOSITY(!module_overlaps(base, size));
            os_get_module_info_unlock();
        });
        /* Verify that the current prot on the new region (according to
         * the os) is the same as what the prot used to be for the old
         * region.
         */
        DOCHECK(1, {
            uint memprot;
            ok = get_memory_info_from_os(base, NULL, NULL, &memprot);
            /* allow maps to have +x,
             * +x may be caused by READ_IMPLIES_EXEC set in personality flag (i#262)
             */
            ASSERT(ok && (memprot == old_prot ||
                          (memprot & (~MEMPROT_EXEC)) == old_prot));
        });
        /* Re-register the relocated region under its pre-syscall attributes. */
        app_memory_allocation(dcontext, base, size, old_prot,
                              old_type == DR_MEMTYPE_IMAGE
                              _IF_DEBUG("mremap"));
        IF_NO_MEMQUERY(memcache_handle_mremap(dcontext, base, size, old_base, old_size,
                                              old_prot, old_type));
    }
    return true;
}
/* Post-syscall bookkeeping for SYS_brk: flushes any fragments in a
 * shrunk heap region and updates the memory cache with the new break.
 * lowest_brk may be NULL if the initial program break is not known.
 */
static void
handle_app_brk(dcontext_t *dcontext, byte *lowest_brk/*if known*/,
               byte *old_brk, byte *new_brk)
{
    /* i#851: the brk value is not necessarily page-aligned */
    app_pc old_end = (app_pc) ALIGN_FORWARD(old_brk, PAGE_SIZE);
    app_pc new_end = (app_pc) ALIGN_FORWARD(new_brk, PAGE_SIZE);
    if (new_end > old_end) {
        /* Growing: no app_memory_allocation() call needed as it doesn't
         * interact w/ security policies.
         */
    } else if (new_end < old_end) {
        /* Shrinking: the heap is usually writable, so we don't strictly
         * need this call here, but it seems safest to make it, esp. if
         * someone made part of the heap read-only and then put code there.
         */
        app_memory_deallocation(dcontext, new_end, old_end - new_end,
                                false /* don't own thread_initexit_lock */,
                                false /* not image */);
    }
    IF_NO_MEMQUERY(memcache_handle_app_brk(lowest_brk, old_end, new_end));
}
#endif
/* This routine is *not* called if pre_system_call() returns false to skip
 * the syscall.
 */
/* XXX: split out specific handlers into separate routines
*/
/* Post-syscall handling, invoked from dispatch after the system call
 * returns.  The app's registers have been clobbered by the syscall, so the
 * syscall number and the pre-syscall arguments are read back from dcontext
 * (sys_num, sys_param0..N), which pre_system_call() stored.
 */
void
post_system_call(dcontext_t *dcontext)
{
    priv_mcontext_t *mc = get_mcontext(dcontext);
    /* registers have been clobbered, so sysnum is kept in dcontext */
    int sysnum = dcontext->sys_num;
    /* We expect most syscall failures to return < 0, so >= 0 is success.
     * Some syscall return addresses that have the sign bit set and so
     * appear to be failures but are not. They are handled on a
     * case-by-case basis in the switch statement below.
     */
    ptr_int_t result = (ptr_int_t) MCXT_SYSCALL_RES(mc); /* signed */
    bool success = syscall_successful(mc, sysnum);
    app_pc base;
    size_t size;
    uint prot;
    where_am_i_t old_whereami;
    DEBUG_DECLARE(bool ok;)

    RSTATS_INC(post_syscall);

    old_whereami = dcontext->whereami;
    dcontext->whereami = WHERE_SYSCALL_HANDLER;

#if defined(LINUX) && defined(X86)
    /* PR 313715: restore xbp since for some vsyscall sequences that use
     * the syscall instruction its value is needed:
     *   0xffffe400 <__kernel_vsyscall+0>:  push %ebp
     *   0xffffe401 <__kernel_vsyscall+1>:  mov  %ecx,%ebp
     *   0xffffe403 <__kernel_vsyscall+3>:  syscall
     *   0xffffe405 <__kernel_vsyscall+5>:  mov  $0x2b,%ecx
     *   0xffffe40a <__kernel_vsyscall+10>: movl %ecx,%ss
     *   0xffffe40c <__kernel_vsyscall+12>: mov  %ebp,%ecx
     *   0xffffe40e <__kernel_vsyscall+14>: pop  %ebp
     *   0xffffe40f <__kernel_vsyscall+15>: ret
     */
    if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
        mc->xbp = dcontext->sys_xbp;
    }
#endif

    /* handle fork, try to do it early before too much logging occurs */
    if (false
# ifdef SYS_fork
        || sysnum == SYS_fork
# endif
        IF_LINUX(|| (sysnum == SYS_clone && !TEST(CLONE_VM, dcontext->sys_param0)))) {
        /* fork-style syscalls return 0 in the child, child tid in the parent */
        if (result == 0) {
            /* we're the child */
            thread_id_t child = get_sys_thread_id();
# ifdef DEBUG
            thread_id_t parent = get_parent_id();
            SYSLOG_INTERNAL_INFO("-- parent %d forked child %d --", parent, child);
# endif
            /* first, fix TLS of dcontext */
            ASSERT(parent != 0);
            /* change parent pid to our pid */
            replace_thread_id(dcontext->owning_thread, child);
            dcontext->owning_thread = child;
            dcontext->owning_process = get_process_id();
            /* now let dynamo initialize new shared memory, logfiles, etc.
             * need access to static vars in dynamo.c, that's why we don't do it. */
            /* FIXME - xref PR 246902 - dispatch runs a lot of code before
             * getting to post_system_call(); is any of that going to be messed up
             * by waiting till here to fixup the child logfolder/file and tid?
             */
            dynamorio_fork_init(dcontext);

            LOG(THREAD, LOG_SYSCALLS, 1,
                "after fork-like syscall: parent is %d, child is %d\n", parent, child);
        } else {
            /* we're the parent */
            os_fork_post(dcontext, true/*parent*/);
        }
    }

    LOG(THREAD, LOG_SYSCALLS, 2,
        "post syscall: sysnum="PFX", result="PFX" (%d)\n",
        sysnum, MCXT_SYSCALL_RES(mc), (int)MCXT_SYSCALL_RES(mc));

    switch (sysnum) {

    /****************************************************************************/
    /* MEMORY REGIONS */

#ifdef DEBUG
# ifdef MACOS
    case SYS_open_nocancel:
# endif
# ifdef SYS_open
    case SYS_open: {
        if (success) {
            /* useful for figuring out what module was loaded that then triggers
             * module.c elf curiosities
             */
            LOG(THREAD, LOG_SYSCALLS, 2, "SYS_open %s => %d\n",
                dcontext->sys_param0, (int)result);
        }
        break;
    }
# endif
#endif

#if defined(LINUX) && !defined(X64) && !defined(ARM)
    case SYS_mmap:
#endif
    case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): {
        uint flags;
        DEBUG_DECLARE(const char *map_type;)
        RSTATS_INC(num_app_mmaps);
        base = (app_pc) MCXT_SYSCALL_RES(mc); /* For mmap, it's NOT arg->addr! */
        /* mmap isn't simply a user-space wrapper for mmap2. It's called
         * directly when dynamically loading an SO, i.e., dlopen(). */
#ifdef LINUX /* MacOS success is in CF */
        success = mmap_syscall_succeeded((app_pc)result);
        /* The syscall either failed OR the retcode is less than the
         * largest uint value of any errno and the addr returned is
         * page-aligned.
         */
        ASSERT_CURIOSITY(!success ||
                         ((app_pc)result < (app_pc)(ptr_int_t)-0x1000 &&
                          ALIGNED(base, PAGE_SIZE)));
#else
        ASSERT_CURIOSITY(!success || ALIGNED(base, PAGE_SIZE));
#endif
        if (!success)
            goto exit_post_system_call;
#if defined(LINUX) && !defined(X64) && !defined(ARM)
        if (sysnum == SYS_mmap) {
            /* Old-style mmap passes its args via a struct in memory.
             * The syscall succeeded so the read of 'arg' should be
             * safe. */
            mmap_arg_struct_t *arg = (mmap_arg_struct_t *) dcontext->sys_param0;
            size = (size_t) arg->len;
            prot = (uint) arg->prot;
            flags = (uint) arg->flags;
            DEBUG_DECLARE(map_type = "mmap";)
        }
        else {
#endif
            size = (size_t) dcontext->sys_param1;
            prot = (uint) dcontext->sys_param2;
            flags = (uint) dcontext->sys_param3;
            DEBUG_DECLARE(map_type = IF_X64_ELSE("mmap2","mmap");)
#if defined(LINUX) && !defined(X64) && !defined(ARM)
        }
#endif
        process_mmap(dcontext, base, size, prot, flags _IF_DEBUG(map_type));
        break;
    }
    case SYS_munmap: {
        app_pc addr = (app_pc) dcontext->sys_param0;
        size_t len = (size_t) dcontext->sys_param1;
        /* We assumed in pre_system_call() that the unmap would succeed
         * and flushed fragments and removed the region from exec areas.
         * If the unmap failed, we re-add the region to exec areas.
         *
         * The same logic can be used on Windows (but isn't yet).
         */
        /* FIXME There are shortcomings to the approach. If another thread
         * executes in the region after our pre_system_call processing
         * but before the re-add below, it will get a security violation.
         * That's less than ideal but at least isn't a security hole.
         * The overall shortcoming is that we lose the state from our
         * stateful security policies -- future exec list, tables used
         * for RCT (.C/.E/.F) -- which can't be easily restored. Also,
         * the re-add could add a region that wasn't on the exec list
         * previously.
         *
         * See case 7559 for a better approach.
         */
        if (!success) {
            dr_mem_info_t info;
            /* must go to os to get real memory since we already removed */
            DEBUG_DECLARE(ok =)
                query_memory_ex_from_os(addr, &info);
            ASSERT(ok);
            app_memory_allocation(dcontext, addr, len, info.prot,
                                  info.type == DR_MEMTYPE_IMAGE
                                  _IF_DEBUG("failed munmap"));
            IF_NO_MEMQUERY(memcache_update_locked((app_pc)ALIGN_BACKWARD(addr,
                                                                         PAGE_SIZE),
                                                  (app_pc)ALIGN_FORWARD(addr + len,
                                                                        PAGE_SIZE),
                                                  info.prot,
                                                  info.type, false/*add back*/));
        }
        break;
    }
#ifdef LINUX
    case SYS_mremap: {
        app_pc old_base = (app_pc) dcontext->sys_param0;
        size_t old_size = (size_t) dcontext->sys_param1;
        base = (app_pc) MCXT_SYSCALL_RES(mc);
        size = (size_t) dcontext->sys_param2;
        /* even if no shift, count as munmap plus mmap */
        RSTATS_INC(num_app_munmaps);
        RSTATS_INC(num_app_mmaps);
        success = handle_app_mremap(dcontext, base, size, old_base, old_size,
                                    /* i#173: use memory prot and type
                                     * obtained from pre_system_call
                                     */
                                    (uint) dcontext->sys_param3,
                                    (uint) dcontext->sys_param4);
        /* The syscall either failed OR the retcode is less than the
         * largest uint value of any errno and the addr returned is
         * page-aligned.
         */
        ASSERT_CURIOSITY(!success ||
                         ((app_pc)result < (app_pc)(ptr_int_t)-0x1000 &&
                          ALIGNED(base, PAGE_SIZE)));
        if (!success)
            goto exit_post_system_call;
        break;
    }
#endif
    case SYS_mprotect: {
        base = (app_pc) dcontext->sys_param0;
        size = dcontext->sys_param1;
        prot = dcontext->sys_param2;
#ifdef VMX86_SERVER
        /* PR 475111: workaround for PR 107872 */
        if (os_in_vmkernel_userworld() &&
            result == -EBUSY && prot == PROT_NONE) {
            result = mprotect_syscall(base, size, PROT_READ);
            /* since non-Mac, we can use this even if the call failed */
            set_success_return_val(dcontext, result);
            success = (result >= 0);
            LOG(THREAD, LOG_VMAREAS, 1,
                "re-doing mprotect -EBUSY for "PFX"-"PFX" => %d\n",
                base, base + size, (int)result);
            SYSLOG_INTERNAL_WARNING_ONCE("re-doing mprotect for PR 475111, PR 107872");
        }
#endif
        /* FIXME i#143: we need to tweak the returned oldprot for
         * writable areas we've made read-only
         */
        if (!success) {
            uint memprot = 0;
            /* Revert the prot bits if needed. */
            if (!get_memory_info_from_os(base, NULL, NULL, &memprot))
                memprot = PROT_NONE;
            LOG(THREAD, LOG_SYSCALLS, 3,
                "syscall: mprotect failed: "PFX"-"PFX" prot->%d\n",
                base, base+size, osprot_to_memprot(prot));
            LOG(THREAD, LOG_SYSCALLS, 3, "\told prot->%d\n", memprot);
            if (prot != memprot_to_osprot(memprot)) {
                /* We're trying to reverse the prot change, assuming that
                 * this action doesn't have any unexpected side effects
                 * when doing so (such as not reversing some bit of internal
                 * state).
                 */
                uint new_memprot;
                DEBUG_DECLARE(uint res =)
                    app_memory_protection_change(dcontext, base, size,
                                                 osprot_to_memprot(prot),
                                                 &new_memprot,
                                                 NULL);
                ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
                ASSERT(res == DO_APP_MEM_PROT_CHANGE ||
                       res == PRETEND_APP_MEM_PROT_CHANGE);

                /* PR 410921 - Revert the changes to all-mems list.
                 * FIXME: This fix assumes the whole region had the prot &
                 * type, which is true in the cases we have seen so far, but
                 * theoretically may not be true. If it isn't true, multiple
                 * memory areas with different types/protections might have
                 * been changed in pre_system_call(), so will have to keep a
                 * list of all vmareas changed. This might be expensive for
                 * each mprotect syscall to guard against a rare theoretical bug.
                 */
                ASSERT_CURIOSITY(!dcontext->mprot_multi_areas);
                IF_NO_MEMQUERY(memcache_update_locked(base, base + size,
                                                      memprot, -1/*type unchanged*/,
                                                      true/*exists*/));
            }
        }
        break;
    }
#ifdef ANDROID
    case SYS_prctl: {
        int code = (int) dcontext->sys_param0;
        int subcode = (ulong) dcontext->sys_param1;
        if (success && code == PR_SET_VMA && subcode == PR_SET_VMA_ANON_NAME) {
            byte *addr = (byte *) dcontext->sys_param2;
            size_t len = (size_t) dcontext->sys_param3;
            IF_DEBUG(const char *comment = (const char *) dcontext->sys_param4;)
            uint memprot = 0;
            if (!get_memory_info_from_os(addr, NULL, NULL, &memprot))
                memprot = MEMPROT_NONE;
            /* We're post-syscall so from_os should match the prctl */
            ASSERT((comment == NULL && !TEST(MEMPROT_HAS_COMMENT, memprot)) ||
                   (comment != NULL && TEST(MEMPROT_HAS_COMMENT, memprot)));
            LOG(THREAD, LOG_SYSCALLS, 2,
                "syscall: prctl PR_SET_VMA_ANON_NAME base="PFX" size="PFX" comment=%s\n",
                addr, len, comment == NULL ? "<null>" : comment);
            IF_NO_MEMQUERY(memcache_update_locked(addr, addr + len, memprot,
                                                  -1/*type unchanged*/, true/*exists*/));
        }
        break;
    }
#endif
#ifdef LINUX
    case SYS_brk: {
        /* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
         * This code should work regardless of whether syscall failed
         * (if it failed, the old break will be returned). We stored
         * the old break in sys_param1 in pre-syscall.
         */
        app_pc old_brk = (app_pc) dcontext->sys_param1;
        app_pc new_brk = (app_pc) result;
        DEBUG_DECLARE(app_pc req_brk = (app_pc) dcontext->sys_param0;);
        ASSERT(!DYNAMO_OPTION(emulate_brk)); /* shouldn't get here */
# ifdef DEBUG
        if (DYNAMO_OPTION(early_inject) &&
            req_brk != NULL /* Ignore calls that don't increase brk. */) {
            DO_ONCE({
                ASSERT_CURIOSITY(new_brk > old_brk && "i#1004: first brk() "
                                 "allocation failed with -early_inject");
            });
        }
# endif
        handle_app_brk(dcontext, NULL, old_brk, new_brk);
        break;
    }
#endif

    /****************************************************************************/
    /* SPAWNING -- fork mostly handled above */
#ifdef LINUX
    case SYS_clone: {
        /* in /usr/src/linux/arch/i386/kernel/process.c */
        LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone returned "PFX"\n",
            MCXT_SYSCALL_RES(mc));
        /* We switch the lib tls segment back to dr's privlib segment.
         * Please refer to comment on os_switch_lib_tls.
         * It is only called in parent thread.
         * The child thread's tls setup is done in os_tls_app_seg_init.
         */
        if (was_thread_create_syscall(dcontext)) {
            if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
                os_switch_lib_tls(dcontext, false/*to dr*/);
            /* i#2089: we already restored the DR tls in os_clone_post() */
        }
        break;
    }
#elif defined(MACOS) && !defined(X64)
    case SYS_bsdthread_create: {
        /* restore stack values we clobbered */
        ASSERT(*sys_param_addr(dcontext, 0) == (reg_t) new_bsdthread_intercept);
        *sys_param_addr(dcontext, 0) = dcontext->sys_param0;
        *sys_param_addr(dcontext, 1) = dcontext->sys_param1;
        break;
    }
#endif
#ifdef SYS_fork
    case SYS_fork: {
        LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork returned "PFX"\n",
            MCXT_SYSCALL_RES(mc));
        break;
    }
#endif
#ifdef SYS_vfork
    case SYS_vfork: {
        LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork returned "PFX"\n",
            MCXT_SYSCALL_RES(mc));
        IF_LINUX(ASSERT(was_thread_create_syscall(dcontext)));
        /* restore xsp in parent */
        LOG(THREAD, LOG_SYSCALLS, 2,
            "vfork: restoring xsp from "PFX" to "PFX"\n",
            mc->xsp, dcontext->sys_param1);
        mc->xsp = dcontext->sys_param1;

        if (MCXT_SYSCALL_RES(mc) != 0) {
            /* We switch the lib tls segment back to dr's segment.
             * Please refer to comment on os_switch_lib_tls.
             * It is only called in parent thread.
             * The child thread's tls setup is done in os_tls_app_seg_init.
             */
            if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
                os_switch_lib_tls(dcontext, false/*to dr*/);
            }
            /* i#2089: we already restored the DR tls in os_clone_post() */
        }
        break;
    }
#endif
    case SYS_execve: {
        /* if we get here it means execve failed (doesn't return on success) */
        success = false;
        mark_thread_execve(dcontext->thread_record, false);
        ASSERT(result < 0);
        LOG(THREAD, LOG_SYSCALLS, 2, "syscall: execve failed\n");
        handle_execve_post(dcontext);
        /* Don't 'break' as we have an ASSERT(success) just below
         * the switch(). */
        goto exit_post_system_call;
        break; /* unnecessary but good form so keep it */
    }

    /****************************************************************************/
    /* SIGNALS */
    case IF_MACOS_ELSE(SYS_sigaction,SYS_rt_sigaction): {   /* 174 */
        /* in /usr/src/linux/kernel/signal.c:
           asmlinkage long
           sys_rt_sigaction(int sig, const struct sigaction *act,
                            struct sigaction *oact, size_t sigsetsize)
         */
        /* FIXME i#148: Handle syscall failure. */
        int sig = (int) dcontext->sys_param0;
        const kernel_sigaction_t *act =
            (const kernel_sigaction_t *) dcontext->sys_param1;
        prev_sigaction_t *oact = (prev_sigaction_t *) dcontext->sys_param2;
        size_t sigsetsize = (size_t) dcontext->sys_param3;
        uint res;
        res = handle_post_sigaction(dcontext, success, sig, act, oact, sigsetsize);
        LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction => %d\n",
            IF_MACOS_ELSE("","rt_"), -res);
        if (res != 0)
            set_failure_return_val(dcontext, res);
        if (!success || res != 0)
            goto exit_post_system_call;
        break;
    }
#if defined(LINUX) && !defined(X64)
    case SYS_sigaction: {      /* 67 */
        int sig = (int) dcontext->sys_param0;
        const old_sigaction_t *act = (const old_sigaction_t *) dcontext->sys_param1;
        old_sigaction_t *oact = (old_sigaction_t *) dcontext->sys_param2;
        uint res = handle_post_old_sigaction(dcontext, success, sig, act, oact);
        LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction => %d\n", -res);
        if (res != 0)
            set_failure_return_val(dcontext, res);
        if (!success || res != 0)
            goto exit_post_system_call;
        break;
    }
#endif
    case IF_MACOS_ELSE(SYS_sigprocmask,SYS_rt_sigprocmask): { /* 175 */
        /* in /usr/src/linux/kernel/signal.c:
           asmlinkage long
           sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
                              size_t sigsetsize)
         */
        /* FIXME i#148: Handle syscall failure. */
        handle_post_sigprocmask(dcontext, (int) dcontext->sys_param0,
                                (kernel_sigset_t *) dcontext->sys_param1,
                                (kernel_sigset_t *) dcontext->sys_param2,
                                (size_t) dcontext->sys_param3);
        break;
    }
#if defined(LINUX) && !defined(X64)
    case SYS_sigreturn:        /* 119 */
#endif
    case IF_MACOS_ELSE(SYS_sigreturn,SYS_rt_sigreturn):     /* 173 */
        /* there is no return value: it's just the value of eax, so avoid
         * assert below
         */
        success = true;
        break;

    case SYS_setitimer:       /* 104 */
        handle_post_setitimer(dcontext, success, (int) dcontext->sys_param0,
                              (const struct itimerval *) dcontext->sys_param1,
                              (struct itimerval *) dcontext->sys_param2);
        break;
    case SYS_getitimer:      /* 105 */
        handle_post_getitimer(dcontext, success, (int) dcontext->sys_param0,
                              (struct itimerval *) dcontext->sys_param1);
        break;
#if defined(LINUX) && defined(X86)
    case SYS_alarm: /* 27 on x86 and 37 on x64 */
        handle_post_alarm(dcontext, success, (unsigned int) dcontext->sys_param0);
        break;
#endif
#if defined(LINUX) && defined(X86) && defined(X64)
    case SYS_arch_prctl: {
        if (success && INTERNAL_OPTION(mangle_app_seg)) {
            tls_handle_post_arch_prctl(dcontext, dcontext->sys_param0,
                                       dcontext->sys_param1);
        }
        break;
    }
#endif

    /****************************************************************************/
    /* FILES */

#ifdef SYS_dup2
    case SYS_dup2:
    IF_LINUX(case SYS_dup3:) {
# ifdef LINUX
        /* i#392: track dup of our signalfd-based signal fds */
        if (success)
            signal_handle_dup(dcontext, (file_t) sys_param(dcontext, 1), (file_t) result);
# endif
        break;
    }
#endif

#ifdef MACOS
    case SYS_fcntl_nocancel:
#endif
    case SYS_fcntl: {
#ifdef LINUX /* Linux-only since only for signalfd */
        if (success) {
            file_t fd = (long) dcontext->sys_param0;
            int cmd = (int) dcontext->sys_param1;
            /* F_DUPFD duplicates the fd: track like dup2/dup3 above */
            if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC))
                signal_handle_dup(dcontext, fd, (file_t) result);
        }
        break;
#endif
    }

    case IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)): {
        int resource = dcontext->sys_param0;
        if (success && resource == RLIMIT_NOFILE) {
            /* we stole some space: hide it from app */
            struct rlimit *rlim = (struct rlimit *) dcontext->sys_param1;
            safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
                          &app_rlimit_nofile.rlim_cur, NULL);
            safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
                          &app_rlimit_nofile.rlim_max, NULL);
        }
        break;
    }
#if !defined(ARM) && !defined(X64) && !defined(MACOS)
    /* Old struct w/ smaller fields */
    case SYS_getrlimit: {
        int resource = dcontext->sys_param0;
        if (success && resource == RLIMIT_NOFILE) {
            struct compat_rlimit *rlim = (struct compat_rlimit *) dcontext->sys_param1;
            safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
                          &app_rlimit_nofile.rlim_cur, NULL);
            safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
                          &app_rlimit_nofile.rlim_max, NULL);
        }
        break;
    }
#endif
#ifdef LINUX
    case SYS_prlimit64: {
        int resource = dcontext->sys_param1;
        struct rlimit *rlim = (struct rlimit *) dcontext->sys_param3;
        if (success && resource == RLIMIT_NOFILE && rlim != NULL &&
            /* XXX: xref pid discussion in pre_system_call SYS_prlimit64 */
            (dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id())) {
            safe_write_ex(rlim, sizeof(*rlim), &app_rlimit_nofile, NULL);
        }
        break;
    }
#endif

#ifdef LINUX
# ifdef SYS_readlink
    case SYS_readlink:
# endif
    case SYS_readlinkat:
        if (success && DYNAMO_OPTION(early_inject)) {
            bool is_at = (sysnum == SYS_readlinkat);
            /* i#907: /proc/self/exe is a symlink to libdynamorio.so. We need
             * to fix it up if the app queries. Any thread id can be passed to
             * /proc/%d/exe, so we have to check. We could instead look for
             * libdynamorio.so in the result but we've tweaked our injector
             * in the past to exec different binaries so this seems more robust.
             */
            if (symlink_is_self_exe((const char *)(is_at ? dcontext->sys_param1 :
                                                   dcontext->sys_param0))) {
                char *tgt = (char *) (is_at ? dcontext->sys_param2 :
                                      dcontext->sys_param1);
                size_t tgt_sz = (size_t) (is_at ? dcontext->sys_param3 :
                                          dcontext->sys_param2);
                int len = snprintf(tgt, tgt_sz, "%s", get_application_name());
                if (len > 0)
                    set_success_return_val(dcontext, len);
                else {
                    set_failure_return_val(dcontext, EINVAL);
                    DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
                }
            }
        }
        break;
#endif

#ifdef VMX86_SERVER
    default:
        if (is_vmkuw_sysnum(sysnum)) {
            vmkuw_post_system_call(dcontext);
            break;
        }
#endif

    } /* switch */

    DODEBUG({
        if (ignorable_system_call_normalized(sysnum)) {
            STATS_INC(post_syscall_ignorable);
        } else {
            /* Many syscalls can fail though they aren't ignored. However, they
             * shouldn't happen without us knowing about them. See PR 402769
             * for SYS_close case.
             */
            if (!(success || sysnum == SYS_close ||
                  IF_MACOS(sysnum == SYS_close_nocancel ||)
                  dcontext->expect_last_syscall_to_fail)) {
                LOG(THREAD, LOG_SYSCALLS, 1,
                    "Unexpected failure of non-ignorable syscall %d\n", sysnum);
            }
        }
    });

 exit_post_system_call:
#ifdef CLIENT_INTERFACE
    /* The instrument_post_syscall should be called after DR finishes all
     * its operations, since DR needs to know the real syscall results,
     * and any changes made by the client are simply to fool the app.
     * Also, dr_syscall_invoke_another() needs to set eax, which shouldn't
     * affect the result of the 1st syscall. Xref i#1.
     */
    /* after restore of xbp so client sees it as though was sysenter */
    instrument_post_syscall(dcontext, sysnum);
#endif

    dcontext->whereami = old_whereami;
}
/* initializes dynamorio library bounds.
 * does not use any heap.
 * assumed to be called prior to find_executable_vm_areas.
 * Fills in dynamo_dll_start/end, dynamorio_library_path/filepath, and the
 * alternate-bitwidth (alt arch) path/filepath used for cross-arch execve.
 * Returns the result of memquery_library_bounds().
 */
static int
get_dynamo_library_bounds(void)
{
    /* Note that we're not counting DYNAMORIO_PRELOAD_NAME as a DR area, to match
     * Windows, so we should unload it like we do there. The other reason not to
     * count it is so is_in_dynamo_dll() can be the only exception to the
     * never-execute-from-DR-areas list rule
     */
    int res;
    app_pc check_start, check_end;
    char *libdir;
    const char *dynamorio_libname;
#ifdef STATIC_LIBRARY
    /* We don't know our image name, so look up our bounds with an internal
     * address.
     */
    dynamorio_libname = NULL;
    check_start = (app_pc)&get_dynamo_library_bounds;
#else /* !STATIC_LIBRARY */
# ifdef LINUX
    /* PR 361594: we get our bounds from linker-provided symbols.
     * Note that referencing the value of these symbols will crash:
     * always use the address only.
     */
    extern int dynamorio_so_start, dynamorio_so_end;
    dynamo_dll_start = (app_pc) &dynamorio_so_start;
    dynamo_dll_end = (app_pc) ALIGN_FORWARD(&dynamorio_so_end, PAGE_SIZE);
# elif defined(MACOS)
    dynamo_dll_start = module_dynamorio_lib_base();
# endif
    check_start = dynamo_dll_start;
    dynamorio_libname = IF_UNIT_TEST_ELSE(UNIT_TEST_EXE_NAME,DYNAMORIO_LIBRARY_NAME);
#endif /* STATIC_LIBRARY */
    res = memquery_library_bounds(dynamorio_libname,
                                  &check_start, &check_end,
                                  dynamorio_library_path,
                                  BUFFER_SIZE_ELEMENTS(dynamorio_library_path));
    LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME" library path: %s\n",
        dynamorio_library_path);
    snprintf(dynamorio_library_filepath, BUFFER_SIZE_ELEMENTS(dynamorio_library_filepath),
             "%s%s", dynamorio_library_path, dynamorio_libname);
    NULL_TERMINATE_BUFFER(dynamorio_library_filepath);
#if !defined(STATIC_LIBRARY) && defined(LINUX)
    /* linker symbols and memquery should agree on our bounds */
    ASSERT(check_start == dynamo_dll_start && check_end == dynamo_dll_end);
#elif defined(MACOS)
    ASSERT(check_start == dynamo_dll_start);
    dynamo_dll_end = check_end;
#else
    dynamo_dll_start = check_start;
    dynamo_dll_end = check_end;
#endif
    LOG(GLOBAL, LOG_VMAREAS, 1, "DR library bounds: "PFX" to "PFX"\n",
        dynamo_dll_start, dynamo_dll_end);
    ASSERT(res > 0);

    /* Issue 20: we need the path to the alt arch */
    strncpy(dynamorio_alt_arch_path, dynamorio_library_path,
            BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_path));
    /* Assumption: libdir name is not repeated elsewhere in path */
    libdir = strstr(dynamorio_alt_arch_path, IF_X64_ELSE(DR_LIBDIR_X64, DR_LIBDIR_X86));
    if (libdir != NULL) {
        /* swap lib64 <-> lib32 in place */
        const char *newdir = IF_X64_ELSE(DR_LIBDIR_X86, DR_LIBDIR_X64);
        /* do NOT place the NULL */
        strncpy(libdir, newdir, strlen(newdir));
    } else {
        SYSLOG_INTERNAL_WARNING("unable to determine lib path for cross-arch execve");
    }
    NULL_TERMINATE_BUFFER(dynamorio_alt_arch_path);
    LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME" alt arch path: %s\n",
        dynamorio_alt_arch_path);
    snprintf(dynamorio_alt_arch_filepath,
             BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_filepath),
             "%s%s", dynamorio_alt_arch_path, dynamorio_libname);
    NULL_TERMINATE_BUFFER(dynamorio_alt_arch_filepath);

    return res;
}
/* Returns the full path to our own library (cached after the first call);
 * used for forking and for the message file name.
 */
char*
get_dynamorio_library_path(void)
{
    /* An empty string means the bounds have not been computed yet. */
    if (dynamorio_library_filepath[0] == '\0')
        get_dynamo_library_bounds();
    return dynamorio_library_filepath;
}
#ifdef LINUX
/* Get full path+name of executable file from /proc/self/exe. Returns an empty
 * string on error.
 * The result is cached in a static buffer; pass ignore_cache to re-read.
 * FIXME i#47: This will return DR's path when using early injection.
 */
static char *
read_proc_self_exe(bool ignore_cache)
{
    static char exepath[MAXIMUM_PATH];
    static bool tried = false;
# ifdef MACOS
    ASSERT_NOT_IMPLEMENTED(false);
# endif
    if (!tried || ignore_cache) {
        tried = true;
        /* assume we have /proc/self/exe symlink: could add HAVE_PROC_EXE
         * but we have no alternative solution except assuming the first
         * /proc/self/maps entry is the executable
         */
        ssize_t res;
        DEBUG_DECLARE(int len = )
            snprintf(exepath, BUFFER_SIZE_ELEMENTS(exepath),
                     "/proc/%d/exe", get_process_id());
        ASSERT(len > 0);
        NULL_TERMINATE_BUFFER(exepath);
        /* i#960: readlink does not null terminate, so we do it. */
        /* Note: exepath is both the link name read and the target buffer
         * written; readlink allows the overlap.
         */
# ifdef SYS_readlink
        res = dynamorio_syscall(SYS_readlink, 3, exepath, exepath,
                                BUFFER_SIZE_ELEMENTS(exepath)-1);
# else
        res = dynamorio_syscall(SYS_readlinkat, 4, AT_FDCWD, exepath, exepath,
                                BUFFER_SIZE_ELEMENTS(exepath)-1);
# endif
        ASSERT(res < BUFFER_SIZE_ELEMENTS(exepath));
        /* on error (res < 0) leave an empty string */
        exepath[MAX(res, 0)] = '\0';
        NULL_TERMINATE_BUFFER(exepath);
    }
    return exepath;
}
#endif /* LINUX */
/* Returns the base address of the application executable's mapping,
 * resolving it lazily by scanning the memory map if needed.
 */
app_pc
get_application_base(void)
{
    if (executable_start == NULL) {
#ifdef HAVE_MEMINFO
        /* find_executable_vm_areas() hasn't run yet, so scan the maps ourselves */
        const char *exe_name = get_application_name();
        if (exe_name != NULL && exe_name[0] != '\0') {
            memquery_iter_t mqi;
            memquery_iterator_start(&mqi, NULL, false/*won't alloc*/);
            while (memquery_iterator_next(&mqi)) {
                if (strcmp(mqi.comment, exe_name) != 0)
                    continue;
                executable_start = mqi.vm_start;
                executable_end = mqi.vm_end;
                break;
            }
            memquery_iterator_stop(&mqi);
        }
#else
        /* We have to fail. Should we dl_iterate this early? */
#endif
    }
    return executable_start;
}
/* Returns the end address of the application executable's mapping;
 * resolving the base also fills in the end.
 */
app_pc
get_application_end(void)
{
    if (executable_end != NULL)
        return executable_end;
    get_application_base();
    return executable_end;
}
/* Returns the entry point of the application image, looked up lazily from
 * the module list once the executable's base is known.
 */
app_pc
get_image_entry()
{
    static app_pc image_entry_point = NULL;
    if (image_entry_point == NULL && executable_start != NULL) {
        module_area_t *area;
        os_get_module_info_lock();
        area = module_pc_lookup(executable_start);
        ASSERT(area != NULL);
        if (area != NULL) {
            ASSERT(executable_start == area->start);
            /* the cached static lives in rarely-written protected data */
            SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
            image_entry_point = area->entry_point;
            SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
        }
        os_get_module_info_unlock();
    }
    return image_entry_point;
}
#ifdef DEBUG
/* Debug-build hook for recording a snapshot of memory statistics.
 * FIXME: NYI -- currently a no-op on this platform.
 */
void
mem_stats_snapshot()
{
    /* FIXME: NYI */
}
#endif
/* Returns whether pc lies within the DR library itself. */
bool
is_in_dynamo_dll(app_pc pc)
{
    ASSERT(dynamo_dll_start != NULL);
#ifdef VMX86_SERVER
    /* We want to consider vmklib as part of the DR lib for allowing
     * execution (_init calls os_in_vmkernel_classic()) and for
     * reporting crashes as our fault
     */
    if (vmk_in_vmklib(pc))
        return true;
#endif
    return pc >= dynamo_dll_start && pc < dynamo_dll_end;
}
/* Returns the base of the DR library, computing the bounds on first use. */
app_pc
get_dynamorio_dll_start()
{
    if (dynamo_dll_start == NULL) {
        /* lazily resolve the library bounds */
        get_dynamo_library_bounds();
    }
    ASSERT(dynamo_dll_start != NULL);
    return dynamo_dll_start;
}
/* Returns the end of the DR library, computing the bounds on first use. */
app_pc
get_dynamorio_dll_end()
{
    if (dynamo_dll_end == NULL) {
        /* lazily resolve the library bounds */
        get_dynamo_library_bounds();
    }
    ASSERT(dynamo_dll_end != NULL);
    return dynamo_dll_end;
}
/* Returns the DR library's preferred base.  With PIC on Linux there is no
 * preferred base, so this always equals the actual load address
 * (dynamo_dll_start).
 */
app_pc
get_dynamorio_dll_preferred_base()
{
    return get_dynamorio_dll_start();
}
/* assumed to be called after find_dynamo_library_vm_areas() */
int
find_executable_vm_areas(void)
{
int count = 0;
#ifdef MACOS
app_pc shared_start, shared_end;
bool have_shared = module_dyld_shared_region(&shared_start, &shared_end);
#endif
#ifdef RETURN_AFTER_CALL
dcontext_t *dcontext = get_thread_private_dcontext();
os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
#endif
#ifndef HAVE_MEMINFO_QUERY
/* We avoid tracking the innards of vmheap for all_memory_areas by
* adding a single no-access region for the whole vmheap.
* Queries from heap routines use _from_os.
* Queries in check_thread_vm_area are fine getting "noaccess": wants
* any DR memory not on exec areas list to be noaccess.
* Queries from clients: should be ok to hide innards. Marking noaccess
* should be safer than marking free, as unruly client might try to mmap
* something in the free space: better to have it think it's reserved but
* not yet used memory. FIXME: we're not marking beyond-vmheap DR regions
* as noaccess!
*/
byte *our_heap_start, *our_heap_end;
get_vmm_heap_bounds(&our_heap_start, &our_heap_end);
if (our_heap_end - our_heap_start > 0) {
memcache_update_locked(our_heap_start, our_heap_end, MEMPROT_NONE,
DR_MEMTYPE_DATA, false/*!exists*/);
}
#endif
#ifndef HAVE_MEMINFO
count = find_vm_areas_via_probe();
#else
memquery_iter_t iter;
memquery_iterator_start(&iter, NULL, true/*may alloc*/);
while (memquery_iterator_next(&iter)) {
bool image = false;
size_t size = iter.vm_end - iter.vm_start;
/* i#479, hide private module and match Windows's behavior */
bool skip = dynamo_vm_area_overlap(iter.vm_start, iter.vm_end) &&
!is_in_dynamo_dll(iter.vm_start) /* our own text section is ok */
/* client lib text section is ok (xref i#487) */
IF_CLIENT_INTERFACE(&& !is_in_client_lib(iter.vm_start));
DEBUG_DECLARE(const char *map_type = "Private");
/* we can't really tell what's a stack and what's not, but we rely on
* our passing NULL preventing rwx regions from being added to executable
* or future list, even w/ -executable_if_alloc
*/
LOG(GLOBAL, LOG_VMAREAS, 2,
"start="PFX" end="PFX" prot=%x comment=%s\n",
iter.vm_start, iter.vm_end, iter.prot, iter.comment);
/* Issue 89: the vdso might be loaded inside ld.so as below,
* which causes ASSERT_CURIOSITY in mmap_check_for_module_overlap fail.
* b7fa3000-b7fbd000 r-xp 00000000 08:01 108679 /lib/ld-2.8.90.so
* b7fbd000-b7fbe000 r-xp b7fbd000 00:00 0 [vdso]
* b7fbe000-b7fbf000 r--p 0001a000 08:01 108679 /lib/ld-2.8.90.so
* b7fbf000-b7fc0000 rw-p 0001b000 08:01 108679 /lib/ld-2.8.90.so
* So we always first check if it is a vdso page before calling
* mmap_check_for_module_overlap.
* Update: with i#160/PR 562667 handling non-contiguous modules like
* ld.so we now gracefully handle other objects like vdso in gaps in
* module, but it's simpler to leave this ordering here.
*/
if (skip) {
/* i#479, hide private module and match Windows's behavior */
LOG(GLOBAL, LOG_VMAREAS, 2, PFX"-"PFX" skipping: internal DR region\n",
iter.vm_start, iter.vm_end);
#ifdef MACOS
} else if (have_shared && iter.vm_start >= shared_start &&
iter.vm_start < shared_end) {
/* Skip modules we happen to find inside the dyld shared cache,
* as we'll fail to identify the library. We add them
* in module_walk_dyld_list instead.
*/
image = true;
#endif
} else if (strncmp(iter.comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0 ||
IF_X64_ELSE(strncmp(iter.comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0,
/* Older kernels do not label it as "[vdso]", but it is hardcoded there */
/* 32-bit */
iter.vm_start == VSYSCALL_PAGE_START_HARDCODED)) {
# ifndef X64
/* We assume no vsyscall page for x64; thus, checking the
* hardcoded address shouldn't have any false positives.
*/
ASSERT(iter.vm_end - iter.vm_start == PAGE_SIZE ||
/* i#1583: recent kernels have 2-page vdso */
iter.vm_end - iter.vm_start == 2*PAGE_SIZE);
ASSERT(!dynamo_initialized); /* .data should be +w */
/* we're not considering as "image" even if part of ld.so (xref i#89) and
* thus we aren't adjusting our code origins policies to remove the
* vsyscall page exemption.
*/
DODEBUG({ map_type = "VDSO"; });
/* On re-attach, the vdso can be split into two entries (from DR's hook),
* so take just the first one as the start (xref i#2157).
*/
if (vsyscall_page_start == NULL)
vsyscall_page_start = iter.vm_start;
if (vdso_page_start == NULL)
vdso_page_start = vsyscall_page_start; /* assume identical for now */
LOG(GLOBAL, LOG_VMAREAS, 1, "found vsyscall page @ "PFX" %s\n",
vsyscall_page_start, iter.comment);
# else
/* i#172
* fix bugs for OS where vdso page is set unreadable as below
* ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vdso]
* but it is readable indeed.
*/
/* i#430
* fix bugs for OS where vdso page is set unreadable as below
* ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall]
* but it is readable indeed.
*/
if (!TESTALL((PROT_READ|PROT_EXEC), iter.prot))
iter.prot |= (PROT_READ|PROT_EXEC);
/* i#1908: vdso and vsyscall pages are now split */
if (strncmp(iter.comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0)
vdso_page_start = iter.vm_start;
else if (strncmp(iter.comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0)
vsyscall_page_start = iter.vm_start;
# endif
} else if (mmap_check_for_module_overlap(iter.vm_start, size,
TEST(MEMPROT_READ, iter.prot),
iter.inode, false)) {
/* we already added the whole image region when we hit the first map for it */
image = true;
DODEBUG({ map_type = "ELF SO"; });
} else if (TEST(MEMPROT_READ, iter.prot) &&
module_is_header(iter.vm_start, size)) {
size_t image_size = size;
app_pc mod_base, mod_first_end, mod_max_end;
char *exec_match;
bool found_exec = false;
image = true;
DODEBUG({ map_type = "ELF SO"; });
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found already mapped module first segment :\n"
"\t"PFX"-"PFX"%s inode="UINT64_FORMAT_STRING" name=%s\n",
iter.vm_start, iter.vm_end, TEST(MEMPROT_EXEC, iter.prot) ? " +x": "",
iter.inode, iter.comment);
#ifdef LINUX
ASSERT_CURIOSITY(iter.inode != 0); /* mapped images should have inodes */
#endif
ASSERT_CURIOSITY(iter.offset == 0); /* first map shouldn't have offset */
/* Get size by walking the program headers. This includes .bss. */
if (module_walk_program_headers(iter.vm_start, size, false,
true, /* i#1589: ld.so relocated .dynamic */
&mod_base, &mod_first_end,
&mod_max_end, NULL, NULL)) {
image_size = mod_max_end - mod_base;
} else {
ASSERT_NOT_REACHED();
}
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found already mapped module total module :\n"
"\t"PFX"-"PFX" inode="UINT64_FORMAT_STRING" name=%s\n",
iter.vm_start, iter.vm_start+image_size, iter.inode, iter.comment);
/* look for executable */
#ifdef LINUX
exec_match = get_application_name();
if (exec_match != NULL && exec_match[0] != '\0')
found_exec = (strcmp(iter.comment, exec_match) == 0);
#else
/* We don't have a nice normalized name: it can have ./ or ../ inside
* it. But, we can distinguish an exe from a lib here, even for PIE,
* so we go with that plus a basename comparison.
*/
exec_match = (char *) get_application_short_name();
if (module_is_executable(iter.vm_start) &&
exec_match != NULL && exec_match[0] != '\0') {
const char *iter_basename = strrchr(iter.comment, '/');
if (iter_basename == NULL)
iter_basename = iter.comment;
else
iter_basename++;
found_exec = (strcmp(iter_basename, exec_match) == 0);
}
#endif
if (found_exec) {
if (executable_start == NULL)
executable_start = iter.vm_start;
else
ASSERT(iter.vm_start == executable_start);
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found executable %s @"PFX"-"PFX" %s\n", get_application_name(),
iter.vm_start, iter.vm_start+image_size, iter.comment);
}
/* We don't yet know whether contiguous so we have to settle for the
* first segment's size. We'll update it in module_list_add().
*/
module_list_add(iter.vm_start, mod_first_end - mod_base,
false, iter.comment, iter.inode);
#ifdef MACOS
/* look for dyld */
if (strcmp(iter.comment, "/usr/lib/dyld") == 0)
module_walk_dyld_list(iter.vm_start);
#endif
} else if (iter.inode != 0) {
DODEBUG({ map_type = "Mapped File"; });
}
/* add all regions (incl. dynamo_areas and stack) to all_memory_areas */
LOG(GLOBAL, LOG_VMAREAS, 4,
"find_executable_vm_areas: adding: "PFX"-"PFX" prot=%d\n",
iter.vm_start, iter.vm_end, iter.prot);
IF_NO_MEMQUERY(memcache_update_locked(iter.vm_start, iter.vm_end, iter.prot,
image ? DR_MEMTYPE_IMAGE :
DR_MEMTYPE_DATA, false/*!exists*/));
/* FIXME: best if we could pass every region to vmareas, but
* it has no way of determining if this is a stack b/c we don't have
* a dcontext at this point -- so we just don't pass the stack
*/
if (!skip /* i#479, hide private module and match Windows's behavior */ &&
app_memory_allocation(NULL, iter.vm_start, (iter.vm_end - iter.vm_start),
iter.prot, image _IF_DEBUG(map_type))) {
count++;
}
}
memquery_iterator_stop(&iter);
#endif /* !HAVE_MEMINFO */
#ifndef HAVE_MEMINFO_QUERY
DOLOG(4, LOG_VMAREAS, memcache_print(GLOBAL,"init: all memory areas:\n"););
#endif
#ifdef RETURN_AFTER_CALL
/* Find the bottom of the stack of the initial (native) entry */
ostd->stack_bottom_pc = find_stack_bottom();
LOG(THREAD, LOG_ALL, 1, "Stack bottom pc = "PFX"\n", ostd->stack_bottom_pc);
#endif
/* now that we've walked memory print all modules */
LOG(GLOBAL, LOG_VMAREAS, 2, "Module list after memory walk\n");
DOLOG(1, LOG_VMAREAS, { print_modules(GLOBAL, DUMP_NOT_XML); });
STATS_ADD(num_app_code_modules, count);
/* now that we have the modules set up, query libc */
get_libc_errno_location(true/*force init*/);
return count;
}
/* initializes dynamorio library bounds.
* does not use any heap.
* assumed to be called prior to find_executable_vm_areas.
*/
int
find_dynamo_library_vm_areas(void)
{
#ifndef STATIC_LIBRARY
    /* We didn't add inside get_dynamo_library_bounds b/c it was called pre-alloc.
     * We don't bother to break down the sub-regions.
     * Assumption: we don't need to have the protection flags for DR sub-regions.
     * For static library builds, DR's code is in the exe and isn't considered
     * to be a DR area.
     */
    add_dynamo_vm_area(get_dynamorio_dll_start(), get_dynamorio_dll_end(),
                       MEMPROT_READ|MEMPROT_WRITE|MEMPROT_EXEC,
                       true /* from image */ _IF_DEBUG(dynamorio_library_filepath));
#endif
#ifdef VMX86_SERVER
    if (os_in_vmkernel_userworld())
        vmk_add_vmklib_to_dynamo_areas();
#endif
    /* Unconditionally reports a single area, even for builds where nothing
     * was added above.
     */
    return 1;
}
/* Returns the current thread's app stack bounds, computed lazily on first
 * call by querying the memory region containing the app's xsp and cached in
 * the thread's os_thread_data_t thereafter.  base/top are optional (NULL to
 * skip).  Always returns true.
 */
bool
get_stack_bounds(dcontext_t *dcontext, byte **base, byte **top)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
    if (ostd->stack_base == NULL) {
        /* initialize on-demand since don't have app esp handy in os_thread_init()
         * FIXME: the comment here -- ignoring it for now, if hit cases confirming
         * it the right thing will be to merge adjacent rwx regions and assume
         * their union is the stack -- otherwise have to have special stack init
         * routine called from x86.asm new_thread_dynamo_start and internal_dynamo_start,
         * and the latter is not a do-once...
         */
        size_t size = 0;
        bool ok;
        /* store stack info at thread startup, since stack can get fragmented in
         * /proc/self/maps w/ later mprotects and it can be hard to piece together later
         */
        if (IF_MEMQUERY_ELSE(false, DYNAMO_OPTION(use_all_memory_areas))) {
            ok = get_memory_info((app_pc)get_mcontext(dcontext)->xsp,
                                 &ostd->stack_base, &size, NULL);
        } else {
            ok = get_memory_info_from_os((app_pc)get_mcontext(dcontext)->xsp,
                                         &ostd->stack_base, &size, NULL);
        }
        ASSERT(ok);
        ostd->stack_top = ostd->stack_base + size;
        LOG(THREAD, LOG_THREADS, 1, "App stack is "PFX"-"PFX"\n",
            ostd->stack_base, ostd->stack_top);
    }
    if (base != NULL)
        *base = ostd->stack_base;
    if (top != NULL)
        *top = ostd->stack_top;
    return true;
}
#ifdef RETURN_AFTER_CALL
/* Classifies whether target_pc corresponds to the bottom of the initial
 * (native) call stack.  Prefers the image-entry heuristic; falls back to
 * comparing against the stack-bottom pc recorded at init time.
 */
initial_call_stack_status_t
at_initial_stack_bottom(dcontext_t *dcontext, app_pc target_pc)
{
    /* We can't rely exclusively on finding the true stack bottom
     * b/c we can't always walk the call stack (PR 608990) so we
     * use the image entry as our primary trigger
     */
    if (executable_start != NULL/*defensive*/ && reached_image_entry_yet()) {
        return INITIAL_STACK_EMPTY;
    } else {
        /* If our stack walk ends early we could have false positives, but
         * that's better than false negatives if we miss the image entry
         * or we were unable to find the executable_start
         */
        os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
        if (target_pc == ostd->stack_bottom_pc) {
            return INITIAL_STACK_BOTTOM_REACHED;
        } else {
            return INITIAL_STACK_BOTTOM_NOT_REACHED;
        }
    }
}
#endif /* RETURN_AFTER_CALL */
/* Uses our cached data structures (if in use, else raw query) to retrieve memory info */
/* Fills out_info for the region containing pc.  Compile-time dispatch: with
 * HAVE_MEMINFO_QUERY we ask the OS directly; otherwise we consult our
 * memcache of memory areas.
 */
bool
query_memory_ex(const byte *pc, OUT dr_mem_info_t *out_info)
{
#ifdef HAVE_MEMINFO_QUERY
    return query_memory_ex_from_os(pc, out_info);
#else
    return memcache_query_memory(pc, out_info);
#endif
}
/* On Linux a region's "current base" query is identical to the full
 * query_memory_ex() lookup, so simply forward to it.
 */
bool
query_memory_cur_base(const byte *pc, OUT dr_mem_info_t *info)
{
    return query_memory_ex(pc, info);
}
/* Use our cached data structures (if in use, else raw query) to retrieve memory info */
/* Retrieves info about the memory region containing pc, using our cached
 * data structures except for addresses inside our own VMM reservation,
 * which are queried raw from the OS.  Each OUT param may be NULL.
 * Returns false if the address is free or the query fails.
 */
bool
get_memory_info(const byte *pc, byte **base_pc, size_t *size,
                uint *prot /* OUT optional, returns MEMPROT_* value */)
{
    dr_mem_info_t info;
    bool found;
    /* Our VMM-reserved region must bypass the cache and go straight to the OS. */
    if (is_vmm_reserved_address((byte*)pc, 1))
        found = query_memory_ex_from_os(pc, &info);
    else
        found = query_memory_ex(pc, &info);
    if (!found || info.type == DR_MEMTYPE_FREE)
        return false;
    if (base_pc != NULL)
        *base_pc = info.base_pc;
    if (size != NULL)
        *size = info.size;
    if (prot != NULL)
        *prot = info.prot;
    return true;
}
/* We assume that this routine might be called instead of query_memory_ex()
* b/c the caller is in a fragile location and cannot acquire locks, so
* we try to do the same here.
*/
/* Raw-OS variant of query_memory_ex().  We assume that this routine might be
 * called instead of query_memory_ex() b/c the caller is in a fragile location
 * and cannot acquire locks, so we try to do the same here.
 * On failure sets info->type to DR_MEMTYPE_ERROR; on success without a known
 * type, classifies the region as image vs data via a module-header probe.
 */
bool
query_memory_ex_from_os(const byte *pc, OUT dr_mem_info_t *info)
{
    bool have_type = false;
    bool res = memquery_from_os(pc, info, &have_type);
    if (!res) {
        /* No other failure types for now */
        info->type = DR_MEMTYPE_ERROR;
    } else if (res && !have_type) {
        /* We pass 0 instead of info->size b/c even if marked as +r we can still
         * get SIGBUS if beyond end of mmapped file: not uncommon if querying
         * in middle of library load before .bss fully set up (PR 528744).
         * However, if there is no fault handler, is_elf_so_header's safe_read will
         * recurse to here, so in that case we use info->size but we assume
         * it's only at init or exit and so not in the middle of a load
         * and less likely to be querying a random mmapped file.
         * The cleaner fix is to allow safe_read to work w/o a dcontext or
         * fault handling: i#350/PR 529066.
         */
        if (TEST(MEMPROT_READ, info->prot) &&
            module_is_header(info->base_pc, fault_handling_initialized ? 0 : info->size))
            info->type = DR_MEMTYPE_IMAGE;
        else {
            /* FIXME: won't quite match find_executable_vm_areas marking as
             * image: can be doubly-mapped so; don't want to count vdso; etc.
             */
            info->type = DR_MEMTYPE_DATA;
        }
    }
    return res;
}
/* Like get_memory_info() but always queries the OS directly, bypassing any
 * cached data structures.  Each OUT param may be NULL.  Returns false for
 * free memory or on query failure.
 */
bool
get_memory_info_from_os(const byte *pc, byte **base_pc, size_t *size,
                        uint *prot /* OUT optional, returns MEMPROT_* value */)
{
    dr_mem_info_t mem;
    if (!query_memory_ex_from_os(pc, &mem))
        return false;
    if (mem.type == DR_MEMTYPE_FREE)
        return false;
    if (base_pc != NULL)
        *base_pc = mem.base_pc;
    if (size != NULL)
        *size = mem.size;
    if (prot != NULL)
        *prot = mem.prot;
    return true;
}
/* in utils.c, exported only for our hack! */
extern void deadlock_avoidance_unlock(mutex_t *lock, bool ownable);
/* Blocks until the contended lock is acquired.  Uses futex-style kernel waits
 * when available (i#96/PR 295561); otherwise falls back to trylock + yield.
 * On return the caller owns the lock.
 */
void
mutex_wait_contended_lock(mutex_t *lock)
{
#ifdef CLIENT_INTERFACE
    dcontext_t *dcontext = get_thread_private_dcontext();
    /* Mark client threads safe-for-synch while blocked on their own grab mutex. */
    bool set_client_safe_for_synch =
        ((dcontext != NULL) && IS_CLIENT_THREAD(dcontext) &&
         ((mutex_t *)dcontext->client_data->client_grab_mutex == lock));
#endif
    /* i#96/PR 295561: use futex(2) if available */
    if (ksynch_kernel_support()) {
        /* Try to get the lock.  If already held, it's fine to store any value
         * > LOCK_SET_STATE (we don't rely on paired incs/decs) so that
         * the next unlocker will call mutex_notify_released_lock().
         */
        ptr_int_t res;
#ifndef LINUX /* we actually don't use this for Linux: see below */
        KSYNCH_TYPE *event = mutex_get_contended_event(lock);
        ASSERT(event != NULL && ksynch_var_initialized(event));
#endif
        /* Loop: atomically claim contended state; exit only once we observe
         * the lock was free at the moment of our exchange.
         */
        while (atomic_exchange_int(&lock->lock_requests, LOCK_CONTENDED_STATE) !=
               LOCK_FREE_STATE) {
#ifdef CLIENT_INTERFACE
            if (set_client_safe_for_synch)
                dcontext->client_data->client_thread_safe_for_synch = true;
#endif
            /* Unfortunately the synch semantics are different for Linux vs Mac.
             * We have to use lock_requests as the futex to avoid waiting if
             * lock_requests changes, while on Mac the underlying synch prevents
             * a wait there.
             */
#ifdef LINUX
            /* We'll abort the wait if lock_requests has changed at all.
             * We can't have a series of changes that result in no apparent
             * change w/o someone acquiring the lock, b/c
             * mutex_notify_released_lock() sets lock_requests to LOCK_FREE_STATE.
             */
            res = ksynch_wait(&lock->lock_requests, LOCK_CONTENDED_STATE);
#else
            res = ksynch_wait(event, 0);
#endif
            if (res != 0 && res != -EWOULDBLOCK)
                os_thread_yield();
#ifdef CLIENT_INTERFACE
            if (set_client_safe_for_synch)
                dcontext->client_data->client_thread_safe_for_synch = false;
#endif
            /* we don't care whether properly woken (res==0), var mismatch
             * (res==-EWOULDBLOCK), or error: regardless, someone else
             * could have acquired the lock, so we try again
             */
        }
    } else {
        /* No kernel futex support: spin on trylock with yields. */
        /* we now have to undo our earlier request */
        atomic_dec_and_test(&lock->lock_requests);
        while (!mutex_trylock(lock)) {
#ifdef CLIENT_INTERFACE
            if (set_client_safe_for_synch)
                dcontext->client_data->client_thread_safe_for_synch = true;
#endif
            os_thread_yield();
#ifdef CLIENT_INTERFACE
            if (set_client_safe_for_synch)
                dcontext->client_data->client_thread_safe_for_synch = false;
#endif
        }
#ifdef DEADLOCK_AVOIDANCE
        /* HACK: trylock's success causes it to do DEADLOCK_AVOIDANCE_LOCK, so to
         * avoid two in a row (causes assertion on owner) we unlock here
         * In the future we will remove the trylock here and this will go away.
         */
        deadlock_avoidance_unlock(lock, true);
#endif
    }
    return;
}
/* Counterpart to mutex_wait_contended_lock(): marks the lock free and wakes
 * one waiter via the futex, if kernel synch support is present.
 */
void
mutex_notify_released_lock(mutex_t *lock)
{
    /* i#96/PR 295561: use futex(2) if available. */
    if (ksynch_kernel_support()) {
        /* Set to LOCK_FREE_STATE to avoid concurrent lock attempts from
         * resulting in a futex_wait value match w/o anyone owning the lock
         */
        lock->lock_requests = LOCK_FREE_STATE;
        /* No reason to wake multiple threads: just one */
#ifdef LINUX
        ksynch_wake(&lock->lock_requests);
#else
        ksynch_wake(&lock->contended_event);
#endif
    } /* else nothing to do */
}
/* read_write_lock_t implementation doesn't expect the contention path
   helpers to guarantee the lock is held (unlike mutexes) so simple
   yields are still acceptable.
 */
/* Writer-contention path: just yield and let the caller retry. */
void
rwlock_wait_contended_writer(read_write_lock_t *rwlock)
{
    os_thread_yield();
}
/* No wakeup needed since contended waiters only yield. */
void
rwlock_notify_writer(read_write_lock_t *rwlock)
{
    /* nothing to do here */
}
/* Reader-contention path: just yield and let the caller retry. */
void
rwlock_wait_contended_reader(read_write_lock_t *rwlock)
{
    os_thread_yield();
}
/* No wakeup needed since contended waiters only yield. */
void
rwlock_notify_readers(read_write_lock_t *rwlock)
{
    /* nothing to do here */
}
/***************************************************************************/
/* events are un-signaled when successfully waited upon. */
/* Event object: a signaled flag protected by a lock.  Events are un-signaled
 * when successfully waited upon (see wait_for_event()).
 */
typedef struct linux_event_t {
    /* Any function that sets this flag must also notify possibly waiting
     * thread(s). See i#96/PR 295561.
     */
    KSYNCH_TYPE signaled;
    /* Serializes the check-and-reset of signaled in wait_for_event(). */
    mutex_t lock;
} linux_event_t;
/* FIXME: this routine will need to have a macro wrapper to let us assign different ranks to
* all events for DEADLOCK_AVOIDANCE. Currently a single rank seems to work.
*/
/* Allocates and initializes a new, un-signaled event on the global heap.
 * Pair with destroy_event().
 */
event_t
create_event()
{
    event_t e = (event_t) global_heap_alloc(sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
    ksynch_init_var(&e->signaled);
    ASSIGN_INIT_LOCK_FREE(e->lock, event_lock); /* FIXME: we'll need to pass the event name here */
    return e;
}
/* Tears down an event created by create_event() and frees its storage.
 * The caller must ensure no thread is still waiting on it.
 */
void
destroy_event(event_t e)
{
    DELETE_LOCK(e->lock);
    ksynch_free_var(&e->signaled);
    global_heap_free(e, sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
}
/* Signals the event and wakes any waiter.  The set+wake happens under the
 * event lock so it cannot race with wait_for_event()'s check-and-reset.
 */
void
signal_event(event_t e)
{
    mutex_lock(&e->lock);
    ksynch_set_value(&e->signaled, 1);
    ksynch_wake(&e->signaled);
    LOG(THREAD_GET, LOG_THREADS, 3,"thread "TIDFMT" signalling event "PFX"\n",get_thread_id(),e);
    mutex_unlock(&e->lock);
}
/* Clears the event's signaled flag (no wakeups involved). */
void
reset_event(event_t e)
{
    mutex_lock(&e->lock);
    ksynch_set_value(&e->signaled, 0);
    LOG(THREAD_GET, LOG_THREADS, 3,"thread "TIDFMT" resetting event "PFX"\n",get_thread_id(),e);
    mutex_unlock(&e->lock);
}
/* Blocks until the event is signaled, consuming (resetting) the signal on
 * return so exactly one waiter proceeds per signal.
 */
void
wait_for_event(event_t e)
{
#ifdef DEBUG
    dcontext_t *dcontext = get_thread_private_dcontext();
#endif
    /* Use a user-space event on Linux, a kernel event on Windows. */
    LOG(THREAD, LOG_THREADS, 3, "thread "TIDFMT" waiting for event "PFX"\n",get_thread_id(),e);
    while (true) {
        /* Fast unlocked check first; confirm under the lock before consuming. */
        if (ksynch_get_value(&e->signaled) == 1) {
            mutex_lock(&e->lock);
            if (ksynch_get_value(&e->signaled) == 0) {
                /* some other thread beat us to it */
                LOG(THREAD, LOG_THREADS, 3, "thread "TIDFMT" was beaten to event "PFX"\n",
                    get_thread_id(),e);
                mutex_unlock(&e->lock);
            } else {
                /* reset the event */
                ksynch_set_value(&e->signaled, 0);
                mutex_unlock(&e->lock);
                LOG(THREAD, LOG_THREADS, 3,
                    "thread "TIDFMT" finished waiting for event "PFX"\n", get_thread_id(),e);
                return;
            }
        } else {
            /* Waits only if the signaled flag is not set as 1. Return value
             * doesn't matter because the flag will be re-checked.
             */
            ksynch_wait(&e->signaled, 0);
        }
        if (ksynch_get_value(&e->signaled) == 0) {
            /* If it still has to wait, give up the cpu. */
            os_thread_yield();
        }
    }
}
/***************************************************************************
* DIRECTORY ITERATOR
*/
/* These structs are written to the buf that we pass to getdents. We can
* iterate them by adding d_reclen to the current buffer offset and interpreting
* that as the next entry.
*/
/* Layout of one directory entry as written into our buffer by the getdents
 * (or getdents64) syscall; iterate by advancing d_reclen bytes.
 */
struct linux_dirent {
#ifdef SYS_getdents
    /* Adapted from struct old_linux_dirent in linux/fs/readdir.c: */
    unsigned long   d_ino;
    unsigned long   d_off;
    unsigned short  d_reclen;
    char            d_name[];
#else
    /* Adapted from struct linux_dirent64 in linux/include/linux/dirent.h: */
    uint64          d_ino;
    int64           d_off;
    unsigned short  d_reclen;
    unsigned char   d_type;
    char            d_name[];
#endif
};
/* Interprets the bytes at the iterator's current buffer offset as a dirent. */
#define CURRENT_DIRENT(iter) \
        ((struct linux_dirent *)(&iter->buf[iter->off]))
/* Initializes a directory iterator over the already-open directory fd.
 * off == end == 0 forces the first os_dir_iterator_next() to fetch dents.
 */
static void
os_dir_iterator_start(dir_iterator_t *iter, file_t fd)
{
    iter->fd = fd;
    iter->off = 0;
    iter->end = 0;
}
/* Advances to the next directory entry, refilling the buffer via a getdents
 * syscall when exhausted.  On success sets iter->name and returns true;
 * returns false (name == NULL) at end-of-directory or on error.
 */
static bool
os_dir_iterator_next(dir_iterator_t *iter)
{
#ifdef MACOS
    /* We can use SYS_getdirentries, but do we even need a dir iterator?
     * On Linux it's only used to enumerate /proc/pid/task.
     */
    ASSERT_NOT_IMPLEMENTED(false);
    return false;
#else
    if (iter->off < iter->end) {
        /* Have existing dents, get the next offset. */
        iter->off += CURRENT_DIRENT(iter)->d_reclen;
        ASSERT(iter->off <= iter->end);
    }
    if (iter->off == iter->end) {
        /* Do a getdents syscall.  Unlike when reading a file, the kernel will
         * not read a partial linux_dirent struct, so we don't need to shift the
         * left over bytes to the buffer start.  See the getdents manpage for
         * the example code that this is based on.
         */
        iter->off = 0;
# ifdef SYS_getdents
        iter->end = dynamorio_syscall(SYS_getdents, 3, iter->fd, iter->buf,
                                      sizeof(iter->buf));
# else
        iter->end = dynamorio_syscall(SYS_getdents64, 3, iter->fd, iter->buf,
                                      sizeof(iter->buf));
# endif
        ASSERT(iter->end <= sizeof(iter->buf));
        if (iter->end <= 0) {  /* No more dents, or error. */
            iter->name = NULL;
            if (iter->end < 0) {
                /* Negative return is -errno from the raw syscall. */
                LOG(GLOBAL, LOG_SYSCALLS, 1,
                    "getdents syscall failed with errno %d\n", -iter->end);
            }
            return false;
        }
    }
    iter->name = CURRENT_DIRENT(iter)->d_name;
    return true;
#endif
}
/***************************************************************************
* THREAD TAKEOVER
*/
/* Record used to synchronize thread takeover. */
typedef struct _takeover_record_t {
thread_id_t tid;
event_t event;
} takeover_record_t;
/* When attempting thread takeover, we store an array of thread id and event
* pairs here. Each thread we signal is supposed to enter DR control and signal
* this event after it has added itself to all_threads.
*
* XXX: What we really want is to be able to use SYS_rt_tgsigqueueinfo (Linux >=
* 2.6.31) to pass the event_t to each thread directly, rather than using this
* side data structure.
*/
static takeover_record_t *thread_takeover_records;
static uint num_thread_takeover_records;
/* This is the dcontext of the thread that initiated the takeover. We read the
* owning_thread and signal_field threads from it in the signaled threads to
* set up siginfo sharing.
*/
static dcontext_t *takeover_dcontext;
/* Lists active threads in the process.
* XXX: The /proc man page says /proc/pid/task is only available if the main
* thread is still alive, but experiments on 2.6.38 show otherwise.
*/
/* Lists active threads in the process by reading /proc/self/task.
 * Returns a heap array of tids (caller frees via HEAP_ARRAY_FREE with
 * *num_threads_out) or NULL on unsupported platforms.
 * XXX: The /proc man page says /proc/pid/task is only available if the main
 * thread is still alive, but experiments on 2.6.38 show otherwise.
 */
static thread_id_t *
os_list_threads(dcontext_t *dcontext, uint *num_threads_out)
{
    dir_iterator_t iter;
    file_t task_dir;
    uint tids_alloced = 10;
    uint num_threads = 0;
    thread_id_t *new_tids;
    thread_id_t *tids;

    ASSERT(num_threads_out != NULL);

#ifdef MACOS
    /* XXX i#58: NYI.
     * We may want SYS_proc_info with PROC_INFO_PID_INFO and PROC_PIDLISTTHREADS,
     * or is that just BSD threads and instead we want process_set_tasks()
     * and task_info() as in 7.3.1.3 in Singh's OSX book?
     */
    *num_threads_out = 0;
    return NULL;
#endif

    tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced,
                            ACCT_THREAD_MGT, PROTECTED);
    task_dir = os_open_directory("/proc/self/task", OS_OPEN_READ);
    ASSERT(task_dir != INVALID_FILE);
    os_dir_iterator_start(&iter, task_dir);
    while (os_dir_iterator_next(&iter)) {
        thread_id_t tid;
        DEBUG_DECLARE(int r;)
        /* Skip the "." and ".." entries; everything else is a tid. */
        if (strcmp(iter.name, ".") == 0 ||
            strcmp(iter.name, "..") == 0)
            continue;
        IF_DEBUG(r =)
            sscanf(iter.name, "%u", &tid);
        ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to parse /proc/pid/task entry",
                       r == 1);
        if (tid <= 0)
            continue;
        if (num_threads == tids_alloced) {
            /* realloc, essentially.  Less expensive than counting first. */
            new_tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced * 2,
                                        ACCT_THREAD_MGT, PROTECTED);
            memcpy(new_tids, tids, sizeof(thread_id_t) * tids_alloced);
            HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced,
                            ACCT_THREAD_MGT, PROTECTED);
            tids = new_tids;
            tids_alloced *= 2;
        }
        tids[num_threads++] = tid;
    }
    ASSERT(iter.end == 0);  /* No reading errors. */
    os_close(task_dir);

    /* realloc back down to num_threads for caller simplicity. */
    new_tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, num_threads,
                                ACCT_THREAD_MGT, PROTECTED);
    memcpy(new_tids, tids, sizeof(thread_id_t) * num_threads);
    HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced,
                    ACCT_THREAD_MGT, PROTECTED);
    tids = new_tids;
    *num_threads_out = num_threads;
    return tids;
}
/* List the /proc/self/task directory and add all unknown thread ids to the
* all_threads hashtable in dynamo.c. Returns true if we found any unknown
* threads and false otherwise. We assume that since we don't know about them
* they are not under DR and have no dcontexts.
*/
/* List the /proc/self/task directory and add all unknown thread ids to the
 * all_threads hashtable in dynamo.c.  Returns true if we found any unknown
 * threads and false otherwise.  We assume that since we don't know about them
 * they are not under DR and have no dcontexts.
 * Protocol: publish takeover records + events, signal each target with
 * SUSPEND_SIGNAL, then wait (lock-free) for each to signal its event from
 * os_thread_take_over().
 */
bool
os_take_over_all_unknown_threads(dcontext_t *dcontext)
{
    uint i;
    uint num_threads;
    thread_id_t *tids;
    uint threads_to_signal = 0;

    mutex_lock(&thread_initexit_lock);
    CLIENT_ASSERT(thread_takeover_records == NULL,
                  "Only one thread should attempt app take over!");

    /* Find tids for which we have no thread record, meaning they are not under
     * our control.  Shift them to the beginning of the tids array.
     */
    tids = os_list_threads(dcontext, &num_threads);
    if (tids == NULL) {
        mutex_unlock(&thread_initexit_lock);
        return false; /* have to assume no unknown */
    }
    for (i = 0; i < num_threads; i++) {
        thread_record_t *tr = thread_lookup(tids[i]);
        if (tr == NULL ||
            /* Re-takeover known threads that are currently native as well.
             * XXX i#95: we need a synchall-style loop for known threads as
             * they can be in DR for syscall hook handling.
             * Update: we now remove the hook for start/stop: but native_exec
             * or other individual threads going native could still hit this.
             */
            (is_thread_currently_native(tr)
             IF_CLIENT_INTERFACE(&& !IS_CLIENT_THREAD(tr->dcontext))))
            tids[threads_to_signal++] = tids[i];
    }

    LOG(GLOBAL, LOG_THREADS, 1,
        "TAKEOVER: %d threads to take over\n", threads_to_signal);
    if (threads_to_signal > 0) {
        takeover_record_t *records;

        /* Assuming pthreads, prepare signal_field for sharing. */
        handle_clone(dcontext, PTHREAD_CLONE_FLAGS);

        /* Create records with events for all the threads we want to signal. */
        LOG(GLOBAL, LOG_THREADS, 1,
            "TAKEOVER: publishing takeover records\n");
        records = HEAP_ARRAY_ALLOC(dcontext, takeover_record_t,
                                   threads_to_signal, ACCT_THREAD_MGT,
                                   PROTECTED);
        for (i = 0; i < threads_to_signal; i++) {
            LOG(GLOBAL, LOG_THREADS, 1,
                "TAKEOVER: will signal thread "TIDFMT"\n", tids[i]);
            records[i].tid = tids[i];
            records[i].event = create_event();
        }

        /* Publish the records and the initial take over dcontext. */
        thread_takeover_records = records;
        num_thread_takeover_records = threads_to_signal;
        takeover_dcontext = dcontext;

        /* Signal the other threads. */
        for (i = 0; i < threads_to_signal; i++) {
            thread_signal(get_process_id(), records[i].tid, SUSPEND_SIGNAL);
        }
        mutex_unlock(&thread_initexit_lock);

        /* Wait for all the threads we signaled.  Must hold no locks: each
         * target thread runs full thread init while we wait.
         */
        ASSERT_OWN_NO_LOCKS();
        for (i = 0; i < threads_to_signal; i++) {
            wait_for_event(records[i].event);
        }

        /* Now that we've taken over the other threads, we can safely free the
         * records and reset the shared globals.
         */
        mutex_lock(&thread_initexit_lock);
        LOG(GLOBAL, LOG_THREADS, 1,
            "TAKEOVER: takeover complete, unpublishing records\n");
        thread_takeover_records = NULL;
        num_thread_takeover_records = 0;
        takeover_dcontext = NULL;
        for (i = 0; i < threads_to_signal; i++) {
            destroy_event(records[i].event);
        }
        HEAP_ARRAY_FREE(dcontext, records, takeover_record_t,
                        threads_to_signal, ACCT_THREAD_MGT, PROTECTED);
    }

    mutex_unlock(&thread_initexit_lock);
    HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, num_threads,
                    ACCT_THREAD_MGT, PROTECTED);

    return threads_to_signal > 0;
}
/* Re-takes over the current thread if it is a known thread currently running
 * native (its DR TLS is allocated but not active).  Returns true if a swap
 * back to DR TLS was performed.
 */
bool
os_thread_re_take_over(void)
{
#ifdef X86
    /* i#2089: is_thread_initialized() will fail for a currently-native app.
     * We bypass the magic field checks here of is_thread_tls_initialized().
     * XXX: should this be inside is_thread_initialized()?  But that may mislead
     * other callers: the caller has to restore the TLs.  Some old code also
     * used get_thread_private_dcontext() being NULL to indicate an unknown thread:
     * that should also call here.
     */
    if (!is_thread_initialized() && is_thread_tls_allocated()) {
        /* It's safe to call thread_lookup() for ourself. */
        thread_record_t *tr = thread_lookup(get_sys_thread_id());
        if (tr != NULL) {
            ASSERT(is_thread_currently_native(tr));
            LOG(GLOBAL, LOG_THREADS, 1,
                "\tretakeover for cur-native thread "TIDFMT"\n", get_sys_thread_id());
            LOG(tr->dcontext->logfile, LOG_THREADS, 1,
                "\nretakeover for cur-native thread "TIDFMT"\n", get_sys_thread_id());
            os_swap_dr_tls(tr->dcontext, false/*to dr*/);
            ASSERT(is_thread_initialized());
            return true;
        }
    }
#endif
    return false;
}
/* Takes over the current thread from the signal handler. We notify the thread
* that signaled us by signalling our event in thread_takeover_records.
*/
/* Takes over the current thread from the signal handler.  We notify the thread
 * that signaled us by signalling our event in thread_takeover_records.
 * Initializes (or re-activates) DR for this thread, copies the interrupted
 * machine context, then switches to the dstack and enters dispatch; does not
 * return.
 */
void
os_thread_take_over(priv_mcontext_t *mc, kernel_sigset_t *sigset)
{
    uint i;
    thread_id_t mytid;
    dcontext_t *dcontext;
    priv_mcontext_t *dc_mc;
    event_t event = NULL;

    LOG(GLOBAL, LOG_THREADS, 1,
        "TAKEOVER: received signal in thread "TIDFMT"\n", get_sys_thread_id());

    /* Do standard DR thread initialization.  Mirrors code in
     * create_clone_record and new_thread_setup, except we're not putting a
     * clone record on the dstack.
     */
    os_thread_re_take_over();
    if (!is_thread_initialized()) {
        IF_DEBUG(int r =)
            dynamo_thread_init(NULL, mc _IF_CLIENT_INTERFACE(false));
        ASSERT(r == SUCCESS);
        dcontext = get_thread_private_dcontext();
        ASSERT(dcontext != NULL);
        /* Inherit siginfo sharing from the thread that initiated takeover. */
        share_siginfo_after_take_over(dcontext, takeover_dcontext);
    } else {
        /* Re-takeover a thread that we let go native */
        dcontext = get_thread_private_dcontext();
        ASSERT(dcontext != NULL);
    }
    signal_set_mask(dcontext, sigset);
    dynamo_thread_under_dynamo(dcontext);
    dc_mc = get_mcontext(dcontext);
    *dc_mc = *mc;
    dcontext->whereami = WHERE_APP;
    dcontext->next_tag = mc->pc;

    /* Wake up the thread that initiated the take over. */
    mytid = get_thread_id();
    ASSERT(thread_takeover_records != NULL);
    for (i = 0; i < num_thread_takeover_records; i++) {
        if (thread_takeover_records[i].tid == mytid) {
            event = thread_takeover_records[i].event;
            break;
        }
    }
    ASSERT_MESSAGE(CHKLVL_ASSERTS, "mytid not present in takeover records!",
                   event != NULL);
    signal_event(event);

    DOLOG(2, LOG_TOP, {
        byte *cur_esp;
        GET_STACK_PTR(cur_esp);
        LOG(THREAD, LOG_TOP, 2, "%s: next_tag="PFX", cur xsp="PFX", mc->xsp="PFX"\n",
            __FUNCTION__, dcontext->next_tag, cur_esp, mc->xsp);
    });

    /* Start interpreting from the signal context. */
    call_switch_stack(dcontext, dcontext->dstack, (void(*)(void*))dispatch,
                      NULL/*not on initstack*/, false/*shouldn't return*/);
    ASSERT_NOT_REACHED();
}
/* Requests re-takeover of a thread sitting in the suspend-signal loop by
 * setting its retakeover flag; returns false if the thread is not native
 * or not currently suspended.
 */
bool
os_thread_take_over_suspended_native(dcontext_t *dcontext)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
    if (!is_thread_currently_native(dcontext->thread_record) ||
        ksynch_get_value(&ostd->suspended) < 0)
        return false;
    /* Thread is sitting in suspend signal loop so we just set a flag
     * for when it resumes:
     */
    /* XXX: there's no event for a client to trigger this on so not yet
     * tested.  i#721 may help.
     */
    ASSERT_NOT_TESTED();
    ostd->retakeover = true;
    return true;
}
/* Called for os-specific takeover of a secondary thread from the one
* that called dr_app_setup().
*/
/* Called for os-specific takeover of a secondary thread from the one
 * that called dr_app_setup().  Shares the signal_field with an
 * already-initialized thread's dcontext.
 */
void
os_thread_take_over_secondary(dcontext_t *dcontext)
{
    thread_record_t **list;
    int num_threads;
    int i;
    /* We want to share with the thread that called dr_app_setup. */
    mutex_lock(&thread_initexit_lock);
    get_list_of_threads(&list, &num_threads);
    ASSERT(num_threads >= 1);
    for (i = 0; i < num_threads; i++) {
        /* Find a thread that's already set up */
        if (is_thread_signal_info_initialized(list[i]->dcontext))
            break;
    }
    ASSERT(i < num_threads);
    ASSERT(list[i]->dcontext != dcontext);
    /* Assuming pthreads, prepare signal_field for sharing. */
    handle_clone(list[i]->dcontext, PTHREAD_CLONE_FLAGS);
    share_siginfo_after_take_over(dcontext, list[i]->dcontext);
    mutex_unlock(&thread_initexit_lock);
    global_heap_free(list, num_threads*sizeof(thread_record_t*)
                     HEAPACCT(ACCT_THREAD_MGT));
}
/***************************************************************************/
uint
os_random_seed(void)
{
uint seed;
/* reading from /dev/urandom for a non-blocking random */
int urand = os_open("/dev/urandom", OS_OPEN_READ);
DEBUG_DECLARE(int read = )os_read(urand, &seed, sizeof(seed));
ASSERT(read == sizeof(seed));
os_close(urand);
return seed;
}
#ifdef RCT_IND_BRANCH
/* Analyze a range in a possibly new module
* return false if not a code section in a module
* otherwise returns true and adds all valid targets for rct_ind_branch_check
*/
/* Analyze a range in a possibly new module.
 * Returns false if not a code section in a module; otherwise returns true and
 * adds all valid targets for rct_ind_branch_check by scanning the code region
 * plus the immediately-following data region for address references.
 */
bool
rct_analyze_module_at_violation(dcontext_t *dcontext, app_pc target_pc)
{
    /* FIXME: note that this will NOT find the data section corresponding to the given PC
     * we don't yet have a corresponding get_allocation_size or an ELF header walk routine
     * on linux
     */
    app_pc code_start;
    size_t code_size;
    uint prot;

    if (!get_memory_info(target_pc, &code_start, &code_size, &prot))
        return false;
    /* TODO: in almost all cases expect the region at module_base+module_size to be
     * the corresponding data section.
     * Writable yet initialized data indeed needs to be processed.
     */

    if (code_size > 0) {
        app_pc code_end = code_start + code_size;

        app_pc data_start;
        size_t data_size;

        ASSERT(TESTALL(MEMPROT_READ|MEMPROT_EXEC, prot)); /* code */

        /* The region right after the code is assumed to be its data section. */
        if (!get_memory_info(code_end, &data_start, &data_size, &prot))
            return false;

        ASSERT(data_start == code_end);
        ASSERT(TESTALL(MEMPROT_READ|MEMPROT_WRITE, prot)); /* data */

        app_pc text_start = code_start;
        app_pc text_end = data_start + data_size;

        /* TODO: performance: should do this only in case relocation info is not present */
        DEBUG_DECLARE(uint found = )
            find_address_references(dcontext, text_start, text_end,
                                    code_start, code_end);
        LOG(GLOBAL, LOG_RCT, 2, PFX"-"PFX" : %d ind targets of %d code size",
            text_start, text_end,
            found, code_size);
        return true;
    }
    return false;
}
#ifdef X64
/* Presumably meant to register a rip-relative address reference as an RCT
 * target (by analogy with the surrounding RCT code — confirm once implemented).
 * Currently a stub that always reports failure.
 */
bool
rct_add_rip_rel_addr(dcontext_t *dcontext, app_pc tgt _IF_DEBUG(app_pc src))
{
    /* FIXME PR 276762: not implemented */
    return false;
}
#endif
#endif /* RCT_IND_BRANCH */
#ifdef HOT_PATCHING_INTERFACE
/* Hot-patching drmarker policy-status table accessor: not implemented on
 * this platform; asserts in debug builds and returns NULL.
 */
void*
get_drmarker_hotp_policy_status_table()
{
    ASSERT_NOT_IMPLEMENTED(false);
    return NULL;
}
/* Hot-patching drmarker policy-status table setter: not implemented on this
 * platform; asserts in debug builds.
 */
void
set_drmarker_hotp_policy_status_table(void *new_table)
{
    ASSERT_NOT_IMPLEMENTED(false);
}
/* Text-section hooking entry point used by the hot-patching interface:
 * not implemented on this platform; asserts in debug builds and returns NULL.
 */
byte *
hook_text(byte *hook_code_buf, const app_pc image_addr,
          intercept_function_t hook_func, const void *callee_arg,
          const after_intercept_action_t action_after,
          const bool abort_if_hooked, const bool ignore_cti,
          byte **app_code_copy_p, byte **alt_exit_tgt_p)
{
    ASSERT_NOT_IMPLEMENTED(false);
    return NULL;
}
/* Reverse of hook_text: not implemented on this platform. */
void
unhook_text(byte *hook_code_buf, app_pc image_addr)
{
    ASSERT_NOT_IMPLEMENTED(false);
}
/* Trampoline-entry patching used by the hot-patching interface:
 * not implemented on this platform.
 */
void
insert_jmp_at_tramp_entry(dcontext_t *dcontext, byte *trampoline, byte *target)
{
    ASSERT_NOT_IMPLEMENTED(false);
}
#endif /* HOT_PATCHING_INTERFACE */
/* ASLR attack-detection hook; ASLR support is not implemented on this
 * platform, so no target is ever flagged as a possible attack.
 */
bool
aslr_is_possible_attack(app_pc target)
{
    /* FIXME: ASLR not implemented */
    return false;
}
/* ASLR hook for mapping a target back to its preferred address; ASLR support
 * is not implemented on this platform, so always NULL.
 */
app_pc
aslr_possible_preferred_address(app_pc target_addr)
{
    /* FIXME: ASLR not implemented */
    return NULL;
}
/* Called for the initial (primary) thread during process takeover; on this
 * platform no extra work is required.
 */
void
take_over_primary_thread()
{
    /* nothing to do here */
}
/* Build (and optionally create) the per-user directory path by appending
 * "<DIRSEP>dpc-<uid>" to the caller-supplied prefix in directory_prefix.
 * directory_prefix: INOUT buffer of directory_len bytes holding the prefix.
 * create: when true and the directory does not exist, attempt to create it.
 * Returns false only if creation was requested and failed.
 */
bool
os_current_user_directory(char *directory_prefix /* INOUT */,
                          uint directory_len,
                          bool create)
{
    /* XXX: could share some of this code w/ corresponding windows routine */
    uid_t uid = dynamorio_syscall(SYS_getuid, 0);
    size_t prefix_len = strlen(directory_prefix);
    /* Append the per-user suffix after the existing prefix, then make sure
     * the buffer stays NUL-terminated even on truncation.
     */
    snprintf(directory_prefix + prefix_len, directory_len - prefix_len,
             "%cdpc-%d", DIRSEP, uid);
    directory_prefix[directory_len - 1] = '\0';
    if (!os_file_exists(directory_prefix, true/*is dir*/) && create) {
        /* XXX: we should ensure we do not follow symlinks */
        /* XXX: should add support for CREATE_DIR_FORCE_OWNER */
        if (!os_create_dir(directory_prefix, CREATE_DIR_REQUIRE_NEW)) {
            LOG(GLOBAL, LOG_CACHE, 2,
                "\terror creating per-user dir %s\n", directory_prefix);
            return false;
        }
        LOG(GLOBAL, LOG_CACHE, 2,
            "\tcreated per-user dir %s\n", directory_prefix);
    }
    return true;
}
/* Ownership-validation scheme that should never be used on Linux (see the
 * comment below); asserts in debug builds and reports failure.
 */
bool
os_validate_user_owned(file_t file_or_directory_handle)
{
    /* note on Linux this scheme should never be used */
    ASSERT(false && "chown Alice evilfile");
    return false;
}
/* Hook for OS-version-dependent option adjustments; nothing is version
 * dependent on Linux so this always returns false.
 */
bool
os_check_option_compatibility(void)
{
    /* no options are Linux OS version dependent */
    return false;
}
#ifdef X86_32
/* Emulate uint64 modulo and division by uint32 on ia32.
* XXX: Does *not* handle 64-bit divisors!
*/
/* Divide "dividend" by the 32-bit value in "divisor64", returning the 64-bit
 * quotient and storing the 32-bit remainder through "remainder".
 * Asserts (debug-only) that the divisor actually fits in 32 bits.
 */
static uint64
uint64_divmod(uint64 dividend, uint64 divisor64, uint32 *remainder)
{
    /* Assumes little endian, which x86 is. */
    union {
        uint64 v64;
        struct {
            uint32 lo;
            uint32 hi;
        };
    } res;
    uint32 upper;
    uint32 divisor = (uint32) divisor64;
    /* Our uses don't use large divisors. */
    ASSERT(divisor64 <= UINT_MAX && "divisor is larger than uint32 can hold");
    /* Divide out the high bits first. */
    res.v64 = dividend;
    upper = res.hi;
    res.hi = upper / divisor;
    upper %= divisor;
    /* Use the unsigned div instruction, which uses EDX:EAX to form a 64-bit
     * dividend. We only get a 32-bit quotient out, which is why we divide out
     * the high bits first. The quotient will fit in EAX.
     *
     * DIV r/m32 F7 /6 Unsigned divide EDX:EAX by r/m32, with result stored
     * in EAX <- Quotient, EDX <- Remainder.
     * inputs:
     * EAX = res.lo
     * EDX = upper
     * rm = divisor
     * outputs:
     * res.lo = EAX
     * *remainder = EDX
     * The outputs precede the inputs in gcc inline asm syntax, and so to put
     * inputs in EAX and EDX we use "0" and "1".
     */
    asm ("divl %2" : "=a" (res.lo), "=d" (*remainder) :
         "rm" (divisor), "0" (res.lo), "1" (upper));
    return res.v64;
}
/* 64-bit unsigned division helper gcc emits calls to on ia32; matches
 * libgcc's prototype. The remainder is computed but discarded.
 */
uint64
__udivdi3(uint64 dividend, uint64 divisor)
{
    uint32 discarded_remainder;
    return uint64_divmod(dividend, divisor, &discarded_remainder);
}
/* 64-bit unsigned modulo helper gcc emits calls to on ia32; matches
 * libgcc's prototype. Only the remainder of the divmod is kept.
 */
uint64
__umoddi3(uint64 dividend, uint64 divisor)
{
    uint32 mod;
    uint64_divmod(dividend, divisor, &mod);
    return (uint64) mod;
}
#elif defined (ARM)
/* i#1566: for ARM, __aeabi versions are used instead of udivdi3 and umoddi3.
* We link with __aeabi routines from libgcc via third_party/libgcc.
*/
#endif /* X86_32 */
#endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */
/****************************************************************************
* Page size discovery and query
*/
/* This variable is only used by os_set_page_size and os_page_size, but those
* functions may be called before libdynamorio.so has been relocated. So check
* the disassembly of those functions: there should be no relocations.
*/
static size_t page_size = 0;
/* Return true if size is a multiple of the page size.
* XXX: This function may be called when DynamoRIO is in a fragile state, or not
* yet relocated, so keep this self-contained and do not use global variables or
* logging.
*/
static bool
os_try_page_size(size_t size)
{
    /* Map twice the candidate size so we can attempt to unmap just the top
     * half: munmap only succeeds on page-aligned, page-multiple ranges, so
     * its success/failure reveals whether "size" is a page-size multiple.
     */
    byte *addr = mmap_syscall(NULL, size * 2,
                              PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    /* Raw syscall return: values in the top 4096 encode -errno. */
    if ((ptr_uint_t)addr >= (ptr_uint_t)-4096) /* mmap failed: should not happen */
        return false;
    if (munmap_syscall(addr + size, size) == 0) {
        /* munmap of top half succeeded: munmap bottom half and return true */
        munmap_syscall(addr, size);
        return true;
    }
    /* munmap of top half failed: munmap whole region and return false */
    munmap_syscall(addr, size * 2);
    return false;
}
/* Directly determine the granularity of memory allocation using mmap and munmap.
* This is used as a last resort if the page size is required before it has been
* discovered in any other way, such as from AT_PAGESZ.
* XXX: This function may be called when DynamoRIO is in a fragile state, or not
* yet relocated, so keep this self-contained and do not use global variables or
* logging.
*/
/* Probe the allocation granularity empirically with mmap/munmap, starting
 * from 4096 and moving down or up in powers of two. Used only as a last
 * resort when AT_PAGESZ was unavailable; falls back to 4096 on failure.
 */
static size_t
os_find_page_size(void)
{
    size_t trial = 4096;
    if (os_try_page_size(trial)) {
        /* 4096 works: halve until a size fails; the real page size is twice
         * the first failing size.
         */
        trial /= 2;
        while (trial > 0) {
            if (!os_try_page_size(trial))
                return trial * 2;
            trial /= 2;
        }
    } else {
        /* 4096 failed: double until a size succeeds (stop before the size
         * would overflow to zero).
         */
        trial *= 2;
        while (trial * 2 > 0) {
            if (os_try_page_size(trial))
                return trial;
            trial *= 2;
        }
    }
    /* Something went wrong... */
    return 4096;
}
/* Publish the discovered page size (a single aligned store, hence atomic). */
static void
os_set_page_size(size_t size)
{
    page_size = size; /* atomic write */
}
/* Return the OS page size, discovering and caching it on first use. */
size_t
os_page_size(void)
{
    size_t cached = page_size; /* atomic read */
    if (cached != 0)
        return cached;
    /* XXX: On Mac OSX we should use sysctl_query on hw.pagesize. */
    cached = os_find_page_size();
    os_set_page_size(cached);
    return cached;
}
/* Initialize the cached page size from the ELF auxiliary vector (AT_PAGESZ),
 * which lies just past the environment array on the initial stack.
 * "env" must point into that initial-stack environment array.
 */
void
os_page_size_init(const char **env)
{
#if defined(LINUX) && !defined(STATIC_LIBRARY)
    /* On Linux we get the page size from the auxiliary vector, which is what
     * the C library typically does for implementing sysconf(_SC_PAGESIZE).
     * However, for STATIC_LIBRARY, our_environ is not guaranteed to point
     * at the stack as we're so late, so we do not try to read off the end of it
     * (i#2122).
     */
    size_t size = page_size; /* atomic read */
    if (size == 0) {
        ELF_AUXV_TYPE *auxv;
        /* Skip environment. */
        while (*env != 0)
            ++env;
        /* Look for AT_PAGESZ in the auxiliary vector. */
        for (auxv = (ELF_AUXV_TYPE *)(env + 1); auxv->a_type != AT_NULL; auxv++) {
            if (auxv->a_type == AT_PAGESZ) {
                os_set_page_size(auxv->a_un.a_val);
                break;
            }
        }
    }
#endif /* LINUX */
}
/****************************************************************************
* Tests
*/
#if defined(STANDALONE_UNIT_TEST)
/* Unit test for the ia32 64-bit division helpers above (no-op on other
 * architectures, where the compiler does not call them).
 */
void
test_uint64_divmod(void)
{
#ifdef X86_32
    uint64 quotient;
    uint32 remainder;
    /* Simple division below 2^32. */
    quotient = uint64_divmod(9, 3, &remainder);
    EXPECT(quotient == 3, true);
    EXPECT(remainder == 0, true);
    quotient = uint64_divmod(10, 3, &remainder);
    EXPECT(quotient == 3, true);
    EXPECT(remainder == 1, true);
    /* Division when upper bits are less than the divisor. */
    quotient = uint64_divmod(45ULL << 31, 1U << 31, &remainder);
    EXPECT(quotient == 45, true);
    EXPECT(remainder == 0, true);
    /* Division when upper bits are greater than the divisor. */
    quotient = uint64_divmod(45ULL << 32, 15, &remainder);
    EXPECT(quotient == 3ULL << 32, true);
    EXPECT(remainder == 0, true);
    quotient = uint64_divmod((45ULL << 32) + 13, 15, &remainder);
    EXPECT(quotient == 3ULL << 32, true);
    EXPECT(remainder == 13, true);
    /* Try calling the intrinsics. Don't divide by powers of two, gcc will
     * lower that to a shift.
     */
    quotient = (45ULL << 32);
    quotient /= 15;
    EXPECT(quotient == (3ULL << 32), true);
    quotient = (45ULL << 32) + 13;
    remainder = quotient % 15;
    EXPECT(remainder == 13, true);
#endif /* X86_32 */
}
/* Entry point for this file's standalone unit tests. */
void
unit_test_os(void)
{
    test_uint64_divmod();
}
#endif /* STANDALONE_UNIT_TEST */
| 1 | 10,900 | Wait -- os_local_state_t.tid is thread_id_t though, so we need to read a pointer-sized value via READ_TLS_SLOT_IMM, rather than changing these locals to ints. Maybe have a READ_TLS_TIDSZ_SLOT_IMM or sthg. | DynamoRIO-dynamorio | c |
@@ -13311,7 +13311,11 @@ public:
HRESULT Status;
ICorDebugProcess* pCorDebugProcess;
- IfFailRet(g_pRuntime->GetCorDebugInterface(&pCorDebugProcess));
+ if (FAILED(Status = g_pRuntime->GetCorDebugInterface(&pCorDebugProcess)))
+ {
+ ExtOut("\n\n\n!clrstack -i is unsupported on this target.\nThe ICorDebug interface cannot be constructed.\n");
+ return Status;
+ }
ExtOut("\n\n\nDumping managed stack and managed variables using ICorDebug.\n");
ExtOut("=============================================================================\n"); | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ==++==
//
//
// ==--==
// ===========================================================================
// STRIKE.CPP
// ===========================================================================
//
// History:
// 09/07/99 Microsoft Created
//
//************************************************************************************************
// SOS is the native debugging extension designed to support investigations into CLR (mis-)
// behavior by both users of the runtime as well as the code owners. It allows inspection of
// internal structures, of user visible entities, as well as execution control.
//
// This is the main SOS file hosting the implementation of all the exposed commands. A good
// starting point for understanding the semantics of these commands is the sosdocs.txt file.
//
// #CrossPlatformSOS
// SOS currently supports cross platform debugging from x86 to ARM. It takes a different approach
// from the DAC: whereas for the DAC we produce one binary for each supported host-target
// architecture pair, for SOS we produce only one binary for each host architecture; this one
// binary contains code for all supported target architectures. In doing this SOS depends on two
// assumptions:
// . that the debugger will load the appropriate DAC, and
// . that the host and target word size is identical.
// The second assumption is identical to the DAC assumption, and there will be considerable effort
// required (in the EE, the DAC, and SOS) if we ever need to remove it.
//
// In an ideal world SOS would be able to retrieve all platform specific information it needs
// either from the debugger or from DAC. However, SOS has taken some subtle and not so subtle
// dependencies on the CLR and the target platform.
// To resolve this problem, SOS now abstracts the target behind the IMachine interface, and uses
// calls on IMachine to take target-specific actions. It implements X86Machine, ARMMachine, and
// AMD64Machine. An instance of these exists in each appropriate host (e.g. the X86 version of SOS
// contains instances of X86Machine and ARMMachine, the ARM version contains an instance of
// ARMMachine, and the AMD64 version contains an instance of AMD64Machine). The code included in
// each version if determined by the SosTarget*** MSBuild symbols, and SOS_TARGET_*** conditional
// compilation symbols (as specified in sos.targets).
//
// Most of the target specific code is hosted in disasm.h/.cpp, and disasmX86.cpp, disasmARM.cpp.
// Some code currently under _TARGET_*** ifdefs may need to be reviewed/revisited.
//
// Issues:
// The one-binary-per-host decision does have some drawbacks:
// . Currently including system headers or even CLR headers will only account for the host
// target, IOW, when building the X86 version of SOS, CONTEXT will refer to the X86 CONTEXT
// structure, so we need to be careful when debugging ARM targets. The CONTEXT issue is
// partially resolved by CROSS_PLATFORM_CONTEXT (there is still a need to be very careful
// when handling arrays of CONTEXTs - see _EFN_StackTrace for details on this).
// . For larger includes (e.g. GC info), we will need to include files in specific namespaces,
// with specific _TARGET_*** macros defined in order to avoid name clashes and ensure correct
// system types are used.
// -----------------------------------------------------------------------------------------------
#define DO_NOT_DISABLE_RAND //this is a standalone tool, and can use rand()
#include <windows.h>
#include <winver.h>
#include <winternl.h>
#include <psapi.h>
#ifndef FEATURE_PAL
#include <list>
#endif // !FEATURE_PAL
#include <wchar.h>
#include "platformspecific.h"
#define NOEXTAPI
#define KDEXT_64BIT
#include <wdbgexts.h>
#undef DECLARE_API
#undef StackTrace
#include <dbghelp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <stdexcept>
#include <deque>
#include "strike.h"
#include "sos.h"
#ifndef STRESS_LOG
#define STRESS_LOG
#endif // STRESS_LOG
#define STRESS_LOG_READONLY
#include "stresslog.h"
#include "util.h"
#include "corhdr.h"
#include "cor.h"
#include "cordebug.h"
#include "dacprivate.h"
#include "corexcep.h"
#define CORHANDLE_MASK 0x1
#define SWITCHED_OUT_FIBER_OSID 0xbaadf00d;
#define DEFINE_EXT_GLOBALS
#include "data.h"
#include "disasm.h"
#include "predeftlsslot.h"
#ifndef FEATURE_PAL
#include "hillclimbing.h"
#endif
#include "sos_md.h"
#ifndef FEATURE_PAL
#include "ExpressionNode.h"
#include "WatchCmd.h"
#include <algorithm>
#include "tls.h"
// Local copy of the NT VM_COUNTERS structure (process memory statistics),
// used together with the ProcessVmCounters information class below.
typedef struct _VM_COUNTERS {
    SIZE_T PeakVirtualSize;
    SIZE_T VirtualSize;
    ULONG PageFaultCount;
    SIZE_T PeakWorkingSetSize;
    SIZE_T WorkingSetSize;
    SIZE_T QuotaPeakPagedPoolUsage;
    SIZE_T QuotaPagedPoolUsage;
    SIZE_T QuotaPeakNonPagedPoolUsage;
    SIZE_T QuotaNonPagedPoolUsage;
    SIZE_T PagefileUsage;
    SIZE_T PeakPagefileUsage;
} VM_COUNTERS;
typedef VM_COUNTERS *PVM_COUNTERS;
// Information class 3 selects VM counters when querying process information.
const PROCESSINFOCLASS ProcessVmCounters = static_cast<PROCESSINFOCLASS>(3);
#endif // !FEATURE_PAL
// Max number of methods that !dumpmodule -prof will print
const UINT kcMaxMethodDescsForProfiler = 100;
#include <set>
#include <vector>
#include <map>
#include <tuple>
#include <memory>
#include <functional>
BOOL ControlC = FALSE;
WCHAR g_mdName[mdNameLen];
#ifndef FEATURE_PAL
HMODULE g_hInstance = NULL;
#include <algorithm>
#endif // !FEATURE_PAL
#ifdef _MSC_VER
#pragma warning(disable:4244) // conversion from 'unsigned int' to 'unsigned short', possible loss of data
#pragma warning(disable:4189) // local variable is initialized but not referenced
#endif
#ifdef FEATURE_PAL
#define SOSPrefix ""
#define SOSThreads "clrthreads"
#else
#define SOSPrefix "!"
#define SOSThreads "!threads"
#endif
#if defined _X86_ && !defined FEATURE_PAL
// disable FPO for X86 builds
#pragma optimize("y", off)
#endif
#undef assert
#ifdef _MSC_VER
#pragma warning(default:4244)
#pragma warning(default:4189)
#endif
#ifndef FEATURE_PAL
#include "ntinfo.h"
#endif // FEATURE_PAL
#ifndef IfFailRet
#define IfFailRet(EXPR) do { Status = (EXPR); if(FAILED(Status)) { return (Status); } } while (0)
#endif
#ifdef FEATURE_PAL
#define MINIDUMP_NOT_SUPPORTED()
#define ONLY_SUPPORTED_ON_WINDOWS_TARGET()
#else // !FEATURE_PAL
#define MINIDUMP_NOT_SUPPORTED() \
if (IsMiniDumpFile()) \
{ \
ExtOut("This command is not supported in a minidump without full memory\n"); \
ExtOut("To try the command anyway, run !MinidumpMode 0\n"); \
return Status; \
}
#define ONLY_SUPPORTED_ON_WINDOWS_TARGET() \
if (!IsWindowsTarget()) \
{ \
ExtOut("This command is only supported for Windows targets\n"); \
return Status; \
}
#include "safemath.h"
// !MinidumpMode [0|1]: query or toggle "minidump safe mode". When safe mode
// is on, commands known to be unsafe against a minidump without full memory
// are disabled (see the MINIDUMP_NOT_SUPPORTED macro above).
DECLARE_API (MinidumpMode)
{
    INIT_API ();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    DWORD_PTR Value=0;

    CMDValue arg[] =
    {   // vptr, type
        {&Value, COHEX}
    };

    size_t nArg;
    if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
    {
        return Status;
    }
    if (nArg == 0)
    {
        // Print status of current mode
        ExtOut("Current mode: %s - unsafe minidump commands are %s.\n",
                g_InMinidumpSafeMode ? "1" : "0",
                g_InMinidumpSafeMode ? "disabled" : "enabled");
    }
    else
    {
        // Only a boolean flag is accepted.
        if (Value != 0 && Value != 1)
        {
            ExtOut("Mode must be 0 or 1\n");
            return Status;
        }

        g_InMinidumpSafeMode = (BOOL) Value;
        ExtOut("Unsafe minidump commands are %s.\n",
                g_InMinidumpSafeMode ? "disabled" : "enabled");
    }
    return Status;
}
#endif // FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* This function is called to get the MethodDesc for a given eip *
* *
\**********************************************************************/
DECLARE_API(IP2MD)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    BOOL dml = FALSE;
    TADDR IP = 0;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&IP, COHEX},
    };
    size_t nArg;

    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);

    if (IP == 0)
    {
        ExtOut("%s is not IP\n", args);
        return Status;
    }

    CLRDATA_ADDRESS cdaStart = TO_CDADDR(IP);
    CLRDATA_ADDRESS pMD;

    // Ask the DAC which MethodDesc owns this JIT-compiled address.
    if ((Status = g_sos->GetMethodDescPtrFromIP(cdaStart, &pMD)) != S_OK)
    {
        ExtOut("Failed to request MethodData, not in JIT code range\n");
        return Status;
    }

    DMLOut("MethodDesc:   %s\n", DMLMethodDesc(pMD));
    DumpMDInfo(TO_TADDR(pMD), cdaStart, FALSE /* fStackTraceFormat */);

    WCHAR filename[MAX_LONGPATH];
    ULONG linenum;
    // symlines will be non-zero only if SYMOPT_LOAD_LINES was set in the symbol options
    ULONG symlines = 0;
    if (SUCCEEDED(g_ExtSymbols->GetSymbolOptions(&symlines)))
    {
        symlines &= SYMOPT_LOAD_LINES;
    }

    // Only attempt source-line resolution when line loading is enabled.
    if (symlines != 0 &&
        SUCCEEDED(GetLineByOffset(TO_CDADDR(IP), &linenum, filename, _countof(filename))))
    {
        ExtOut("Source file: %S @ %d\n", filename, linenum);
    }

    return Status;
}
// (MAX_STACK_FRAMES is also used by x86 to prevent infinite loops in _EFN_StackTrace)
#define MAX_STACK_FRAMES 1000
#if defined(_TARGET_WIN64_)
#define DEBUG_STACK_CONTEXT AMD64_CONTEXT
#elif defined(_TARGET_ARM_) // _TARGET_WIN64_
#define DEBUG_STACK_CONTEXT ARM_CONTEXT
#elif defined(_TARGET_X86_) // _TARGET_ARM_
#define DEBUG_STACK_CONTEXT X86_CONTEXT
#endif // _TARGET_X86_
#ifdef DEBUG_STACK_CONTEXT
// I use a global set of frames for stack walking on win64 because the debugger's
// GetStackTrace function doesn't provide a way to find out the total size of a stackwalk,
// and I'd like to have a reasonably big maximum without overflowing the stack by declaring
// the buffer locally and I also want to get a managed trace in a low memory environment
// (so no dynamic allocation if possible).
DEBUG_STACK_FRAME g_Frames[MAX_STACK_FRAMES];
DEBUG_STACK_CONTEXT g_FrameContexts[MAX_STACK_FRAMES];
// Capture a native stack trace plus per-frame register contexts for the
// given OS thread into the global g_Frames / g_FrameContexts buffers, using
// the IDebugControl4 "advanced" interface. The debugger's current thread is
// temporarily switched to the requested thread and restored afterwards.
//
// osThreadId - OS (system) thread id to walk
// pnumFrames - [out] number of frames captured (0 on failure or when
//              IDebugControl4 is unavailable)
static HRESULT
GetContextStackTrace(ULONG osThreadId, PULONG pnumFrames)
{
    PDEBUG_CONTROL4 debugControl4;
    HRESULT hr = S_OK;
    *pnumFrames = 0;

    // Do we have advanced capability?
    if (g_ExtControl->QueryInterface(__uuidof(IDebugControl4), (void **)&debugControl4) == S_OK)
    {
        ULONG oldId, id;
        g_ExtSystem->GetCurrentThreadId(&oldId);

        if ((hr = g_ExtSystem->GetThreadIdBySystemId(osThreadId, &id)) != S_OK) {
            // Fix: release the interface obtained by QueryInterface above;
            // the early return previously leaked this reference.
            debugControl4->Release();
            return hr;
        }
        g_ExtSystem->SetCurrentThreadId(id);

        // GetContextStackTrace fills g_FrameContexts as an array of
        // contexts packed as target architecture contexts. We cannot
        // safely cast this as an array of CROSS_PLATFORM_CONTEXT, since
        // sizeof(CROSS_PLATFORM_CONTEXT) != sizeof(TGT_CONTEXT)
        hr = debugControl4->GetContextStackTrace(
            NULL,
            0,
            g_Frames,
            MAX_STACK_FRAMES,
            g_FrameContexts,
            MAX_STACK_FRAMES*g_targetMachine->GetContextSize(),
            g_targetMachine->GetContextSize(),
            pnumFrames);

        // Restore the previously-selected thread before releasing.
        g_ExtSystem->SetCurrentThreadId(oldId);
        debugControl4->Release();
    }
    return hr;
}
#endif // DEBUG_STACK_CONTEXT
/**********************************************************************\
* Routine Description: *
* *
* This function displays the stack trace. It looks at each DWORD *
* on stack. If the DWORD is a return address, the symbol name or
* managed function name is displayed. *
* *
\**********************************************************************/
// Worker shared by !DumpStack and !EEStack: establishes the [top, end) stack
// range for the current debugger thread (filling in whatever the caller left
// as 0 in pDSFlag) and hands off to DumpStackWorker.
void DumpStackInternal(DumpStackFlag *pDSFlag)
{
    ReloadSymbolWithLineInfo();

    ULONG64 StackOffset;
    g_ExtRegisters->GetStackOffset (&StackOffset);
    if (pDSFlag->top == 0) {
        pDSFlag->top = TO_TADDR(StackOffset);
    }
    size_t value;
    // Skip forward one OS page at a time until the top address is readable.
    while (g_ExtData->ReadVirtual(TO_CDADDR(pDSFlag->top), &value, sizeof(size_t), NULL) != S_OK) {
        if (IsInterrupt())
            return;
        pDSFlag->top = NextOSPageAddress(pDSFlag->top);
    }

#ifndef FEATURE_PAL
    // On Windows targets, derive the stack end from the TEB's stack bounds.
    if (IsWindowsTarget() && (pDSFlag->end == 0)) {
        // Find the current stack range
        NT_TIB teb;
        ULONG64 dwTebAddr = 0;
        if (SUCCEEDED(g_ExtSystem->GetCurrentThreadTeb(&dwTebAddr)))
        {
            if (SafeReadMemory(TO_TADDR(dwTebAddr), &teb, sizeof(NT_TIB), NULL))
            {
                if (pDSFlag->top > TO_TADDR(teb.StackLimit)
                && pDSFlag->top <= TO_TADDR(teb.StackBase))
                {
                    if (pDSFlag->end == 0 || pDSFlag->end > TO_TADDR(teb.StackBase))
                        pDSFlag->end = TO_TADDR(teb.StackBase);
                }
            }
        }
    }
#endif // FEATURE_PAL

    if (pDSFlag->end == 0)
    {
        ExtOut("TEB information is not available so a stack size of 0xFFFF is assumed\n");
        pDSFlag->end = pDSFlag->top + 0xFFFF;
    }

    if (pDSFlag->end < pDSFlag->top)
    {
        ExtOut("Wrong option: stack selection wrong\n");
        return;
    }

    DumpStackWorker(*pDSFlag);
}
DECLARE_API(DumpStack)
{
    INIT_API_NO_RET_ON_FAILURE();

    MINIDUMP_NOT_SUPPORTED();

    DumpStackFlag DSFlag;
    DSFlag.fEEonly = FALSE;
    DSFlag.fSuppressSrcInfo = FALSE;
    DSFlag.top = 0;
    DSFlag.end = 0;

    // NOTE(review): -unwind is parsed here but not consulted in this code
    // path - confirm whether it is consumed elsewhere or is dead.
    BOOL unwind = FALSE;
    BOOL dml = FALSE;
    CMDOption option[] = {
        // name, vptr, type, hasValue
        {"-EE", &DSFlag.fEEonly, COBOOL, FALSE},
        {"-n",  &DSFlag.fSuppressSrcInfo, COBOOL, FALSE},
        {"-unwind",  &unwind, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE}
#endif
    };
    CMDValue arg[] = {
        // vptr, type
        {&DSFlag.top, COHEX},
        {&DSFlag.end, COHEX}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
        return Status;

    // symlines will be non-zero only if SYMOPT_LOAD_LINES was set in the symbol options
    ULONG symlines = 0;
    if (!DSFlag.fSuppressSrcInfo && SUCCEEDED(g_ExtSymbols->GetSymbolOptions(&symlines)))
    {
        symlines &= SYMOPT_LOAD_LINES;
    }
    // Suppress source info when requested or when line loading is disabled.
    DSFlag.fSuppressSrcInfo = DSFlag.fSuppressSrcInfo || (symlines == 0);

    EnableDMLHolder enabledml(dml);

    ULONG sysId = 0, id = 0;
    g_ExtSystem->GetCurrentThreadSystemId(&sysId);
    ExtOut("OS Thread Id: 0x%x ", sysId);
    g_ExtSystem->GetCurrentThreadId(&id);
    ExtOut("(%d)\n", id);

    DumpStackInternal(&DSFlag);
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function displays the stack trace for threads that EE knows *
* from ThreadStore. *
* *
\**********************************************************************/
DECLARE_API (EEStack)
{
    INIT_API();

    MINIDUMP_NOT_SUPPORTED();

    DumpStackFlag DSFlag;
    DSFlag.fEEonly = FALSE;
    DSFlag.fSuppressSrcInfo = FALSE;
    DSFlag.top = 0;
    DSFlag.end = 0;

    BOOL bShortList = FALSE;
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-EE", &DSFlag.fEEonly, COBOOL, FALSE},
        {"-short", &bShortList, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE}
#endif
    };

    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }

    EnableDMLHolder enableDML(dml);

    // Remember the current debugger thread so it can be restored at the end.
    ULONG Tid;
    g_ExtSystem->GetCurrentThreadId(&Tid);

    DacpThreadStoreData ThreadStore;
    if ((Status = ThreadStore.Request(g_sos)) != S_OK)
    {
        ExtOut("Failed to request ThreadStore\n");
        return Status;
    }

    // Walk the runtime's linked list of managed threads.
    CLRDATA_ADDRESS CurThread = ThreadStore.firstThread;
    while (CurThread)
    {
        if (IsInterrupt())
            break;

        DacpThreadData Thread;
        if ((Status = Thread.Request(g_sos, CurThread)) != S_OK)
        {
            ExtOut("Failed to request Thread at %p\n", CurThread);
            return Status;
        }

        // Skip threads the debugger cannot map to a debugger thread id.
        ULONG id=0;
        if (g_ExtSystem->GetThreadIdBySystemId (Thread.osThreadId, &id) != S_OK)
        {
            CurThread = Thread.nextThread;
            continue;
        }

        ExtOut("---------------------------------------------\n");
        ExtOut("Thread %3d\n", id);
        BOOL doIt = FALSE;

#define TS_Hijacked 0x00000080

        // With -short, dump only "interesting" threads: ones holding locks,
        // hijacked ones, or ones currently executing jitted code.
        if (!bShortList)
        {
            doIt = TRUE;
        }
        else if ((Thread.lockCount > 0) || (Thread.state & TS_Hijacked))
        {
            // TODO: bring back || (int)vThread.m_pFrame != -1 {
            doIt = TRUE;
        }
        else
        {
            ULONG64 IP;
            g_ExtRegisters->GetInstructionOffset (&IP);
            JITTypes jitType;
            TADDR methodDesc;
            TADDR gcinfoAddr;
            IP2MethodDesc (TO_TADDR(IP), methodDesc, jitType, gcinfoAddr);
            if (methodDesc)
            {
                doIt = TRUE;
            }
        }

        if (doIt)
        {
            g_ExtSystem->SetCurrentThreadId(id);
            DSFlag.top = 0;
            DSFlag.end = 0;
            DumpStackInternal(&DSFlag);
        }

        CurThread = Thread.nextThread;
    }

    g_ExtSystem->SetCurrentThreadId(Tid);
    return Status;
}
// Worker for !DumpStackObjects: resolves the [StackTop, StackBottom) range
// (from explicit expressions, the current stack pointer, and/or the TEB on
// Windows targets) and dumps the managed objects found in that range.
//
// nArg       - number of positional arguments the caller parsed (0, 1 or 2)
// exprBottom - expression for the bottom of the range (used when nArg == 2)
// exprTop    - expression for the top of the range (used when nArg >= 1)
// bVerify    - when TRUE, verify each object found
HRESULT DumpStackObjectsRaw(size_t nArg, __in_z LPSTR exprBottom, __in_z LPSTR exprTop, BOOL bVerify)
{
    size_t StackTop = 0;
    size_t StackBottom = 0;
    if (nArg==0)
    {
        // No explicit range: start from the current stack pointer.
        ULONG64 StackOffset;
        g_ExtRegisters->GetStackOffset(&StackOffset);

        StackTop = TO_TADDR(StackOffset);
    }
    else
    {
        StackTop = GetExpression(exprTop);
        if (StackTop == 0)
        {
            ExtOut("wrong option: %s\n", exprTop);
            return E_FAIL;
        }

        if (nArg==2)
        {
            StackBottom = GetExpression(exprBottom);
            if (StackBottom == 0)
            {
                ExtOut("wrong option: %s\n", exprBottom);
                return E_FAIL;
            }
        }
    }
#ifndef FEATURE_PAL
    // On Windows targets, clamp the bottom of the range to the TEB's
    // StackBase when the top lies within the thread's stack.
    if (IsWindowsTarget())
    {
        NT_TIB teb;
        ULONG64 dwTebAddr = 0;
        HRESULT hr = g_ExtSystem->GetCurrentThreadTeb(&dwTebAddr);
        if (SUCCEEDED(hr) && SafeReadMemory(TO_TADDR(dwTebAddr), &teb, sizeof(NT_TIB), NULL))
        {
            if (StackTop > TO_TADDR(teb.StackLimit) && StackTop <= TO_TADDR(teb.StackBase))
            {
                if (StackBottom == 0 || StackBottom > TO_TADDR(teb.StackBase))
                    StackBottom = TO_TADDR(teb.StackBase);
            }
        }
    }
#endif
    // Fall back to a fixed-size window when no bottom could be determined.
    if (StackBottom == 0)
        StackBottom = StackTop + 0xFFFF;

    if (StackBottom < StackTop)
    {
        ExtOut("Wrong option: stack selection wrong\n");
        return E_FAIL;
    }

    // We can use the gc snapshot to eliminate object addresses that are
    // not on the gc heap.
    if (!g_snapshot.Build())
    {
        ExtOut("Unable to determine bounds of gc heap\n");
        return E_FAIL;
    }

    // Print thread ID.
    ULONG id = 0;
    g_ExtSystem->GetCurrentThreadSystemId (&id);
    ExtOut("OS Thread Id: 0x%x ", id);
    g_ExtSystem->GetCurrentThreadId (&id);
    ExtOut("(%d)\n", id);

    DumpStackObjectsHelper(StackTop, StackBottom, bVerify);
    return S_OK;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the address and name of all *
* Managed Objects on the stack. *
* *
\**********************************************************************/
DECLARE_API(DumpStackObjects)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    StringHolder exprTop, exprBottom;

    BOOL bVerify = FALSE;
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-verify", &bVerify, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE}
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&exprTop.data, COSTRING},
        {&exprBottom.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder enableDML(dml);

    // All real work happens in the shared range-resolving worker.
    return DumpStackObjectsRaw(nArg, exprBottom.data, exprTop.data, bVerify);
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a MethodDesc *
* for a given address *
* *
\**********************************************************************/
DECLARE_API(DumpMD)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    DWORD_PTR dwStartAddr = NULL;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&dwStartAddr, COHEX},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    // DumpMDInfo performs all validation and printing for the MethodDesc.
    DumpMDInfo(dwStartAddr);

    return Status;
}
// Extract, from a managed DynamicMethod object, the byte[] holding its IL
// and the object[] used for token resolution, by following the field chain
// m_resolver -> m_code and m_resolver -> m_scope -> m_tokens -> _items.
//
// DynamicMethodObj - address of the managed DynamicMethod object
// codeArray        - [out] receives array data for the IL byte[]
// tokenArray       - [out] receives array data for the token object[]
// ptokenArrayAddr  - [out] receives the address of the token array object
//
// Returns TRUE only if every field on the path was located and read.
BOOL GatherDynamicInfo(TADDR DynamicMethodObj, DacpObjectData *codeArray,
                       DacpObjectData *tokenArray, TADDR *ptokenArrayAddr)
{
    BOOL bRet = FALSE;
    int iOffset;
    DacpObjectData objData; // temp object

    if (codeArray == NULL || tokenArray == NULL)
        return bRet;

    if (objData.Request(g_sos, TO_CDADDR(DynamicMethodObj)) != S_OK)
        return bRet;

    // DynamicMethod.m_resolver
    iOffset = GetObjFieldOffset(TO_CDADDR(DynamicMethodObj), objData.MethodTable, W("m_resolver"));
    if (iOffset <= 0)
        return bRet;

    TADDR resolverPtr;
    if (FAILED(MOVE(resolverPtr, DynamicMethodObj + iOffset)))
        return bRet;

    if (objData.Request(g_sos, TO_CDADDR(resolverPtr)) != S_OK)
        return bRet;

    // resolver.m_code: the IL byte array
    iOffset = GetObjFieldOffset(TO_CDADDR(resolverPtr), objData.MethodTable, W("m_code"));
    if (iOffset <= 0)
        return bRet;

    TADDR codePtr;
    if (FAILED(MOVE(codePtr, resolverPtr + iOffset)))
        return bRet;

    if (codeArray->Request(g_sos, TO_CDADDR(codePtr)) != S_OK)
        return bRet;

    // IL must be a byte array (component size of 1).
    if (codeArray->dwComponentSize != 1)
        return bRet;

    // We also need the resolution table
    iOffset = GetObjFieldOffset (TO_CDADDR(resolverPtr), objData.MethodTable, W("m_scope"));
    if (iOffset <= 0)
        return bRet;

    TADDR scopePtr;
    if (FAILED(MOVE(scopePtr, resolverPtr + iOffset)))
        return bRet;

    if (objData.Request(g_sos, TO_CDADDR(scopePtr)) != S_OK)
        return bRet;

    // scope.m_tokens: a managed list whose _items array holds the tokens
    iOffset = GetObjFieldOffset (TO_CDADDR(scopePtr), objData.MethodTable, W("m_tokens"));
    if (iOffset <= 0)
        return bRet;

    TADDR tokensPtr;
    if (FAILED(MOVE(tokensPtr, scopePtr + iOffset)))
        return bRet;

    if (objData.Request(g_sos, TO_CDADDR(tokensPtr)) != S_OK)
        return bRet;

    iOffset = GetObjFieldOffset(TO_CDADDR(tokensPtr), objData.MethodTable, W("_items"));
    if (iOffset <= 0)
        return bRet;

    TADDR itemsPtr;
    // Fix: check this read like every other MOVE on this path; previously a
    // failed read left itemsPtr uninitialized yet execution continued.
    if (FAILED(MOVE(itemsPtr, tokensPtr + iOffset)))
        return bRet;

    *ptokenArrayAddr = itemsPtr;
    if (tokenArray->Request(g_sos, TO_CDADDR(itemsPtr)) != S_OK)
        return bRet;

    bRet = TRUE; // whew.
    return bRet;
}
typedef std::tuple<TADDR, IMetaDataImport* > GetILAddressResult;
GetILAddressResult GetILAddress(const DacpMethodDescData& MethodDescData);
/**********************************************************************\
* Routine Description: *
* *
* Displays the Microsoft intermediate language (MSIL) that is *
* associated with a managed method. *
\**********************************************************************/
DECLARE_API(DumpIL)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    DWORD_PTR dwStartAddr = NULL;
    DWORD_PTR dwDynamicMethodObj = NULL;
    BOOL dml = FALSE;
    BOOL fILPointerDirectlySpecified = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
        {"/i", &fILPointerDirectlySpecified, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&dwStartAddr, COHEX},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    if (dwStartAddr == NULL)
    {
        ExtOut("Must pass a valid expression\n");
        return Status;
    }

    // /i means the argument is a raw IL address: decode it directly.
    if (fILPointerDirectlySpecified)
    {
        return DecodeILFromAddress(NULL, dwStartAddr);
    }

    if (!g_snapshot.Build())
    {
        ExtOut("Unable to build snapshot of the garbage collector state\n");
        return Status;
    }

    // If the address lies in the GC heap, treat it as a managed
    // DynamicMethod object rather than a MethodDesc.
    if (g_snapshot.GetHeap(dwStartAddr) != NULL)
    {
        dwDynamicMethodObj = dwStartAddr;
    }

    if (dwDynamicMethodObj == NULL)
    {
        // We have been given a MethodDesc
        DacpMethodDescData MethodDescData;
        if (MethodDescData.Request(g_sos, TO_CDADDR(dwStartAddr)) != S_OK)
        {
            ExtOut("%p is not a MethodDesc\n", SOS_PTR(dwStartAddr));
            return Status;
        }

        if (MethodDescData.bIsDynamic && MethodDescData.managedDynamicMethodObject)
        {
            // Dynamic method: fall through to the managed-object path below.
            dwDynamicMethodObj = TO_TADDR(MethodDescData.managedDynamicMethodObject);
            if (dwDynamicMethodObj == NULL)
            {
                ExtOut("Unable to print IL for DynamicMethodDesc %p\n", SOS_PTR(dwDynamicMethodObj));
                return Status;
            }
        }
        else
        {
            // Ordinary method: resolve the IL address and metadata importer,
            // then decode the IL stream from the target.
            GetILAddressResult result = GetILAddress(MethodDescData);
            if (std::get<0>(result) == NULL)
            {
                ExtOut("ilAddr is %p\n", SOS_PTR(std::get<0>(result)));
                return E_FAIL;
            }
            ExtOut("ilAddr is %p pImport is %p\n", SOS_PTR(std::get<0>(result)), SOS_PTR(std::get<1>(result)));
            TADDR ilAddr = std::get<0>(result);
            ToRelease<IMetaDataImport> pImport(std::get<1>(result));
            IfFailRet(DecodeILFromAddress(pImport, ilAddr));
        }
    }

    if (dwDynamicMethodObj != NULL)
    {
        // We have a DynamicMethod managed object, let us visit the town and paint.
        DacpObjectData codeArray;
        DacpObjectData tokenArray;
        DWORD_PTR tokenArrayAddr;
        if (!GatherDynamicInfo (dwDynamicMethodObj, &codeArray, &tokenArray, &tokenArrayAddr))
        {
            DMLOut("Error gathering dynamic info from object at %s.\n", DMLObject(dwDynamicMethodObj));
            return Status;
        }

        // Read the memory into a local buffer
        BYTE *pArray = new NOTHROW BYTE[(SIZE_T)codeArray.dwNumComponents];
        if (pArray == NULL)
        {
            ExtOut("Not enough memory to read IL\n");
            return Status;
        }

        Status = g_ExtData->ReadVirtual(UL64_TO_CDA(codeArray.ArrayDataPtr), pArray, (ULONG)codeArray.dwNumComponents, NULL);
        if (Status != S_OK)
        {
            ExtOut("Failed to read memory\n");
            delete [] pArray;
            return Status;
        }

        // Now we have a local copy of the IL, and a managed array for token resolution.
        // Visit our IL parser with this info.
        ExtOut("This is dynamic IL. Exception info is not reported at this time.\n");
        ExtOut("If a token is unresolved, run \"!do <addr>\" on the addr given\n");
        ExtOut("in parenthesis. You can also look at the token table yourself, by\n");
        ExtOut("running \"!DumpArray %p\".\n\n", SOS_PTR(tokenArrayAddr));

        DecodeDynamicIL(pArray, (ULONG)codeArray.dwNumComponents, tokenArray);

        delete [] pArray;
    }
    return Status;
}
// Reads a metadata signature blob from the target at dwSigAddr, growing the
// local copy until it parses, then prints the formatted signature.
// fMethod selects method-signature vs. field/standalone-signature parsing.
void DumpSigWorker (
        DWORD_PTR dwSigAddr,
        DWORD_PTR dwModuleAddr,
        BOOL fMethod)
{
    //
    // Find the length of the signature and copy it into the debugger process.
    //
    ULONG cbSig = 0;
    const ULONG cbSigInc = 256;
    ArrayHolder<COR_SIGNATURE> pSig = new NOTHROW COR_SIGNATURE[cbSigInc];
    if (pSig == NULL)
    {
        ReportOOM();
        return;
    }
    CQuickBytes sigString;
    // Grow-and-retry loop: read another chunk, attempt to parse; repeat until
    // the signature parses, errors out, or the read comes up short.
    for (;;)
    {
        if (IsInterrupt())
            return;
        ULONG cbCopied;
        if (!SafeReadMemory(TO_TADDR(dwSigAddr + cbSig), pSig + cbSig, cbSigInc, &cbCopied))
            return;
        cbSig += cbCopied;
        sigString.ReSize(0);
        GetSignatureStringResults result;
        if (fMethod)
            result = GetMethodSignatureString(pSig, cbSig, dwModuleAddr, &sigString);
        else
            result = GetSignatureString(pSig, cbSig, dwModuleAddr, &sigString);
        if (GSS_ERROR == result)
            return;
        if (GSS_SUCCESS == result)
            break;
        // If we didn't get the full amount back, and we failed to parse the
        // signature, it's not valid because of insufficient data
        if (cbCopied < 256)
        {
            ExtOut("Invalid signature\n");
            return;
        }
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:6280) // "Suppress PREFast warning about mismatch alloc/free"
#endif
        // NOTE(review): pSig was allocated with new[] (held by ArrayHolder) but is
        // grown here with realloc, and the holder will later free the realloc'd
        // pointer — an alloc/free mismatch (the pragma above suppresses the PREfast
        // warning). Confirm ArrayHolder's deallocation strategy before changing this.
        PCOR_SIGNATURE pSigNew = (PCOR_SIGNATURE)realloc(pSig, cbSig+cbSigInc);
#ifdef _PREFAST_
#pragma warning(pop)
#endif
        if (pSigNew == NULL)
        {
            ExtOut("Out of memory\n");
            return;
        }
        pSig = pSigNew;
    }
    ExtOut("%S\n", (PCWSTR)sigString.Ptr());
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump a signature object. *
* *
\**********************************************************************/
DECLARE_API(DumpSig)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    //
    // Parse the two required positional arguments: <sigaddr> <moduleaddr>.
    //
    StringHolder sigText;
    StringHolder moduleText;
    CMDValue positional[] =
    {
        {&sigText.data, COSTRING},
        {&moduleText.data, COSTRING}
    };
    size_t parsedCount = 0;
    if (!GetCMDOption(args, NULL, 0, positional, _countof(positional), &parsedCount))
    {
        return Status;
    }
    if (parsedCount != 2)
    {
        ExtOut("!DumpSig <sigaddr> <moduleaddr>\n");
        return Status;
    }

    // Both expressions must evaluate to non-zero target addresses.
    DWORD_PTR sigAddr = GetExpression(sigText.data);
    DWORD_PTR moduleAddr = GetExpression(moduleText.data);
    if (sigAddr == 0 || moduleAddr == 0)
    {
        ExtOut("Invalid parameters %s %s\n", sigText.data, moduleText.data);
        return Status;
    }

    // TRUE => interpret the blob as a complete method signature.
    DumpSigWorker(sigAddr, moduleAddr, TRUE);
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump a portion of a signature object. *
* *
\**********************************************************************/
DECLARE_API(DumpSigElem)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    //
    // Parse the two required positional arguments: <sigaddr> <moduleaddr>.
    //
    StringHolder sigText;
    StringHolder moduleText;
    CMDValue positional[] =
    {
        {&sigText.data, COSTRING},
        {&moduleText.data, COSTRING}
    };
    size_t parsedCount = 0;
    if (!GetCMDOption(args, NULL, 0, positional, _countof(positional), &parsedCount))
    {
        return Status;
    }
    if (parsedCount != 2)
    {
        ExtOut("!DumpSigElem <sigaddr> <moduleaddr>\n");
        return Status;
    }

    // Both expressions must evaluate to non-zero target addresses.
    DWORD_PTR sigAddr = GetExpression(sigText.data);
    DWORD_PTR moduleAddr = GetExpression(moduleText.data);
    if (sigAddr == 0 || moduleAddr == 0)
    {
        ExtOut("Invalid parameters %s %s\n", sigText.data, moduleText.data);
        return Status;
    }

    // FALSE => interpret the blob as a signature element, not a full method signature.
    DumpSigWorker(sigAddr, moduleAddr, FALSE);
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of an EEClass from *
* a given address
* *
\**********************************************************************/
DECLARE_API(DumpClass)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    DWORD_PTR dwStartAddr = 0;
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&dwStartAddr, COHEX}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    if (nArg == 0)
    {
        ExtOut("Missing EEClass address\n");
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    // An EEClass is dumped through its owning MethodTable; resolve it first.
    CLRDATA_ADDRESS methodTable;
    if ((Status=g_sos->GetMethodTableForEEClass(TO_CDADDR(dwStartAddr), &methodTable)) != S_OK)
    {
        ExtOut("Invalid EEClass address\n");
        return Status;
    }
    DacpMethodTableData mtdata;
    if ((Status=mtdata.Request(g_sos, TO_CDADDR(methodTable)))!=S_OK)
    {
        ExtOut("EEClass has an invalid MethodTable address\n");
        return Status;
    }
    sos::MethodTable mt = TO_TADDR(methodTable);
    ExtOut("Class Name: %S\n", mt.GetName());
    WCHAR fileName[MAX_LONGPATH];
    FileNameForModule(TO_TADDR(mtdata.Module), fileName);
    ExtOut("mdToken: %p\n", mtdata.cl);
    ExtOut("File: %S\n", fileName);
    // Resolve the parent's EEClass (if any) via the parent MethodTable.
    CLRDATA_ADDRESS ParentEEClass = NULL;
    if (mtdata.ParentMethodTable)
    {
        DacpMethodTableData mtdataparent;
        if ((Status=mtdataparent.Request(g_sos, TO_CDADDR(mtdata.ParentMethodTable)))!=S_OK)
        {
            ExtOut("EEClass has an invalid MethodTable address\n");
            return Status;
        }
        ParentEEClass = mtdataparent.Class;
    }
    DMLOut("Parent Class: %s\n", DMLClass(ParentEEClass));
    DMLOut("Module: %s\n", DMLModule(mtdata.Module));
    DMLOut("Method Table: %s\n", DMLMethodTable(methodTable));
    // NOTE(review): "Vtable Slots" prints wNumVirtuals and "Total Method Slots"
    // prints wNumVtableSlots — the labels look cross-wired relative to the field
    // names; confirm against the DAC structure definitions before changing.
    ExtOut("Vtable Slots: %x\n", mtdata.wNumVirtuals);
    ExtOut("Total Method Slots: %x\n", mtdata.wNumVtableSlots);
    ExtOut("Class Attributes: %x ", mtdata.dwAttrClass);
    if (IsTdInterface(mtdata.dwAttrClass))
        ExtOut("Interface, ");
    if (IsTdAbstract(mtdata.dwAttrClass))
        ExtOut("Abstract, ");
    if (IsTdImport(mtdata.dwAttrClass))
        ExtOut("ComImport, ");
    ExtOut("\n");
    // Field summary plus (when any fields exist) a full field listing.
    DacpMethodTableFieldData vMethodTableFields;
    if (SUCCEEDED(vMethodTableFields.Request(g_sos, methodTable)))
    {
        ExtOut("NumInstanceFields: %x\n", vMethodTableFields.wNumInstanceFields);
        ExtOut("NumStaticFields: %x\n", vMethodTableFields.wNumStaticFields);
        if (vMethodTableFields.wNumThreadStaticFields != 0)
        {
            ExtOut("NumThreadStaticFields: %x\n", vMethodTableFields.wNumThreadStaticFields);
        }
        if (vMethodTableFields.wContextStaticsSize)
        {
            ExtOut("ContextStaticOffset: %x\n", vMethodTableFields.wContextStaticOffset);
            ExtOut("ContextStaticsSize: %x\n", vMethodTableFields.wContextStaticsSize);
        }
        if (vMethodTableFields.wNumInstanceFields + vMethodTableFields.wNumStaticFields > 0)
        {
            DisplayFields(methodTable, &mtdata, &vMethodTableFields, NULL, TRUE, FALSE);
        }
    }
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a MethodTable *
* from a given address *
* *
\**********************************************************************/
DECLARE_API(DumpMT)
{
    DWORD_PTR dwStartAddr=0;
    DWORD_PTR dwOriginalAddr;
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    BOOL bDumpMDTable = FALSE;
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-MD", &bDumpMDTable, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE}
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&dwStartAddr, COHEX}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    TableOutput table(2, 16, AlignLeft, false);
    if (nArg == 0)
    {
        Print("Missing MethodTable address\n");
        return Status;
    }
    dwOriginalAddr = dwStartAddr;
    // Strip the low two bits (flag bits on MethodTable pointers) before validating.
    dwStartAddr = dwStartAddr&~3;
    if (!IsMethodTable(dwStartAddr))
    {
        Print(dwOriginalAddr, " is not a MethodTable\n");
        return Status;
    }
    DacpMethodTableData vMethTable;
    vMethTable.Request(g_sos, TO_CDADDR(dwStartAddr));
    if (vMethTable.bIsFree)
    {
        Print("Free MethodTable\n");
        return Status;
    }
    // Collectible-assembly data; only meaningful when a loader allocator handle exists.
    DacpMethodTableCollectibleData vMethTableCollectible;
    vMethTableCollectible.Request(g_sos, TO_CDADDR(dwStartAddr));
    table.WriteRow("EEClass:", EEClassPtr(vMethTable.Class));
    table.WriteRow("Module:", ModulePtr(vMethTable.Module));
    sos::MethodTable mt = (TADDR)dwStartAddr;
    table.WriteRow("Name:", mt.GetName());
    WCHAR fileName[MAX_LONGPATH];
    FileNameForModule(TO_TADDR(vMethTable.Module), fileName);
    table.WriteRow("mdToken:", Pointer(vMethTable.cl));
    table.WriteRow("File:", fileName[0] ? fileName : W("Unknown Module"));
    if (vMethTableCollectible.LoaderAllocatorObjectHandle != NULL)
    {
        TADDR loaderAllocator;
        if (SUCCEEDED(MOVE(loaderAllocator, vMethTableCollectible.LoaderAllocatorObjectHandle)))
        {
            table.WriteRow("LoaderAllocator:", ObjectPtr(loaderAllocator));
        }
    }
    table.WriteRow("BaseSize:", PrefixHex(vMethTable.BaseSize));
    table.WriteRow("ComponentSize:", PrefixHex(vMethTable.ComponentSize));
    table.WriteRow("DynamicStatics:", vMethTable.bIsDynamic ? "true" : "false");
    table.WriteRow("ContainsPointers:", vMethTable.bContainsPointers ? "true" : "false");
    table.WriteRow("Slots in VTable:", Decimal(vMethTable.wNumMethods));
    table.SetColWidth(0, 29);
    table.WriteRow("Number of IFaces in IFaceMap:", Decimal(vMethTable.wNumInterfaces));
    // -MD: additionally walk every vtable slot and print its MethodDesc info.
    if (bDumpMDTable)
    {
        table.ReInit(4, POINTERSIZE_HEX, AlignRight);
        table.SetColAlignment(3, AlignLeft);
        table.SetColWidth(2, 6);
        Print("--------------------------------------\n");
        Print("MethodDesc Table\n");
        table.WriteRow("Entry", "MethodDesc", "JIT", "Name");
        for (DWORD n = 0; n < vMethTable.wNumMethods; n++)
        {
            JITTypes jitType;
            DWORD_PTR methodDesc=0;
            DWORD_PTR gcinfoAddr;
            CLRDATA_ADDRESS entry;
            if (g_sos->GetMethodTableSlot(dwStartAddr, n, &entry) != S_OK)
            {
                PrintLn("<error getting slot ", Decimal(n), ">");
                continue;
            }
            IP2MethodDesc((DWORD_PTR)entry, methodDesc, jitType, gcinfoAddr);
            table.WriteColumn(0, entry);
            table.WriteColumn(1, MethodDescPtr(methodDesc));
            if (jitType == TYPE_UNKNOWN && methodDesc != NULL)
            {
                // We can get a more accurate jitType from NativeCodeAddr of the methoddesc,
                // because the methodtable entry hasn't always been patched.
                DacpMethodDescData tmpMethodDescData;
                if (tmpMethodDescData.Request(g_sos, TO_CDADDR(methodDesc)) == S_OK)
                {
                    DacpCodeHeaderData codeHeaderData;
                    if (codeHeaderData.Request(g_sos,tmpMethodDescData.NativeCodeAddr) == S_OK)
                    {
                        jitType = (JITTypes) codeHeaderData.JITType;
                    }
                }
            }
            const char *pszJitType = "NONE";
            if (jitType == TYPE_JIT)
                pszJitType = "JIT";
            else if (jitType == TYPE_PJIT)
                pszJitType = "PreJIT";
            else
            {
                DacpMethodDescData MethodDescData;
                if (MethodDescData.Request(g_sos, TO_CDADDR(methodDesc)) == S_OK)
                {
                    // Is it an fcall?
                    // Native code inside the runtime module itself is reported as FCALL.
                    ULONG64 baseAddress = g_pRuntime->GetModuleAddress();
                    ULONG64 size = g_pRuntime->GetModuleSize();
                    if ((TO_TADDR(MethodDescData.NativeCodeAddr) >=  TO_TADDR(baseAddress)) &&
                        ((TO_TADDR(MethodDescData.NativeCodeAddr) <  TO_TADDR(baseAddress + size))))
                    {
                        pszJitType = "FCALL";
                    }
                }
            }
            table.WriteColumn(2, pszJitType);
            NameForMD_s(methodDesc,g_mdName,mdNameLen);
            table.WriteColumn(3, g_mdName);
        }
    }
    return Status;
}
extern size_t Align (size_t nbytes);
// Prints a value-class instance: type name, MethodTable/EEClass, size, owning
// file, and (optionally) its fields. taMT is the value type's MethodTable and
// taObject the address of the boxed/inline value data.
HRESULT PrintVC(TADDR taMT, TADDR taObject, BOOL bPrintFields = TRUE)
{
    HRESULT Status;
    DacpMethodTableData mtabledata;
    if ((Status = mtabledata.Request(g_sos, TO_CDADDR(taMT)))!=S_OK)
        return Status;
    size_t size = mtabledata.BaseSize;
    if ((Status=g_sos->GetMethodTableName(TO_CDADDR(taMT), mdNameLen, g_mdName, NULL))!=S_OK)
        return Status;
    ExtOut("Name: %S\n", g_mdName);
    DMLOut("MethodTable: %s\n", DMLMethodTable(taMT));
    DMLOut("EEClass: %s\n", DMLClass(mtabledata.Class));
    ExtOut("Size: %d(0x%x) bytes\n", size, size);
    // g_mdName is reused as a scratch buffer for the module file name here.
    FileNameForModule(TO_TADDR(mtabledata.Module), g_mdName);
    ExtOut("File: %S\n", g_mdName[0] ? g_mdName : W("Unknown Module"));
    if (bPrintFields)
    {
        DacpMethodTableFieldData vMethodTableFields;
        if ((Status = vMethodTableFields.Request(g_sos,TO_CDADDR(taMT)))!=S_OK)
            return Status;
        ExtOut("Fields:\n");
        if (vMethodTableFields.wNumInstanceFields + vMethodTableFields.wNumStaticFields > 0)
            DisplayFields(TO_CDADDR(taMT), &mtabledata, &vMethodTableFields, taObject, TRUE, TRUE);
    }
    return S_OK;
}
// Prints the name and MethodTable of the type represented by a
// System.RuntimeType object, by reading its m_handle field.
void PrintRuntimeTypeInfo(TADDR p_rtObject, const DacpObjectData & rtObjectData)
{
    // Locate the m_handle field, which carries the underlying type handle.
    int handleOffset = GetObjFieldOffset(TO_CDADDR(p_rtObject), rtObjectData.MethodTable, W("m_handle"));
    if (handleOffset <= 0)
        return;
    TADDR typeMT;
    if (!SUCCEEDED(GetMTOfObject(p_rtObject + handleOffset, &typeMT)))
        return;
    sos::MethodTable table = typeMT;
    ExtOut("Type Name: %S\n", table.GetName());
    DMLOut("Type MT: %s\n", DMLMethodTable(typeMT));
}
// Prints a managed object's header info (type name, MT, EEClass, size,
// RCW/CCW), type-specific extras (RuntimeType info, array/string content),
// optionally its fields, and any thin-lock held on it.
HRESULT PrintObj(TADDR taObj, BOOL bPrintFields = TRUE)
{
    // A bad MethodTable is reported but we still try to dump what we can.
    if (!sos::IsObject(taObj, true))
    {
        ExtOut("<Note: this object has an invalid CLASS field>\n");
    }
    DacpObjectData objData;
    HRESULT Status;
    if ((Status=objData.Request(g_sos, TO_CDADDR(taObj))) != S_OK)
    {
        ExtOut("Invalid object\n");
        return Status;
    }
    // Free (dead) heap chunks only have a size worth printing.
    if (objData.ObjectType==OBJ_FREE)
    {
        ExtOut("Free Object\n");
        DWORD_PTR size = (DWORD_PTR)objData.Size;
        ExtOut("Size: %" POINTERSIZE_TYPE "d(0x%" POINTERSIZE_TYPE "x) bytes\n", size, size);
        return S_OK;
    }
    sos::Object obj = taObj;
    ExtOut("Name: %S\n", obj.GetTypeName());
    DMLOut("MethodTable: %s\n", DMLMethodTable(objData.MethodTable));
    DacpMethodTableData mtabledata;
    if ((Status=mtabledata.Request(g_sos,objData.MethodTable)) == S_OK)
    {
        DMLOut("EEClass: %s\n", DMLClass(mtabledata.Class));
    }
    else
    {
        ExtOut("Invalid EEClass address\n");
        return Status;
    }
    // COM interop wrappers, when present.
    if (objData.RCW != NULL)
    {
        DMLOut("RCW: %s\n", DMLRCWrapper(objData.RCW));
    }
    if (objData.CCW != NULL)
    {
        DMLOut("CCW: %s\n", DMLCCWrapper(objData.CCW));
    }
    DWORD_PTR size = (DWORD_PTR)objData.Size;
    ExtOut("Size: %" POINTERSIZE_TYPE "d(0x%" POINTERSIZE_TYPE "x) bytes\n", size, size);
    // Reflection objects get extra type information.
    if (_wcscmp(obj.GetTypeName(), W("System.RuntimeType")) == 0)
    {
        PrintRuntimeTypeInfo(taObj, objData);
    }
    if (_wcscmp(obj.GetTypeName(), W("System.RuntimeType+RuntimeTypeCache")) == 0)
    {
        // Get the method table
        int iOffset = GetObjFieldOffset (TO_CDADDR(taObj), objData.MethodTable, W("m_runtimeType"));
        if (iOffset > 0)
        {
            TADDR rtPtr;
            if (MOVE(rtPtr, taObj + iOffset) == S_OK)
            {
                DacpObjectData rtObjectData;
                if ((Status=rtObjectData.Request(g_sos, TO_CDADDR(rtPtr))) != S_OK)
                {
                    ExtOut("Error when reading RuntimeType field\n");
                    return Status;
                }
                PrintRuntimeTypeInfo(rtPtr, rtObjectData);
            }
        }
    }
    if (objData.ObjectType==OBJ_ARRAY)
    {
        ExtOut("Array: Rank %d, Number of elements %" POINTERSIZE_TYPE "d, Type %s",
               objData.dwRank, (DWORD_PTR)objData.dwNumComponents, ElementTypeName(objData.ElementType));
        IfDMLOut(" (<exec cmd=\"!DumpArray /d %p\">Print Array</exec>)", SOS_PTR(taObj));
        ExtOut("\n");
        // Byte/char arrays additionally get an inline content preview.
        if (objData.ElementType == ELEMENT_TYPE_I1 ||
            objData.ElementType == ELEMENT_TYPE_U1 ||
            objData.ElementType == ELEMENT_TYPE_CHAR)
        {
            bool wide = objData.ElementType == ELEMENT_TYPE_CHAR;
            // Get the size of the character array, but clamp it to a reasonable length.
            // Data starts after the MT pointer and the length field.
            TADDR pos = taObj + (2 * sizeof(DWORD_PTR));
            DWORD_PTR num;
            moveN(num, taObj + sizeof(DWORD_PTR));
            if (IsDMLEnabled())
                DMLOut("<exec cmd=\"%s %x L%x\">Content</exec>: ", (wide) ? "dw" : "db", pos, num);
            else
                ExtOut("Content: ");
            CharArrayContent(pos, (ULONG)(num <= 128 ? num : 128), wide);
            ExtOut("\n");
        }
    }
    else
    {
        // g_mdName is reused as a scratch buffer for the module file name here.
        FileNameForModule(TO_TADDR(mtabledata.Module), g_mdName);
        ExtOut("File: %S\n", g_mdName[0] ? g_mdName : W("Unknown Module"));
    }
    if (objData.ObjectType == OBJ_STRING)
    {
        ExtOut("String: ");
        StringObjectContent(taObj);
        ExtOut("\n");
    }
    else if (objData.ObjectType == OBJ_OBJECT)
    {
        ExtOut("Object\n");
    }
    if (bPrintFields)
    {
        DacpMethodTableFieldData vMethodTableFields;
        if ((Status = vMethodTableFields.Request(g_sos,TO_CDADDR(objData.MethodTable)))!=S_OK)
            return Status;
        ExtOut("Fields:\n");
        if (vMethodTableFields.wNumInstanceFields + vMethodTableFields.wNumStaticFields > 0)
        {
            DisplayFields(objData.MethodTable, &mtabledata, &vMethodTableFields, taObj, TRUE, FALSE);
        }
        else
        {
            ExtOut("None\n");
        }
    }
    // Report a thin lock (header-embedded monitor) if one is held on this object.
    sos::ThinLockInfo lockInfo;
    if (obj.GetThinLock(lockInfo))
    {
        ExtOut("ThinLock owner %x (%p), Recursive %x\n", lockInfo.ThreadId,
            SOS_PTR(lockInfo.ThreadPtr), lockInfo.Recursion);
    }
    return S_OK;
}
// Advances a multi-dimensional index vector with carry propagation.
// Returns FALSE when the indices have run past the end of the array.
// Expects the caller to have incremented the last dimension beforehand.
BOOL IndicesInRange (DWORD * indices, DWORD * lowerBounds, DWORD * bounds, DWORD rank)
{
    int dim = 0;
    if (!ClrSafeInt<int>::subtraction((int)rank, 1, dim))
    {
        ExtOut("<integer underflow>\n");
        return FALSE;
    }
    // Walk from the innermost dimension outward, carrying overflow upward.
    while (dim >= 0)
    {
        if (indices[dim] >= bounds[dim] + lowerBounds[dim])
        {
            // Overflow in the outermost dimension means we are done.
            if (dim == 0)
            {
                return FALSE;
            }
            indices[dim] = lowerBounds[dim];
            indices[dim - 1]++;
        }
        dim--;
    }
    return TRUE;
}
// Prints a coordinate vector in the form [i0][i1]...[iN-1].
void ExtOutIndices (DWORD * indices, DWORD rank)
{
    DWORD dim = 0;
    while (dim < rank)
    {
        ExtOut("[%d]", indices[dim]);
        ++dim;
    }
}
// Computes the flat (row-major) element offset of a multi-dimensional
// coordinate: sum over dimensions of (index - lowerBound) * stride, where the
// stride grows by each dimension's extent from the innermost dimension out.
// Returns 0 (after printing a diagnostic) if rank-1 underflows an int.
size_t OffsetFromIndices (DWORD * indices, DWORD * lowerBounds, DWORD * bounds, DWORD rank)
{
    // (The previous _ASSERTE(rank >= 0) was removed: rank is an unsigned
    // DWORD, so the comparison was tautologically true and checked nothing.)
    size_t multiplier = 1;
    size_t offset = 0;
    int i = 0;
    if (!ClrSafeInt<int>::subtraction((int)rank, 1, i))
    {
        ExtOut("<integer underflow>\n");
        return 0;
    }
    for (; i >= 0; i--)
    {
        DWORD curIndex = indices[i] - lowerBounds[i];
        offset += curIndex * multiplier;
        multiplier *= bounds[i];
    }
    return offset;
}
HRESULT PrintArray(DacpObjectData& objData, DumpArrayFlags& flags, BOOL isPermSetPrint);
#ifdef _DEBUG
// Debug-only helper: dumps a System.Security.PermissionSet (or
// NamedPermissionSet) object — its unrestricted flag and, via the internal
// token-based set, either the permissions array or the single permission object.
HRESULT PrintPermissionSet (TADDR p_PermSet)
{
    HRESULT Status = S_OK;
    DacpObjectData PermSetData;
    if ((Status=PermSetData.Request(g_sos, TO_CDADDR(p_PermSet))) != S_OK)
    {
        ExtOut("Invalid object\n");
        return Status;
    }
    sos::MethodTable mt = TO_TADDR(PermSetData.MethodTable);
    if (_wcscmp (W("System.Security.PermissionSet"), mt.GetName()) != 0 && _wcscmp(W("System.Security.NamedPermissionSet"), mt.GetName()) != 0)
    {
        ExtOut("Invalid PermissionSet object\n");
        return S_FALSE;
    }
    ExtOut("PermissionSet object: %p\n", SOS_PTR(p_PermSet));
    // Print basic info
    // Walk the fields, printing some fields in a special way.
    int iOffset = GetObjFieldOffset (TO_CDADDR(p_PermSet), PermSetData.MethodTable, W("m_Unrestricted"));
    if (iOffset > 0)
    {
        BYTE unrestricted;
        MOVE(unrestricted, p_PermSet + iOffset);
        if (unrestricted)
            ExtOut("Unrestricted: TRUE\n");
        else
            ExtOut("Unrestricted: FALSE\n");
    }
    // m_permSet is a TokenBasedSet holding either an array (m_Set) or a
    // single object (m_Obj) of permissions.
    iOffset = GetObjFieldOffset (TO_CDADDR(p_PermSet), PermSetData.MethodTable, W("m_permSet"));
    if (iOffset > 0)
    {
        TADDR tbSetPtr;
        MOVE(tbSetPtr, p_PermSet + iOffset);
        if (tbSetPtr != NULL)
        {
            DacpObjectData tbSetData;
            if ((Status=tbSetData.Request(g_sos, TO_CDADDR(tbSetPtr))) != S_OK)
            {
                ExtOut("Invalid object\n");
                return Status;
            }
            iOffset = GetObjFieldOffset (TO_CDADDR(tbSetPtr), tbSetData.MethodTable, W("m_Set"));
            if (iOffset > 0)
            {
                DWORD_PTR PermsArrayPtr;
                MOVE(PermsArrayPtr, tbSetPtr + iOffset);
                if (PermsArrayPtr != NULL)
                {
                    // Print all the permissions in the array
                    DacpObjectData objData;
                    if ((Status=objData.Request(g_sos, TO_CDADDR(PermsArrayPtr))) != S_OK)
                    {
                        ExtOut("Invalid object\n");
                        return Status;
                    }
                    DumpArrayFlags flags;
                    flags.bDetail = TRUE;
                    return PrintArray(objData, flags, TRUE);
                }
            }
            iOffset = GetObjFieldOffset (TO_CDADDR(tbSetPtr), tbSetData.MethodTable, W("m_Obj"));
            if (iOffset > 0)
            {
                DWORD_PTR PermObjPtr;
                MOVE(PermObjPtr, tbSetPtr + iOffset);
                if (PermObjPtr != NULL)
                {
                    // Print the permission object
                    return PrintObj(PermObjPtr);
                }
            }
        }
    }
    return Status;
}
#endif // _DEBUG
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of an object from a *
* given address
* *
\**********************************************************************/
DECLARE_API(DumpArray)
{
    INIT_API();
    DumpArrayFlags flags;
    MINIDUMP_NOT_SUPPORTED();
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-start", &flags.startIndex, COSIZE_T, TRUE},
        {"-length", &flags.Length, COSIZE_T, TRUE},
        {"-details", &flags.bDetail, COBOOL, FALSE},
        {"-nofields", &flags.bNoFieldsForElement, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&flags.strObject, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    DWORD_PTR p_Object = GetExpression (flags.strObject);
    if (p_Object == 0)
    {
        ExtOut("Invalid parameter %s\n", flags.strObject);
        return Status;
    }
    // A bad MethodTable is reported but we still attempt the dump.
    if (!sos::IsObject(p_Object, true))
    {
        ExtOut("<Note: this object has an invalid CLASS field>\n");
    }
    DacpObjectData objData;
    if ((Status=objData.Request(g_sos, TO_CDADDR(p_Object))) != S_OK)
    {
        ExtOut("Invalid object\n");
        return Status;
    }
    if (objData.ObjectType != OBJ_ARRAY)
    {
        ExtOut("Not an array, please use !DumpObj instead\n");
        return S_OK;
    }
    // All real work (bounds handling, element iteration) happens in PrintArray.
    return PrintArray(objData, flags, FALSE);
}
// Prints a managed array: header info (unless isPermSetPrint), then each
// element's coordinates and value; with flags.bDetail each element is also
// expanded via PrintVC/PrintObj. -start/-length only apply to rank-1 arrays.
// Fix: the early-exit message said the options ARE supported when the code in
// fact rejects them for multi-dimension arrays; it now says "not supported".
HRESULT PrintArray(DacpObjectData& objData, DumpArrayFlags& flags, BOOL isPermSetPrint)
{
    HRESULT Status = S_OK;
    // -start/-length are only meaningful for single-dimension arrays.
    if (objData.dwRank != 1 && (flags.Length != (DWORD_PTR)-1 ||flags.startIndex != 0))
    {
        ExtOut("For multi-dimension array, length and start index are not supported\n");
        return S_OK;
    }
    if (flags.startIndex > objData.dwNumComponents)
    {
        ExtOut("Start index out of range\n");
        return S_OK;
    }
    if (!flags.bDetail && flags.bNoFieldsForElement)
    {
        ExtOut("-nofields has no effect unless -details is specified\n");
    }
    DWORD i;
    if (!isPermSetPrint)
    {
        // TODO: don't depend on this being a MethodTable
        NameForMT_s(TO_TADDR(objData.ElementTypeHandle), g_mdName, mdNameLen);
        ExtOut("Name: %S[", g_mdName);
        for (i = 1; i < objData.dwRank; i++)
            ExtOut(",");
        ExtOut("]\n");
        DMLOut("MethodTable: %s\n", DMLMethodTable(objData.MethodTable));
        {
            DacpMethodTableData mtdata;
            if (SUCCEEDED(mtdata.Request(g_sos, objData.MethodTable)))
            {
                DMLOut("EEClass: %s\n", DMLClass(mtdata.Class));
            }
        }
        DWORD_PTR size = (DWORD_PTR)objData.Size;
        ExtOut("Size: %" POINTERSIZE_TYPE "d(0x%" POINTERSIZE_TYPE "x) bytes\n", size, size);
        ExtOut("Array: Rank %d, Number of elements %" POINTERSIZE_TYPE "d, Type %s\n",
               objData.dwRank, (DWORD_PTR)objData.dwNumComponents, ElementTypeName(objData.ElementType));
        DMLOut("Element Methodtable: %s\n", DMLMethodTable(objData.ElementTypeHandle));
    }
    BOOL isElementValueType = IsElementValueType(objData.ElementType);
    // Per-dimension bounds arrays live on the stack; guard the size math.
    DWORD dwRankAllocSize;
    if (!ClrSafeInt<DWORD>::multiply(sizeof(DWORD), objData.dwRank, dwRankAllocSize))
    {
        ExtOut("Integer overflow on array rank\n");
        return Status;
    }
    DWORD *lowerBounds = (DWORD *)alloca(dwRankAllocSize);
    if (!SafeReadMemory(objData.ArrayLowerBoundsPtr, lowerBounds, dwRankAllocSize, NULL))
    {
        ExtOut("Failed to read lower bounds info from the array\n");
        return S_OK;
    }
    DWORD *bounds = (DWORD *)alloca(dwRankAllocSize);
    if (!SafeReadMemory (objData.ArrayBoundsPtr, bounds, dwRankAllocSize, NULL))
    {
        ExtOut("Failed to read bounds info from the array\n");
        return S_OK;
    }
    //length is only supported for single-dimension array
    if (objData.dwRank == 1 && flags.Length != (DWORD_PTR)-1)
    {
        bounds[0] = _min(bounds[0], (DWORD)(flags.Length + flags.startIndex) - lowerBounds[0]);
    }
    DWORD *indices = (DWORD *)alloca(dwRankAllocSize);
    for (i = 0; i < objData.dwRank; i++)
    {
        indices[i] = lowerBounds[i];
    }
    //start index is only supported for single-dimension array
    if (objData.dwRank == 1)
    {
        indices[0] = (DWORD)flags.startIndex;
    }
    //Offset should be calculated by OffsetFromIndices. However because of the way
    //how we grow indices, incrementing offset by one happens to match indices in every iteration
    for (size_t offset = OffsetFromIndices (indices, lowerBounds, bounds, objData.dwRank);
         IndicesInRange (indices, lowerBounds, bounds, objData.dwRank);
         indices[objData.dwRank - 1]++, offset++)
    {
        if (IsInterrupt())
        {
            ExtOut("interrupted by user\n");
            break;
        }
        TADDR elementAddress = TO_TADDR(objData.ArrayDataPtr + offset * objData.dwComponentSize);
        TADDR p_Element = NULL;
        if (isElementValueType)
        {
            // Value types are stored inline; the element address is the value itself.
            p_Element = elementAddress;
        }
        else if (!SafeReadMemory (elementAddress, &p_Element, sizeof (p_Element), NULL))
        {
            ExtOut("Failed to read element at ");
            ExtOutIndices(indices, objData.dwRank);
            ExtOut("\n");
            continue;
        }
        if (p_Element)
        {
            ExtOutIndices(indices, objData.dwRank);
            if (isElementValueType)
            {
                DMLOut( " %s\n", DMLValueClass(objData.ElementTypeHandle, p_Element));
            }
            else
            {
                DMLOut(" %s\n", DMLObject(p_Element));
            }
        }
        else if (!isPermSetPrint)
        {
            ExtOutIndices(indices, objData.dwRank);
            ExtOut(" null\n");
        }
        // -details: recursively expand each element's contents.
        if (flags.bDetail)
        {
            IncrementIndent();
            if (isElementValueType)
            {
                PrintVC(TO_TADDR(objData.ElementTypeHandle), elementAddress, !flags.bNoFieldsForElement);
            }
            else if (p_Element != NULL)
            {
                PrintObj(p_Element, !flags.bNoFieldsForElement);
            }
            DecrementIndent();
        }
    }
    return S_OK;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of an object from a *
* given address
* *
\**********************************************************************/
DECLARE_API(DumpObj)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    BOOL dml = FALSE;
    BOOL bNoFields = FALSE;
    BOOL bRefs = FALSE;
    StringHolder str_Object;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-nofields", &bNoFields, COBOOL, FALSE},
        {"-refs", &bRefs, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&str_Object.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    DWORD_PTR p_Object = GetExpression(str_Object.data);
    EnableDMLHolder dmlHolder(dml);
    if (p_Object == 0)
    {
        ExtOut("Invalid parameter %s\n", args);
        return Status;
    }
    // PrintObj does the real work; -refs additionally walks the object's
    // outgoing GC references.
    try {
        Status = PrintObj(p_Object, !bNoFields);
        if (SUCCEEDED(Status) && bRefs)
        {
            ExtOut("GC Refs:\n");
            TableOutput out(2, POINTERSIZE_HEX, AlignRight, 4);
            out.WriteRow("offset", "object");
            for (sos::RefIterator itr(TO_TADDR(p_Object)); itr; ++itr)
                out.WriteRow(Hex(itr.GetOffset()), ObjectPtr(*itr));
        }
    }
    catch(const sos::Exception &e)
    {
        ExtOut("%s\n", e.what());
        return E_FAIL;
    }
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a delegate from a *
* given address. *
* *
\**********************************************************************/
DECLARE_API(DumpDelegate)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    try
    {
        BOOL dml = FALSE;
        DWORD_PTR dwAddr = 0;
        CMDOption option[] =
        {   // name, vptr, type, hasValue
            {"/d", &dml, COBOOL, FALSE}
        };
        CMDValue arg[] =
        {   // vptr, type
            {&dwAddr, COHEX}
        };
        size_t nArg;
        if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
        {
            return Status;
        }
        if (nArg != 1)
        {
            ExtOut("Usage: !DumpDelegate <delegate object address>\n");
            return Status;
        }
        EnableDMLHolder dmlHolder(dml);
        CLRDATA_ADDRESS delegateAddr = TO_CDADDR(dwAddr);
        if (!sos::IsObject(delegateAddr))
        {
            ExtOut("Invalid object.\n");
        }
        else
        {
            sos::Object delegateObj = TO_TADDR(delegateAddr);
            if (!IsDerivedFrom(TO_CDADDR(delegateObj.GetMT()), W("System.Delegate")))
            {
                ExtOut("Object of type '%S' is not a delegate.", delegateObj.GetTypeName());
            }
            else
            {
                ExtOut("Target           Method           Name\n");
                // Worklist-based traversal: multicast delegates are expanded by
                // pushing each entry of their _invocationList onto the stack.
                std::vector<CLRDATA_ADDRESS> delegatesRemaining;
                delegatesRemaining.push_back(delegateAddr);
                while (delegatesRemaining.size() > 0)
                {
                    delegateAddr = delegatesRemaining.back();
                    delegatesRemaining.pop_back();
                    delegateObj = TO_TADDR(delegateAddr);
                    int offset;
                    if ((offset = GetObjFieldOffset(delegateObj.GetAddress(), delegateObj.GetMT(), W("_target"))) != 0)
                    {
                        CLRDATA_ADDRESS target;
                        MOVE(target, delegateObj.GetAddress() + offset);
                        if ((offset = GetObjFieldOffset(delegateObj.GetAddress(), delegateObj.GetMT(), W("_invocationList"))) != 0)
                        {
                            CLRDATA_ADDRESS invocationList;
                            MOVE(invocationList, delegateObj.GetAddress() + offset);
                            if ((offset = GetObjFieldOffset(delegateObj.GetAddress(), delegateObj.GetMT(), W("_invocationCount"))) != 0)
                            {
                                int invocationCount;
                                MOVE(invocationCount, delegateObj.GetAddress() + offset);
                                if (invocationList == NULL)
                                {
                                    // Leaf delegate: print target object and resolved method.
                                    CLRDATA_ADDRESS md;
                                    DMLOut("%s ", DMLObject(target));
                                    if (TryGetMethodDescriptorForDelegate(delegateAddr, &md))
                                    {
                                        DMLOut("%s ", DMLMethodDesc(md));
                                        NameForMD_s((DWORD_PTR)md, g_mdName, mdNameLen);
                                        ExtOut("%S\n", g_mdName);
                                    }
                                    else
                                    {
                                        ExtOut("(unknown)\n");
                                    }
                                }
                                else if (sos::IsObject(invocationList, false))
                                {
                                    // Multicast delegate: queue each live entry for expansion.
                                    // NOTE(review): invocationCount (int) is compared against
                                    // dwNumComponents (unsigned) — a negative count would make the
                                    // comparison false after conversion; confirm intended behavior.
                                    DacpObjectData objData;
                                    if (objData.Request(g_sos, invocationList) == S_OK &&
                                        objData.ObjectType == OBJ_ARRAY &&
                                        invocationCount <= objData.dwNumComponents)
                                    {
                                        for (int i = 0; i < invocationCount; i++)
                                        {
                                            CLRDATA_ADDRESS elementPtr;
                                            MOVE(elementPtr, TO_CDADDR(objData.ArrayDataPtr + (i * objData.dwComponentSize)));
                                            if (elementPtr != NULL && sos::IsObject(elementPtr, false))
                                            {
                                                delegatesRemaining.push_back(elementPtr);
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        return S_OK;
    }
    catch (const sos::Exception &e)
    {
        ExtOut("%s\n", e.what());
        return E_FAIL;
    }
}
// Walks the parent MethodTable chain of mtObj looking for System.Exception.
// Returns the System.Exception MethodTable if found, NULL otherwise.
CLRDATA_ADDRESS isExceptionObj(CLRDATA_ADDRESS mtObj)
{
    // We want to follow back until we get the mt for System.Exception
    DacpMethodTableData mtData;
    for (CLRDATA_ADDRESS current = mtObj; current != NULL; current = mtData.ParentMethodTable)
    {
        // Stop walking if the DAC cannot describe this MethodTable.
        if (mtData.Request(g_sos, current) != S_OK)
        {
            break;
        }
        if (current == g_special_usefulGlobals.ExceptionMethodTable)
        {
            return current;
        }
    }
    return NULL;
}
// Walks the parent MethodTable chain of mtObj looking for a type named
// System.Security.SecurityException (matched by name, not identity).
// Returns that MethodTable if found, NULL otherwise.
CLRDATA_ADDRESS isSecurityExceptionObj(CLRDATA_ADDRESS mtObj)
{
    // We want to follow back until we get the mt for System.Exception
    DacpMethodTableData mtData;
    for (CLRDATA_ADDRESS current = mtObj; current != NULL; current = mtData.ParentMethodTable)
    {
        // Stop walking if the DAC cannot describe this MethodTable.
        if (mtData.Request(g_sos, current) != S_OK)
        {
            break;
        }
        NameForMT_s(TO_TADDR(current), g_mdName, mdNameLen);
        if (_wcscmp(W("System.Security.SecurityException"), g_mdName) == 0)
        {
            return current;
        }
    }
    return NULL;
}
// Fill the passed in buffer with a text header for generated exception information.
// Returns the number of characters in the wszBuffer array on exit.
// If NULL is passed for wszBuffer, just returns the number of characters needed.
// Fill the passed in buffer with a text header for generated exception information.
// Returns the number of characters in the wszBuffer array on exit.
// If NULL is passed for wszBuffer, just returns the number of characters needed.
size_t AddExceptionHeader (__out_ecount_opt(bufferLength) WCHAR *wszBuffer, size_t bufferLength)
{
#ifdef _TARGET_WIN64_
    const WCHAR *wszHeader = W("        SP               IP               Function\n");
#else
    const WCHAR *wszHeader = W("    SP       IP       Function\n");
#endif // _TARGET_WIN64_
    if (wszBuffer)
    {
        // Plain copy instead of swprintf_s: the header is data, not a format
        // string, so it must not be passed as the format argument (fragile if
        // the text ever gains a '%'). Output is identical.
        wcscpy_s(wszBuffer, bufferLength, wszHeader);
    }
    return _wcslen(wszHeader);
}
// Per-frame flags stored alongside saved stack-trace elements.
enum StackTraceElementFlags
{
    // Set if this element represents the last frame of the foreign exception stack trace
    STEF_LAST_FRAME_FROM_FOREIGN_STACK_TRACE = 0x0001,
    // Set if the "ip" field has already been adjusted (decremented)
    STEF_IP_ADJUSTED = 0x0002,
};
// This struct needs to match the definition in the runtime.
// See: https://github.com/dotnet/runtime/blob/master/src/coreclr/src/vm/clrex.h
struct StackTraceElement
{
    UINT_PTR        ip;     // instruction pointer of the frame
    UINT_PTR        sp;     // stack pointer of the frame
    DWORD_PTR       pFunc;  // MethodDesc
    // TODO: #ifdef-out on platforms not supporting this
    INT             flags;  // This is StackTraceElementFlags but it needs to always be "int" sized for backward compatibility.
};
#include "sos_stacktrace.h"
#include "sildasm.h"
// Simple growable wide-string accumulator built on CQuickString.
// Append() extends the buffer as needed; Length()/String() expose the result.
class StringOutput
{
    public:
    CQuickString cs;
    StringOutput()
    {
        // Start with a 1KB buffer containing the empty string.
        cs.Alloc(1024);
        cs.String()[0] = L'\0';
    }

    BOOL Append(__in_z LPCWSTR pszStr)
    {
        size_t iInputLen = _wcslen (pszStr);
        size_t iCurLen = _wcslen (cs.String());
        // Grow to exactly fit current contents + input + terminator.
        if ((iCurLen + iInputLen + 1) > cs.Size())
        {
            if (cs.ReSize(iCurLen + iInputLen + 1) != S_OK)
            {
                return FALSE;
            }
        }
        // _TRUNCATE: silently clamp rather than invoke the invalid-parameter
        // handler if the buffer is somehow still too small.
        wcsncat_s (cs.String(), cs.Size(), pszStr, _TRUNCATE);
        return TRUE;
    }

    size_t Length()
    {
        return _wcslen(cs.String());
    }

    WCHAR *String()
    {
        return cs.String();
    }
};
static HRESULT DumpMDInfoBuffer(DWORD_PTR dwStartAddr, DWORD Flags, ULONG64 Esp, ULONG64 IPAddr, StringOutput& so);
// Using heuristics to determine if an exception object represented an async (hardware) or a
// managed exception
// We need to use these heuristics when the System.Exception object is not the active exception
// on some thread, but it's something found somewhere on the managed heap.
// uses the MapWin32FaultToCOMPlusException to figure out how we map async exceptions
// to managed exceptions and their HRESULTs
// HRESULTs in this table are treated as evidence of an async (hardware) exception
// by both IsAsyncException overloads below.
static const HRESULT AsyncHResultValues[] =
{
    COR_E_ARITHMETIC,    // kArithmeticException
    COR_E_OVERFLOW,      // kOverflowException
    COR_E_DIVIDEBYZERO, // kDivideByZeroException
    COR_E_FORMAT, // kFormatException
    COR_E_NULLREFERENCE, // kNullReferenceException
    E_POINTER, // kAccessViolationException
    // the EE is raising the next exceptions more often than the OS will raise an async
    // exception for these conditions, so in general treat these as Synchronous
      // COR_E_INDEXOUTOFRANGE, // kIndexOutOfRangeException
      // COR_E_OUTOFMEMORY, // kOutOfMemoryException
      // COR_E_STACKOVERFLOW, // kStackOverflowException
    COR_E_DATAMISALIGNED, // kDataMisalignedException
};
// Heuristically decides whether the exception object at taObj (whose method
// table is mtObj) represents an async (hardware) exception. Reads the object's
// _xcode field and, when that still says "managed", checks whether _HResult
// matches one of the HRESULTs in AsyncHResultValues.
BOOL IsAsyncException(CLRDATA_ADDRESS taObj, CLRDATA_ADDRESS mtObj)
{
    // by default we'll treat exceptions as synchronous
    UINT32 xcode = EXCEPTION_COMPLUS;

    int iOffset = GetObjFieldOffset (taObj, mtObj, W("_xcode"));
    if (iOffset > 0 && MOVE(xcode, taObj + iOffset) != S_OK)
    {
        // Could not read _xcode: report synchronous, as the original default.
        return FALSE;
    }

    if (xcode != EXCEPTION_COMPLUS)
    {
        // _xcode itself already marks this as an async exception.
        return TRUE;
    }

    HRESULT ehr = 0;
    iOffset = GetObjFieldOffset (taObj, mtObj, W("_HResult"));
    if (iOffset > 0)
    {
        if (MOVE(ehr, taObj + iOffset) != S_OK)
        {
            return FALSE;
        }
        for (size_t idx = 0; idx < _countof(AsyncHResultValues); ++idx)
        {
            if (ehr == AsyncHResultValues[idx])
            {
                // No entry in AsyncHResultValues equals EXCEPTION_COMPLUS,
                // so a match always means "async".
                return TRUE;
            }
        }
    }
    return FALSE;
}
// Overload that mirrors the heuristic above when the exception object's data
// has already been retrieved from the target via DacpExceptionObjectData.
BOOL IsAsyncException(const DacpExceptionObjectData & excData)
{
    if (excData.XCode != EXCEPTION_COMPLUS)
    {
        return TRUE;
    }

    for (size_t idx = 0; idx < _countof(AsyncHResultValues); ++idx)
    {
        if (excData.HResult == AsyncHResultValues[idx])
        {
            return TRUE;
        }
    }
    return FALSE;
}
#define SOS_STACKTRACE_SHOWEXPLICITFRAMES 0x00000002
// Formats a managed exception's saved stack trace (an array of
// StackTraceElement records at dataPtr in the target) into a text table with
// one "<SP> <IP> <method>" line per frame.
//
// dataPtr      - target address of the first StackTraceElement
// bytes        - total size of the element array, in bytes
// wszBuffer    - optional output buffer; pass NULL to only compute the length
// bufferLength - capacity of wszBuffer in characters
// bAsync       - TRUE if this was a hardware (async) exception
// bNestedCase  - TRUE when formatting a nested exception (last frame skipped)
// bLineNumbers - TRUE to append "[file @ line]" source info per frame
//
// Returns the number of characters of formatted text (excluding the NUL),
// whether or not wszBuffer was supplied.
size_t FormatGeneratedException (DWORD_PTR dataPtr,
    UINT bytes,
    __out_ecount_opt(bufferLength) WCHAR *wszBuffer,
    size_t bufferLength,
    BOOL bAsync,                // hardware exception if true
    BOOL bNestedCase = FALSE,
    BOOL bLineNumbers = FALSE)
{
    UINT count = bytes / sizeof(StackTraceElement);
    size_t Length = 0;

    _ASSERTE(g_targetMachine != nullptr);

    if (wszBuffer && bufferLength > 0)
    {
        wszBuffer[0] = L'\0';
    }

    // Buffer is calculated for sprintf below ("   %p %p %S\n");
    WCHAR wszLineBuffer[mdNameLen + 8 + sizeof(size_t)*2 + MAX_LONGPATH + 8];

    if (count == 0)
    {
        return 0;
    }

    if (bNestedCase)
    {
        // If we are computing the call stack for a nested exception, we
        // don't want to print the last frame, because the outer exception
        // will have that frame.
        count--;
    }

    for (UINT i = 0; i < count; i++)
    {
        StackTraceElement ste;
        MOVE (ste, dataPtr + i*sizeof(StackTraceElement));

        // ste.ip must be adjusted because of an ancient workaround in the exception
        // infrastructure. The workaround is that the exception needs to have
        // an ip address that will map to the line number where the exception was thrown.
        // (It doesn't matter that it's not a valid instruction). (see /vm/excep.cpp)
        //
        // This "counterhack" is not 100% accurate
        // The biggest issue is that !PrintException must work with exception objects
        // that may not be currently active; as a consequence we cannot rely on the
        // state of some "current thread" to infer whether the IP values stored in
        // the exception object have been adjusted or not. If we could, we may examine
        // the topmost "Frame" and make the decision based on whether it's a
        // FaultingExceptionFrame or not.
        // 1. On IA64 the IP values are never adjusted by the EE so there's nothing
        //    to adjust back.
        // 2. On AMD64:
        //    (a) if the exception was an async (hardware) exception add 1 to all
        //        IP values in the exception object
        //    (b) if the exception was a managed exception (either raised by the
        //        EE or thrown by managed code) do not adjust any IP values
        // 3. On X86:
        //    (a) if the exception was an async (hardware) exception add 1 to
        //        all but the topmost IP value in the exception object
        //    (b) if the exception was a managed exception (either raised by
        //        the EE or thrown by managed code) add 1 to all IP values in
        //        the exception object
#if defined(_TARGET_AMD64_)
        if (bAsync)
        {
            ste.ip += 1;
        }
#elif defined(_TARGET_X86_)
        // Case 3 above: adjust every frame for managed exceptions; skip only
        // the topmost frame (i == 0) for async exceptions.
        if (IsDbgTargetX86() && (!bAsync || i != 0))
        {
            ste.ip += 1;
        }
#endif // defined(_TARGET_AMD64_) || defined(_TARGET_X86_)

        StringOutput so;
        HRESULT Status = DumpMDInfoBuffer(ste.pFunc, SOS_STACKTRACE_SHOWADDRESSES|SOS_STACKTRACE_SHOWEXPLICITFRAMES, ste.sp, ste.ip, so);

        // If DumpMDInfoBuffer failed (due to out of memory or missing metadata),
        // or did not update so (when ste is an explicit frames), do not update wszBuffer
        if (Status == S_OK)
        {
            WCHAR filename[MAX_LONGPATH] = W("");
            ULONG linenum = 0;
            if (bLineNumbers &&
                // To get the source line number of the actual code that threw an exception, the IP needs
                // to be adjusted in certain cases.
                //
                // The IP of the stack frame points to either:
                //
                // 1) The instruction that caused a hardware exception (div by zero, null ref, etc).
                // 2) The instruction after the call to an internal runtime function (FCALL like IL_Throw,
                //    IL_Rethrow, JIT_OverFlow, etc.) that caused a software exception.
                // 3) The instruction after the call to a managed function (non-leaf node).
                //
                // #2 and #3 are the cases that need to adjust IP because they point after the call instruction
                // and may point to the next (incorrect) IL instruction/source line. We distinguish these from
                // #1 by the bAsync flag which is set to true for hardware exceptions and that it is a leaf node
                // (i == 0).
                //
                // When the IP needs to be adjusted it is a lot simpler to decrement IP instead of trying to figure
                // out the beginning of the instruction. It is enough for GetLineByOffset to return the correct line number.
                //
                // The unmodified IP is displayed (above by DumpMDInfoBuffer) which points after the exception in most
                // cases. This means that the printed IP and the printed line number often will not map to one another
                // and this is intentional.
                SUCCEEDED(GetLineByOffset(TO_CDADDR(ste.ip), &linenum, filename, _countof(filename), !bAsync || i > 0)))
            {
                swprintf_s(wszLineBuffer, _countof(wszLineBuffer), W("    %s [%s @ %d]\n"), so.String(), filename, linenum);
            }
            else
            {
                swprintf_s(wszLineBuffer, _countof(wszLineBuffer), W("    %s\n"), so.String());
            }

            // Always accumulate the length; only copy when a buffer was given.
            Length += _wcslen(wszLineBuffer);

            if (wszBuffer)
            {
                wcsncat_s(wszBuffer, bufferLength, wszLineBuffer, _TRUNCATE);
            }
        }
    }

    return Length;
}
// ExtOut has an internal limit for the string size
// Prints pwszLargeString by temporarily NUL-terminating and emitting
// chunkLen-character slices, restoring the overwritten character after each.
// len is the character count of the buffer (including the NUL, per callers).
// NOTE(review): when len is an exact multiple of chunkLen, the last full
// iteration touches pwszLargeString[len] — one past the buffer; confirm
// callers always pass len strictly between multiples or over-allocate.
void SosExtOutLargeString(__inout_z __inout_ecount_opt(len) WCHAR * pwszLargeString, size_t len)
{
    const size_t chunkLen = 2048;

    WCHAR *pwsz = pwszLargeString;  // beginning of a chunk
    size_t count = len/chunkLen;
    // write full chunks
    for (size_t idx = 0; idx < count; ++idx)
    {
        WCHAR *pch = pwsz + chunkLen; // after the chunk
        // zero terminate the chunk
        WCHAR ch = *pch;
        *pch = L'\0';

        ExtOut("%S", pwsz);

        // restore whacked char
        *pch = ch;

        // advance to next chunk
        pwsz += chunkLen;
    }

    // last chunk (anything left over after the full chunks)
    ExtOut("%S", pwsz);
}
// Prints the details of the managed exception object at taObj: its type,
// message, inner exception, generated stack trace, stack-trace string,
// HResult and (for SecurityException) the debug string.
// taObj        - target address of a System.Exception-derived object
// bLineNumbers - TRUE to resolve source file/line info for stack frames
// Returns S_OK on success, or the failing DAC request's HRESULT.
HRESULT FormatException(CLRDATA_ADDRESS taObj, BOOL bLineNumbers = FALSE)
{
    HRESULT Status = S_OK;

    DacpObjectData objData;
    if ((Status=objData.Request(g_sos, taObj)) != S_OK)
    {
        ExtOut("Invalid object\n");
        return Status;
    }

    // Make sure it is an exception object, and get the MT of Exception
    CLRDATA_ADDRESS exceptionMT = isExceptionObj(objData.MethodTable);
    if (exceptionMT == NULL)
    {
        ExtOut("Not a valid exception object\n");
        return Status;
    }

    DMLOut("Exception object: %s\n", DMLObject(taObj));

    if (NameForMT_s(TO_TADDR(objData.MethodTable), g_mdName, mdNameLen))
    {
        ExtOut("Exception type: %S\n", g_mdName);
    }
    else
    {
        ExtOut("Exception type: <Unknown>\n");
    }

    // Print basic info
    // First try to get exception object data using ISOSDacInterface2;
    // when that fails fall back to reading individual fields by offset.
    DacpExceptionObjectData excData;
    BOOL bGotExcData = SUCCEEDED(excData.Request(g_sos, taObj));

    // Walk the fields, printing some fields in a special way.
    // HR, InnerException, Message, StackTrace, StackTraceString

    {
        TADDR taMsg = 0;
        if (bGotExcData)
        {
            taMsg = TO_TADDR(excData.Message);
        }
        else
        {
            int iOffset = GetObjFieldOffset(taObj, objData.MethodTable, W("_message"));
            if (iOffset > 0)
            {
                MOVE (taMsg, taObj + iOffset);
            }
        }

        ExtOut("Message: ");

        if (taMsg)
            StringObjectContent(taMsg);
        else
            ExtOut("<none>");

        ExtOut("\n");
    }

    {
        TADDR taInnerExc = 0;
        if (bGotExcData)
        {
            taInnerExc = TO_TADDR(excData.InnerException);
        }
        else
        {
            int iOffset = GetObjFieldOffset(taObj, objData.MethodTable, W("_innerException"));
            if (iOffset > 0)
            {
                MOVE (taInnerExc, taObj + iOffset);
            }
        }

        ExtOut("InnerException: ");
        if (taInnerExc)
        {
            TADDR taMT;
            if (SUCCEEDED(GetMTOfObject(taInnerExc, &taMT)))
            {
                NameForMT_s(taMT, g_mdName, mdNameLen);
                ExtOut("%S, ", g_mdName);
                if (IsDMLEnabled())
                    DMLOut("Use <exec cmd=\"!PrintException /d %p\">!PrintException %p</exec> to see more.\n", taInnerExc, taInnerExc);
                else
                    ExtOut("Use !PrintException %p to see more.\n", SOS_PTR(taInnerExc));
            }
            else
            {
                ExtOut("<invalid MethodTable of inner exception>");
            }
        }
        else
        {
            ExtOut("<none>\n");
        }
    }

    BOOL bAsync = bGotExcData ? IsAsyncException(excData)
                              : IsAsyncException(taObj, objData.MethodTable);

    {
        TADDR taStackTrace = 0;
        if (bGotExcData)
        {
            taStackTrace = TO_TADDR(excData.StackTrace);
        }
        else
        {
            int iOffset = GetObjFieldOffset (taObj, objData.MethodTable, W("_stackTrace"));
            if (iOffset > 0)
            {
                MOVE(taStackTrace, taObj + iOffset);
            }
        }

        ExtOut("StackTrace (generated):\n");
        if (taStackTrace)
        {
            DWORD arrayLen;
            HRESULT hr = MOVE(arrayLen, taStackTrace + sizeof(DWORD_PTR));

            if (arrayLen != 0 && hr == S_OK)
            {
                // This code is accessing the StackTraceInfo class in the runtime.
                // See: https://github.com/dotnet/runtime/blob/master/src/coreclr/src/vm/clrex.h
#ifdef _TARGET_WIN64_
                DWORD_PTR dataPtr = taStackTrace + sizeof(DWORD_PTR) + sizeof(DWORD) + sizeof(DWORD);
#else
                DWORD_PTR dataPtr = taStackTrace + sizeof(DWORD_PTR) + sizeof(DWORD);
#endif // _TARGET_WIN64_
                size_t stackTraceSize = 0;
                MOVE (stackTraceSize, dataPtr);

                DWORD cbStackSize = static_cast<DWORD>(stackTraceSize * sizeof(StackTraceElement));
                dataPtr += sizeof(size_t) + sizeof(size_t); // skip the array header, then goes the data

                if (stackTraceSize == 0)
                {
                    ExtOut("Unable to decipher generated stack trace\n");
                }
                else
                {
                    // First pass with NULL buffers computes the required size,
                    // second pass formats into the allocated buffer.
                    size_t iHeaderLength = AddExceptionHeader (NULL, 0);
                    size_t iLength = FormatGeneratedException (dataPtr, cbStackSize, NULL, 0, bAsync, FALSE, bLineNumbers);
                    WCHAR *pwszBuffer = new NOTHROW WCHAR[iHeaderLength + iLength + 1];
                    if (pwszBuffer)
                    {
                        AddExceptionHeader(pwszBuffer, iHeaderLength + 1);
                        FormatGeneratedException(dataPtr, cbStackSize, pwszBuffer + iHeaderLength, iLength + 1, bAsync, FALSE, bLineNumbers);
                        SosExtOutLargeString(pwszBuffer, iHeaderLength + iLength + 1);

                        delete[] pwszBuffer;
                    }
                    ExtOut("\n");
                }
            }
            else
            {
                ExtOut("<Not Available>\n");
            }
        }
        else
        {
            ExtOut("<none>\n");
        }
    }

    {
        // Fix: initialize and guard the offset like the other field reads
        // above; previously taStackString was read uninitialized (and MOVE
        // was issued against a bogus address) when GetObjFieldOffset failed.
        TADDR taStackString = 0;
        if (bGotExcData)
        {
            taStackString = TO_TADDR(excData.StackTraceString);
        }
        else
        {
            int iOffset = GetObjFieldOffset (taObj, objData.MethodTable, W("_stackTraceString"));
            if (iOffset > 0)
            {
                MOVE (taStackString, taObj + iOffset);
            }
        }

        ExtOut("StackTraceString: ");
        if (taStackString)
        {
            StringObjectContent(taStackString);
            ExtOut("\n\n"); // extra newline looks better
        }
        else
        {
            ExtOut("<none>\n");
        }
    }

    {
        // Fix: same guard/initialization as above; previously an unchecked
        // offset could leave hResult uninitialized and print garbage.
        DWORD hResult = 0;
        if (bGotExcData)
        {
            hResult = excData.HResult;
        }
        else
        {
            int iOffset = GetObjFieldOffset (taObj, objData.MethodTable, W("_HResult"));
            if (iOffset > 0)
            {
                MOVE (hResult, taObj + iOffset);
            }
        }

        ExtOut("HResult: %lx\n", hResult);
    }

    if (isSecurityExceptionObj(objData.MethodTable) != NULL)
    {
        // We have a SecurityException Object: print out the debugString if present
        int iOffset = GetObjFieldOffset (taObj, objData.MethodTable, W("m_debugString"));
        if (iOffset > 0)
        {
            TADDR taDebugString;
            MOVE (taDebugString, taObj + iOffset);

            if (taDebugString)
            {
                ExtOut("SecurityException Message: ");
                StringObjectContent(taDebugString);
                ExtOut("\n\n"); // extra newline looks better
            }
        }
    }

    return Status;
}
// !PrintException [-nested] [-lines|-l] [-ccw] [/d] [<object address>]
// Prints the exception at the given address, the current thread's last-thrown
// exception when no address is given, and optionally any nested exceptions.
DECLARE_API(PrintException)
{
    INIT_API();

    BOOL dml = FALSE;
    BOOL bShowNested = FALSE;
    BOOL bLineNumbers = FALSE;
    BOOL bCCW = FALSE;
    StringHolder strObject;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-nested", &bShowNested, COBOOL, FALSE},
        {"-lines", &bLineNumbers, COBOOL, FALSE},
        {"-l", &bLineNumbers, COBOOL, FALSE},
        {"-ccw", &bCCW, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE}
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&strObject, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    if (bLineNumbers)
    {
        // -lines only works when the debugger has line info loaded; check the
        // symbol options and disable the flag (with a hint) when it doesn't.
        ULONG symlines = 0;
        if (SUCCEEDED(g_ExtSymbols->GetSymbolOptions(&symlines)))
        {
            symlines &= SYMOPT_LOAD_LINES;
        }
        if (symlines == 0)
        {
            ExtOut("In order for the option -lines to enable display of source information\n"
                   "the debugger must be configured to load the line number information from\n"
                   "the symbol files. Use the \".lines; .reload\" command to achieve this.\n");
            // don't even try
            bLineNumbers = FALSE;
        }
    }

    EnableDMLHolder dmlHolder(dml);
    DWORD_PTR p_Object = NULL;
    if (nArg == 0)
    {
        // No address given: use the last-thrown exception of the current thread.
        if (bCCW)
        {
            ExtOut("No CCW pointer specified\n");
            return Status;
        }

        // Look at the last exception object on this thread
        CLRDATA_ADDRESS threadAddr = GetCurrentManagedThread();
        DacpThreadData Thread;

        if ((threadAddr == NULL) || (Thread.Request(g_sos, threadAddr) != S_OK))
        {
            ExtOut("The current thread is unmanaged\n");
            return Status;
        }

        DWORD_PTR dwAddr = NULL;
        if ((!SafeReadMemory(TO_TADDR(Thread.lastThrownObjectHandle),
                            &dwAddr,
                            sizeof(dwAddr), NULL)) || (dwAddr==NULL))
        {
            ExtOut("There is no current managed exception on this thread\n");
        }
        else
        {
            p_Object = dwAddr;
        }
    }
    else
    {
        p_Object = GetExpression(strObject.data);
        if (p_Object == 0)
        {
            if (bCCW)
            {
                ExtOut("Invalid CCW pointer %s\n", args);
            }
            else
            {
                ExtOut("Invalid exception object %s\n", args);
            }
            return Status;
        }

        if (bCCW)
        {
            // check if the address is a CCW pointer and then
            // get the exception object from it
            DacpCCWData ccwData;
            if (ccwData.Request(g_sos, p_Object) == S_OK)
            {
                p_Object = TO_TADDR(ccwData.managedObject);
            }
        }
    }

    if (p_Object)
    {
        FormatException(TO_CDADDR(p_Object), bLineNumbers);
    }

    // Are there nested exceptions?
    CLRDATA_ADDRESS threadAddr = GetCurrentManagedThread();
    DacpThreadData Thread;

    if ((threadAddr == NULL) || (Thread.Request(g_sos, threadAddr) != S_OK))
    {
        ExtOut("The current thread is unmanaged\n");
        return Status;
    }

    if (Thread.firstNestedException)
    {
        if (!bShowNested)
        {
            ExtOut("There are nested exceptions on this thread. Run with -nested for details\n");
            return Status;
        }

        // Walk the nested-exception list, printing each one.
        CLRDATA_ADDRESS currentNested = Thread.firstNestedException;
        do
        {
            CLRDATA_ADDRESS obj = 0, next = 0;
            Status = g_sos->GetNestedExceptionData(currentNested, &obj, &next);

            if (Status != S_OK)
            {
                ExtOut("Error retrieving nested exception info %p\n", SOS_PTR(currentNested));
                return Status;
            }

            if (IsInterrupt())
            {
                ExtOut("<aborted>\n");
                return Status;
            }

            ExtOut("\nNested exception -------------------------------------------------------------\n");
            Status = FormatException(obj, bLineNumbers);
            if (Status != S_OK)
            {
                return Status;
            }

            currentNested = next;
        }
        while(currentNested != NULL);
    }
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of an object from a *
* given address
* *
\**********************************************************************/
// !DumpVC <Method Table> <Value object start addr>
// Dumps the contents of a value class instance given its method table
// and starting address.
DECLARE_API(DumpVC)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    DWORD_PTR p_MT = NULL;
    DWORD_PTR p_Object = NULL;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE}
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&p_MT, COHEX},
        {&p_Object, COHEX},
    };

    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);

    // Exactly two positional arguments are required.
    if (nArg != 2)
    {
        ExtOut("Usage: !DumpVC <Method Table> <Value object start addr>\n");
        return Status;
    }

    if (!IsMethodTable(p_MT))
    {
        ExtOut("Not a managed object\n");
        return S_OK;
    }

    return PrintVC(p_MT, p_Object);
}
#ifndef FEATURE_PAL
#ifdef FEATURE_COMINTEROP
// !DumpRCW [/d] <RCW address>
// Dumps a Runtime Callable Wrapper: its managed object, creating thread,
// ref counts, flags, and the cached COM interface pointers.
DECLARE_API(DumpRCW)
{
    INIT_API();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    BOOL dml = FALSE;
    StringHolder strObject;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE}
    };
    CMDValue arg[] =
    {   // vptr, type
        {&strObject, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    if (nArg == 0)
    {
        ExtOut("Missing RCW address\n");
        return Status;
    }
    else
    {
        DWORD_PTR p_RCW = GetExpression(strObject.data);
        if (p_RCW == 0)
        {
            ExtOut("Invalid RCW %s\n", args);
        }
        else
        {
            DacpRCWData rcwData;
            if ((Status = rcwData.Request(g_sos, p_RCW)) != S_OK)
            {
                ExtOut("Error requesting RCW data\n");
                return Status;
            }
            BOOL isDCOMProxy;
            // Best-effort query; default to FALSE when the DAC doesn't support it.
            if (FAILED(rcwData.IsDCOMProxy(g_sos, p_RCW, &isDCOMProxy)))
            {
                isDCOMProxy = FALSE;
            }

            DMLOut("Managed object:             %s\n", DMLObject(rcwData.managedObject));
            DMLOut("Creating thread:            %p\n", SOS_PTR(rcwData.creatorThread));
            ExtOut("IUnknown pointer:           %p\n", SOS_PTR(rcwData.unknownPointer));
            ExtOut("COM Context:                %p\n", SOS_PTR(rcwData.ctxCookie));
            ExtOut("Managed ref count:          %d\n", rcwData.refCount);
            ExtOut("IUnknown V-table pointer :  %p (captured at RCW creation time)\n", SOS_PTR(rcwData.vtablePtr));

            ExtOut("Flags:                      %s%s%s%s%s%s%s%s\n",
                (rcwData.isDisconnected? "IsDisconnected " : ""),
                (rcwData.supportsIInspectable? "SupportsIInspectable " : ""),
                (rcwData.isAggregated? "IsAggregated " : ""),
                (rcwData.isContained? "IsContained " : ""),
                (rcwData.isJupiterObject? "IsJupiterObject " : ""),
                (rcwData.isFreeThreaded? "IsFreeThreaded " : ""),
                (rcwData.identityPointer == TO_CDADDR(p_RCW)? "IsUnique " : ""),
                (isDCOMProxy ? "IsDCOMProxy " : "")
                );

            // Jupiter data hidden by default
            if (rcwData.isJupiterObject)
            {
                ExtOut("IJupiterObject:    %p\n", SOS_PTR(rcwData.jupiterObject));
            }

            ExtOut("COM interface pointers:\n");

            ArrayHolder<DacpCOMInterfacePointerData> pArray = new NOTHROW DacpCOMInterfacePointerData[rcwData.interfaceCount];
            if (pArray == NULL)
            {
                ReportOOM();
                return Status;
            }

            if ((Status = g_sos->GetRCWInterfaces(p_RCW, rcwData.interfaceCount, pArray, NULL)) != S_OK)
            {
                ExtOut("Error requesting COM interface pointers\n");
                return Status;
            }

            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s Type\n", "IP", "Context", "MT");
            for (int i = 0; i < rcwData.interfaceCount; i++)
            {
                // Ignore any NULL MethodTable interface cache. At this point only IJupiterObject
                // is saved as NULL MethodTable at first slot, and we've already printed outs its
                // value earlier.
                if (pArray[i].methodTable == NULL)
                    continue;

                NameForMT_s(TO_TADDR(pArray[i].methodTable), g_mdName, mdNameLen);

                DMLOut("%p %p %s %S\n", SOS_PTR(pArray[i].interfacePtr), SOS_PTR(pArray[i].comContext), DMLMethodTable(pArray[i].methodTable), g_mdName);
            }
        }
    }

    return Status;
}
// !DumpCCW [/d] <CCW address>
// Dumps a COM Callable Wrapper: its managed object, ref counts, flags,
// Jupiter pegging info, and the COM interface pointers it exposes.
DECLARE_API(DumpCCW)
{
    INIT_API();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    BOOL dml = FALSE;
    StringHolder strObject;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE}
    };
    CMDValue arg[] =
    {   // vptr, type
        {&strObject, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    if (nArg == 0)
    {
        ExtOut("Missing CCW address\n");
        return Status;
    }
    else
    {
        DWORD_PTR p_CCW = GetExpression(strObject.data);
        if (p_CCW == 0)
        {
            ExtOut("Invalid CCW %s\n", args);
        }
        else
        {
            DacpCCWData ccwData;
            if ((Status = ccwData.Request(g_sos, p_CCW)) != S_OK)
            {
                ExtOut("Error requesting CCW data\n");
                return Status;
            }

            // The user may have passed an interface pointer rather than the
            // CCW itself; show the canonical CCW address when they differ.
            if (ccwData.ccwAddress != p_CCW)
                ExtOut("CCW:               %p\n", SOS_PTR(ccwData.ccwAddress));

            DMLOut("Managed object:    %s\n", DMLObject(ccwData.managedObject));
            ExtOut("Outer IUnknown:    %p\n", SOS_PTR(ccwData.outerIUnknown));
            ExtOut("Ref count:         %d%s\n", ccwData.refCount, ccwData.isNeutered ? " (NEUTERED)" : "");
            ExtOut("Flags:             %s%s\n",
                (ccwData.isExtendsCOMObject? "IsExtendsCOMObject " : ""),
                (ccwData.isAggregated? "IsAggregated " : "")
                );

            // Jupiter information hidden by default
            if (ccwData.jupiterRefCount > 0)
            {
                ExtOut("Jupiter ref count: %d%s%s%s%s\n",
                    ccwData.jupiterRefCount,
                    (ccwData.isPegged || ccwData.isGlobalPegged) ? ", Pegged by" : "",
                    ccwData.isPegged ? " Jupiter " : "",
                    (ccwData.isPegged && ccwData.isGlobalPegged) ? "&" : "",
                    ccwData.isGlobalPegged ? " CLR " : ""
                    );
            }

            ExtOut("RefCounted Handle: %p%s\n",
                SOS_PTR(ccwData.handle),
                (ccwData.hasStrongRef ? " (STRONG)" : " (WEAK)"));

            ExtOut("COM interface pointers:\n");

            ArrayHolder<DacpCOMInterfacePointerData> pArray = new NOTHROW DacpCOMInterfacePointerData[ccwData.interfaceCount];
            if (pArray == NULL)
            {
                ReportOOM();
                return Status;
            }

            if ((Status = g_sos->GetCCWInterfaces(p_CCW, ccwData.interfaceCount, pArray, NULL)) != S_OK)
            {
                ExtOut("Error requesting COM interface pointers\n");
                return Status;
            }

            // Fix: the format string consumes only two %s arguments ("Type" is
            // a literal); the previous code passed a stray third "Type" argument.
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s Type\n", "IP", "MT");
            for (int i = 0; i < ccwData.interfaceCount; i++)
            {
                if (pArray[i].methodTable == NULL)
                {
                    wcscpy_s(g_mdName, mdNameLen, W("IDispatch/IUnknown"));
                }
                else
                {
                    NameForMT_s(TO_TADDR(pArray[i].methodTable), g_mdName, mdNameLen);
                }

                // Fix: wrap the 64-bit CLRDATA_ADDRESS in SOS_PTR for %p, as
                // done for RCW interfaces above; passing the raw ULONG64 to a
                // pointer-sized %p misaligns the varargs on 32-bit hosts.
                DMLOut("%p %s %S\n", SOS_PTR(pArray[i].interfacePtr), DMLMethodTable(pArray[i].methodTable), g_mdName);
            }
        }
    }

    return Status;
}
#endif // FEATURE_COMINTEROP
#ifdef _DEBUG
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a PermissionSet *
* from a given address. *
* *
\**********************************************************************/
/*
COMMAND: dumppermissionset.
!DumpPermissionSet <PermissionSet object address>
This command allows you to examine a PermissionSet object. Note that you can
also use DumpObj to examine such an object in greater detail. DumpPermissionSet attempts
to extract all the relevant information from a PermissionSet that you might be
interested in when performing Code Access Security (CAS) related debugging.
Here is a simple PermissionSet object:
0:000> !DumpPermissionSet 014615f4
PermissionSet object: 014615f4
Unrestricted: TRUE
Note that this is an unrestricted PermissionSet object that does not contain
any individual permissions.
Here is another example of a PermissionSet object, one that is not unrestricted
and contains a single permission:
0:003> !DumpPermissionSet 01469fa8
PermissionSet object: 01469fa8
Unrestricted: FALSE
Name: System.Security.Permissions.ReflectionPermission
MethodTable: 5b731308
EEClass: 5b7e0d78
Size: 12(0xc) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_32\mscorlib\2.0.
0.0__b77a5c561934e089\mscorlib.dll)
Fields:
MT Field Offset Type VT Attr Value Name
5b73125c 4001d66 4 System.Int32 0 instance 2 m_flags
Here is another example of an unrestricted PermissionSet, one that contains
several permissions. The numbers in parentheses before each Permission object
represents the index of that Permission in the PermissionSet.
0:003> !DumpPermissionSet 01467bd8
PermissionSet object: 01467bd8
Unrestricted: FALSE
[1] 01467e90
Name: System.Security.Permissions.FileDialogPermission
MethodTable: 5b73023c
EEClass: 5b7dfb18
Size: 12(0xc) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_32\mscorlib\2.0.0.0__b77a5c561934e089\mscorlib.dll)
Fields:
MT Field Offset Type VT Attr Value Name
5b730190 4001cc2 4 System.Int32 0 instance 1 access
[4] 014682a8
Name: System.Security.Permissions.ReflectionPermission
MethodTable: 5b731308
EEClass: 5b7e0d78
Size: 12(0xc) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_32\mscorlib\2.0.0.0__b77a5c561934e089\mscorlib.dll)
Fields:
MT Field Offset Type VT Attr Value Name
5b73125c 4001d66 4 System.Int32 0 instance 0 m_flags
[17] 0146c060
Name: System.Diagnostics.EventLogPermission
MethodTable: 569841c4
EEClass: 56a03e5c
Size: 28(0x1c) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_MSIL\System\2.0.0.0__b77a5c561934e089\System.dll)
Fields:
MT Field Offset Type VT Attr Value Name
5b6d65d4 4003078 4 System.Object[] 0 instance 0146c190 tagNames
5b6c9ed8 4003079 8 System.Type 0 instance 0146c17c permissionAccessType
5b6cd928 400307a 10 System.Boolean 0 instance 0 isUnrestricted
5b6c45f8 400307b c ...ections.Hashtable 0 instance 0146c1a4 rootTable
5b6c090c 4003077 bfc System.String 0 static 00000000 computerName
56984434 40030e7 14 ...onEntryCollection 0 instance 00000000 innerCollection
[18] 0146ceb4
Name: System.Net.WebPermission
MethodTable: 5696dfc4
EEClass: 569e256c
Size: 20(0x14) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_MSIL\System\2.0.0.0__b77a5c561934e089\System.dll)
Fields:
MT Field Offset Type VT Attr Value Name
5b6cd928 400238e c System.Boolean 0 instance 0 m_Unrestricted
5b6cd928 400238f d System.Boolean 0 instance 0 m_UnrestrictedConnect
5b6cd928 4002390 e System.Boolean 0 instance 0 m_UnrestrictedAccept
5b6c639c 4002391 4 ...ections.ArrayList 0 instance 0146cf3c m_connectList
5b6c639c 4002392 8 ...ections.ArrayList 0 instance 0146cf54 m_acceptList
569476f8 4002393 8a4 ...Expressions.Regex 0 static 00000000 s_MatchAllRegex
[19] 0146a5fc
Name: System.Net.DnsPermission
MethodTable: 56966408
EEClass: 569d3c08
Size: 12(0xc) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_MSIL\System\2.0.0.0__b77a5c561934e089\System.dll)
Fields:
MT Field Offset Type VT Attr Value Name
5b6cd928 4001d2c 4 System.Boolean 0 instance 1 m_noRestriction
[20] 0146d8ec
Name: System.Web.AspNetHostingPermission
MethodTable: 569831bc
EEClass: 56a02ccc
Size: 12(0xc) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_MSIL\System\2.0.0.0__b77a5c561934e089\System.dll)
Fields:
MT Field Offset Type VT Attr Value Name
56983090 4003074 4 System.Int32 0 instance 600 _level
[21] 0146e394
Name: System.Net.NetworkInformation.NetworkInformationPermission
MethodTable: 5697ac70
EEClass: 569f7104
Size: 16(0x10) bytes
(C:\WINDOWS\Microsoft.NET\Framework\v2.0.x86chk\assembly\GAC_MSIL\System\2.0.0.0__b77a5c561934e089\System.dll)
Fields:
MT Field Offset Type VT Attr Value Name
5697ab38 4002c34 4 System.Int32 0 instance 0 access
5b6cd928 4002c35 8 System.Boolean 0 instance 0 unrestricted
The abbreviation !dps can be used for brevity.
\\
*/
// !DumpPermissionSet <PermissionSet object addr>
// Extracts and prints the CAS-relevant contents of a PermissionSet object.
DECLARE_API(DumpPermissionSet)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    // Single required positional argument: the object address.
    DWORD_PTR p_Object = NULL;
    CMDValue arg[] =
    {
        {&p_Object, COHEX}
    };

    size_t nArg;
    if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
    {
        return Status;
    }
    if (nArg != 1)
    {
        ExtOut("Usage: !DumpPermissionSet <PermissionSet object addr>\n");
        return Status;
    }

    return PrintPermissionSet(p_Object);
}
#endif // _DEBUG
void GCPrintGenerationInfo(DacpGcHeapDetails &heap);
void GCPrintSegmentInfo(DacpGcHeapDetails &heap, DWORD_PTR &total_size);
#endif // FEATURE_PAL
// Warns the user that the GC data structures cannot be traversed reliably
// (e.g. the dump was taken mid-GC). Called before heap-walking commands run.
void DisplayInvalidStructuresMessage()
{
    ExtOut("The garbage collector data structures are not in a valid state for traversal.\n");
    ExtOut("It is either in the \"plan phase,\" where objects are being moved around, or\n");
    ExtOut("we are at the initialization or shutdown of the gc heap. Commands related to \n");
    ExtOut("displaying, finding or traversing objects as well as gc heap segments may not \n");
    ExtOut("work properly. !dumpheap and !verifyheap may incorrectly complain of heap \n");
    ExtOut("consistency errors.\n");
}
/**********************************************************************\
* Routine Description: *
* *
* This function dumps GC heap size. *
* *
\**********************************************************************/
// !EEHeap [-gc] [-loader] [/d]
// Summarizes EE memory usage: loader heaps, JIT code heap, module heaps,
// and the GC heap(s). With no flag both the loader and GC sections print.
DECLARE_API(EEHeap)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    BOOL dml = FALSE;
    BOOL showgc = FALSE;
    BOOL showloader = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-gc", &showgc, COBOOL, FALSE},
        {"-loader", &showloader, COBOOL, FALSE},
        {"/d", &dml, COBOOL, FALSE},
    };
    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    // Loader section prints when -loader was given or no section flag at all.
    if (showloader || !showgc)
    {
        // Loader heap.
        DWORD_PTR allHeapSize = 0;  // running total across all loader heaps
        DWORD_PTR wasted = 0;       // running total of wasted bytes
        DacpAppDomainStoreData adsData;
        if ((Status=adsData.Request(g_sos))!=S_OK)
        {
            ExtOut("Unable to get AppDomain information\n");
            return Status;
        }

        // The first one is the system domain.
        ExtOut("Loader Heap:\n");
        IfFailRet(PrintDomainHeapInfo("System Domain", adsData.systemDomain, &allHeapSize, &wasted));
        if (adsData.sharedDomain != NULL)
        {
            IfFailRet(PrintDomainHeapInfo("Shared Domain", adsData.sharedDomain, &allHeapSize, &wasted));
        }

        ArrayHolder<CLRDATA_ADDRESS> pArray = new NOTHROW CLRDATA_ADDRESS[adsData.DomainCount];

        if (pArray==NULL)
        {
            ReportOOM();
            return Status;
        }

        if ((Status=g_sos->GetAppDomainList(adsData.DomainCount, pArray, NULL))!=S_OK)
        {
            ExtOut("Unable to get the array of all AppDomains.\n");
            return Status;
        }

        // Per-AppDomain loader heaps, numbered from 1.
        for (int n=0;n<adsData.DomainCount;n++)
        {
            if (IsInterrupt())
                break;

            char domain[16];
            sprintf_s(domain, _countof(domain), "Domain %d", n+1);

            IfFailRet(PrintDomainHeapInfo(domain, pArray[n], &allHeapSize, &wasted));

        }

        // Jit code heap
        ExtOut("--------------------------------------\n");
        ExtOut("Jit code heap:\n");

        if (IsMiniDumpFile())
        {
            ExtOut("<no information>\n");
        }
        else
        {
            allHeapSize += JitHeapInfo();
        }


        // Module Data
        {
            int numModule;
            ArrayHolder<DWORD_PTR> moduleList = ModuleFromName(NULL, &numModule);
            if (moduleList == NULL)
            {
                ExtOut("Failed to request module list.\n");
            }
            else
            {
                // Module Thunk Heaps
                ExtOut("--------------------------------------\n");
                ExtOut("Module Thunk heaps:\n");
                allHeapSize += PrintModuleHeapInfo(moduleList, numModule, ModuleHeapType_ThunkHeap, &wasted);

                // Module Lookup Table Heaps
                ExtOut("--------------------------------------\n");
                ExtOut("Module Lookup Table heaps:\n");
                allHeapSize += PrintModuleHeapInfo(moduleList, numModule, ModuleHeapType_LookupTableHeap, &wasted);
            }
        }

        ExtOut("--------------------------------------\n");
        ExtOut("Total LoaderHeap size:   ");
        PrintHeapSize(allHeapSize, wasted);
        ExtOut("=======================================\n");
    }

    // GC section prints when -gc was given or no section flag at all.
    if (showgc || !showloader)
    {
        // GC Heap
        DWORD dwNHeaps = 1;

        if (!GetGcStructuresValid())
        {
            DisplayInvalidStructuresMessage();
        }

        DacpGcHeapData gcheap;
        if (gcheap.Request(g_sos) != S_OK)
        {
            ExtOut("Error requesting GC Heap data\n");
            return Status;
        }

        if (gcheap.bServerMode)
        {
            dwNHeaps = gcheap.HeapCount;
        }

        ExtOut("Number of GC Heaps: %d\n", dwNHeaps);
        DWORD_PTR totalSize = 0;
        if (!gcheap.bServerMode)
        {
            // Workstation GC: a single heap, requested without an address.
            DacpGcHeapDetails heapDetails;
            if (heapDetails.Request(g_sos) != S_OK)
            {
                ExtOut("Error requesting details\n");
                return Status;
            }

            GCHeapInfo (heapDetails, totalSize);
            ExtOut("Total Size:              ");
            PrintHeapSize(totalSize, 0);
        }
        else
        {
            // Server GC: enumerate every heap and sum their sizes.
            DWORD dwAllocSize;
            if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
            {
                ExtOut("Failed to get GCHeaps:  integer overflow\n");
                return Status;
            }

            CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
            if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
            {
                ExtOut("Failed to get GCHeaps\n");
                return Status;
            }

            DWORD n;
            for (n = 0; n < dwNHeaps; n ++)
            {
                DacpGcHeapDetails heapDetails;
                if (heapDetails.Request(g_sos, heapAddrs[n]) != S_OK)
                {
                    ExtOut("Error requesting details\n");
                    return Status;
                }
                ExtOut("------------------------------\n");
                ExtOut("Heap %d (%p)\n", n, SOS_PTR(heapAddrs[n]));
                DWORD_PTR heapSize = 0;
                GCHeapInfo (heapDetails, heapSize);
                totalSize += heapSize;
                ExtOut("Heap Size:       " WIN86_8SPACES);
                PrintHeapSize(heapSize, 0);
            }
        }
        ExtOut("------------------------------\n");
        ExtOut("GC Heap Size:    " WIN86_8SPACES);
        PrintHeapSize(totalSize, 0);
    }
    return Status;
}
// Sorts and prints a HeapStat table, then releases it. A NULL inStat is a
// no-op. Any exception during Sort/Print is reported rather than propagated.
void PrintGCStat(HeapStat *inStat, const char* label=NULL)
{
    if (inStat == NULL)
    {
        return;
    }

    bool sorted = false;
    try
    {
        inStat->Sort();
        sorted = true;
        inStat->Print(label);
    }
    catch(...)
    {
        // Report which phase failed: sort (before the flag flips) or print.
        ExtOut("Exception occurred while trying to %s the GC stats.\n", sorted ? "print" : "sort");
    }

    inStat->Delete();
}
#ifndef FEATURE_PAL
/**********************************************************************\
* !TraverseHeap [-xml] [-verify] filename                              *
*                                                                      *
* Walks the GC heap and writes a profile of every object and its       *
* references to the given file, in CLRProfiler or (-xml) XML format.   *
* -verify additionally validates each object during the walk, which    *
* is recommended for crash dumps where the heap may be corrupt.        *
\**********************************************************************/
DECLARE_API(TraverseHeap)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    BOOL bXmlFormat = FALSE;
    BOOL bVerify = FALSE;
    StringHolder Filename;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-xml", &bXmlFormat, COBOOL, FALSE},
        {"-verify", &bVerify, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&Filename.data, COSTRING},
    };

    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    if (nArg != 1)
    {
        // Fix: the usage text previously said "HeapTraverse" (not the actual
        // command name) and did not mention the -verify option.
        ExtOut("usage: TraverseHeap [-xml] [-verify] filename\n");
        return Status;
    }

    if (!g_snapshot.Build())
    {
        ExtOut("Unable to build snapshot of the garbage collector state\n");
        return Status;
    }

    FILE* file = NULL;
    if (fopen_s(&file, Filename.data, "w") != 0) {
        ExtOut("Unable to open file\n");
        return Status;
    }

    if (!bVerify)
        ExtOut("Assuming an uncorrupted GC heap. If this is a crash dump consider -verify option\n");

    HeapTraverser traverser(bVerify != FALSE);

    ExtOut("Writing %s format to file %s\n", bXmlFormat ? "Xml" : "CLRProfiler", Filename.data);
    ExtOut("Gathering types...\n");

    // TODO: there may be a canonical list of methodtables in the runtime that we can
    // traverse instead of exploring the gc heap for that list. We could then simplify the
    // tree structure to a sorted list of methodtables, and the index is the ID.

    // TODO: "Traversing object members" code should be generalized and shared between
    // !gcroot and !traverseheap. Also !dumpheap can begin using GCHeapsTraverse.

    if (!traverser.Initialize())
    {
        ExtOut("Error initializing heap traversal\n");
        fclose(file);
        return Status;
    }

    if (!traverser.CreateReport (file, bXmlFormat ? FORMAT_XML : FORMAT_CLRPROFILER))
    {
        ExtOut("Unable to write heap report\n");
        fclose(file);
        return Status;
    }

    // The file is closed on every exit path above; close it on success too.
    fclose(file);
    ExtOut("\nfile %s saved\n", Filename.data);

    return Status;
}
#endif // FEATURE_PAL
// Shared state threaded through GCHeapsTraverse into PrintRuntimeTypes.
// Zero-initialized by DumpRuntimeTypes before the traversal starts.
struct PrintRuntimeTypeArgs
{
    DWORD_PTR mtOfRuntimeType;      // MT of System.RuntimeType, discovered lazily on first match (0 until then)
    int handleFieldOffset;          // offset of RuntimeType.m_handle within the object (<= 0 means lookup failed)
    DacpAppDomainStoreData adstore; // cached AppDomain store, used to label Shared/System domains
};
// GCHeapsTraverse callback: prints one output row per System.RuntimeType
// instance found on the heap (address, owning AppDomain, wrapped MT, and
// the type's name).  "token" is the PrintRuntimeTypeArgs set up by
// DumpRuntimeTypes; non-RuntimeType objects are ignored.
void PrintRuntimeTypes(DWORD_PTR objAddr,size_t Size,DWORD_PTR methodTable,LPVOID token)
{
    PrintRuntimeTypeArgs *pArgs = (PrintRuntimeTypeArgs *)token;

    // One-time lazy init: the first time we see a System.RuntimeType object,
    // remember its MT and the m_handle field offset so later objects can be
    // matched by MT compare alone (cheaper than a name lookup per object).
    if (pArgs->mtOfRuntimeType == NULL)
    {
        NameForMT_s(methodTable, g_mdName, mdNameLen);

        if (_wcscmp(g_mdName, W("System.RuntimeType")) == 0)
        {
            pArgs->mtOfRuntimeType = methodTable;
            pArgs->handleFieldOffset = GetObjFieldOffset(TO_CDADDR(objAddr), TO_CDADDR(methodTable), W("m_handle"));
            if (pArgs->handleFieldOffset <= 0)
                ExtOut("Error getting System.RuntimeType.m_handle offset\n");

            pArgs->adstore.Request(g_sos);
        }
    }

    // Only objects whose MT matches RuntimeType's MT (and only once the
    // m_handle offset was resolved) produce output.
    if ((methodTable == pArgs->mtOfRuntimeType) && (pArgs->handleFieldOffset > 0))
    {
        // Get the method table and display the information.
        DWORD_PTR mtPtr;
        if (MOVE(mtPtr, objAddr + pArgs->handleFieldOffset) == S_OK)
        {
            DMLOut(DMLObject(objAddr));

            CLRDATA_ADDRESS appDomain = GetAppDomainForMT(mtPtr);
            if (appDomain != NULL)
            {
                // Shared/System domains get fixed labels; others get a DML link.
                if (appDomain == pArgs->adstore.sharedDomain)
                    ExtOut(" %" POINTERSIZE "s", "Shared");

                else if (appDomain == pArgs->adstore.systemDomain)
                    ExtOut(" %" POINTERSIZE "s", "System");
                else
                    DMLOut(" %s", DMLDomain(appDomain));
            }
            else
            {
                ExtOut(" %" POINTERSIZE "s", "?");
            }

            NameForMT_s(mtPtr, g_mdName, mdNameLen);
            DMLOut(" %s %S\n", DMLMethodTable(mtPtr), g_mdName);
        }
    }
}
// !DumpRuntimeTypes: walks the GC heap and lists every System.RuntimeType
// instance, showing the object address, owning AppDomain, and the method
// table the RuntimeType wraps.  /d enables DML output.
DECLARE_API(DumpRuntimeTypes)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    BOOL useDml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &useDml, COBOOL, FALSE},
    };

    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
        return Status;

    EnableDMLHolder dmlHolder(useDml);

    // Table header, then one row per RuntimeType emitted by the callback.
    ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s Type Name \n",
           "Address", "Domain", "MT");
    ExtOut("------------------------------------------------------------------------------\n");

    PrintRuntimeTypeArgs typeArgs;
    ZeroMemory(&typeArgs, sizeof(PrintRuntimeTypeArgs));

    GCHeapsTraverse(PrintRuntimeTypes, (LPVOID)&typeArgs);
    return Status;
}
#define MIN_FRAGMENTATIONBLOCK_BYTES (1024*512)
namespace sos
{
    // A single large free object ("hole") observed during a heap walk.
    // DumpHeap records one of these for every qualifying free block so a
    // fragmentation report can be printed afterwards: the hole's address
    // and size, plus the object (and its method table) that immediately
    // follows the hole.
    class FragmentationBlock
    {
    public:
        FragmentationBlock(TADDR addr, size_t size, TADDR next, TADDR mt)
            : mAddr(addr), mBlockSize(size), mNextObj(next), mNextObjMT(mt)
        {
        }

        // Start address of the free block.
        inline TADDR GetAddress() const { return mAddr; }

        // Size of the free block, in bytes.
        inline size_t GetSize() const { return mBlockSize; }

        // Address of the object immediately following the free block.
        inline TADDR GetNextObject() const { return mNextObj; }

        // Method table of the object immediately following the free block.
        inline TADDR GetNextMT() const { return mNextObjMT; }

    private:
        TADDR mAddr;
        size_t mBlockSize;
        TADDR mNextObj;
        TADDR mNextObjMT;
    };
}
// Implementation of the !DumpHeap command family.  The constructor parses
// the command line; Run() then walks the GC heap and produces the requested
// report: the default per-object listing plus per-MT statistics, or one of
// the specialty modes (-short, -thinlock, -strings).  On Windows it also
// records free blocks >= MIN_FRAGMENTATIONBLOCK_BYTES so a fragmentation
// report can be printed after the walk.
class DumpHeapImpl
{
public:
    // Parses the DumpHeap command line.  Throws sos::Exception on invalid
    // or conflicting arguments (both -mt and -type, both -live and -dead,
    // or -min greater than -max).
    DumpHeapImpl(PCSTR args)
        : mStart(0), mStop(0), mMT(0), mMinSize(0), mMaxSize(~0),
          mStat(FALSE), mStrings(FALSE), mVerify(FALSE),
          mThinlock(FALSE), mShort(FALSE), mDML(FALSE),
          mLive(FALSE), mDead(FALSE), mType(NULL)
    {
        ArrayHolder<char> type = NULL;

        TADDR minTemp = 0;
        CMDOption option[] =
        {   // name, vptr, type, hasValue
            {"-mt", &mMT, COHEX, TRUE},              // dump objects with a given MethodTable
            {"-type", &type, COSTRING, TRUE},        // list objects of specified type
            {"-stat", &mStat, COBOOL, FALSE},        // dump a summary of types and the number of instances of each
            {"-strings", &mStrings, COBOOL, FALSE},  // dump a summary of string objects
            {"-verify", &mVerify, COBOOL, FALSE},    // verify heap objects (!heapverify)
            {"-thinlock", &mThinlock, COBOOL, FALSE},// list only thinlocks
            {"-short", &mShort, COBOOL, FALSE},      // list only addresses
            {"-min", &mMinSize, COHEX, TRUE},        // min size of objects to display
            {"-max", &mMaxSize, COHEX, TRUE},        // max size of objects to display
            {"-live", &mLive, COHEX, FALSE},         // only print live objects
            {"-dead", &mDead, COHEX, FALSE},         // only print dead objects
#ifndef FEATURE_PAL
            {"/d", &mDML, COBOOL, FALSE},            // Debugger Markup Language
#endif
        };

        CMDValue arg[] =
        {   // vptr, type
            {&mStart, COHEX},
            {&mStop, COHEX}
        };

        size_t nArgs = 0;
        if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArgs))
            sos::Throw<sos::Exception>("Failed to parse command line arguments.");

        if (mStart == 0)
            mStart = minTemp;

        if (mStop == 0)
            mStop = sos::GCHeap::HeapEnd;

        if (type && mMT)
        {
            sos::Throw<sos::Exception>("Cannot specify both -mt and -type");
        }

        if (mLive && mDead)
        {
            sos::Throw<sos::Exception>("Cannot specify both -live and -dead.");
        }

        if (mMinSize > mMaxSize)
        {
            sos::Throw<sos::Exception>("wrong argument");
        }

        // If the user gave us a type, convert it to unicode and clean up "type".
        if (type && !mStrings)
        {
            size_t iLen = strlen(type) + 1;
            mType = new WCHAR[iLen];
            MultiByteToWideChar(CP_ACP, 0, type, -1, mType, (int)iLen);
        }
    }

    ~DumpHeapImpl()
    {
        if (mType)
            delete [] mType;
    }

    // Entry point: validates GC structures, optionally computes liveness
    // (-live/-dead, Windows only), dispatches to the requested output mode,
    // and runs the syncblock validation pass when -verify was given.
    void Run()
    {
        // enable Debugger Markup Language
        EnableDMLHolder dmlholder(mDML);
        sos::GCHeap gcheap;

        if (!gcheap.AreGCStructuresValid())
            DisplayInvalidStructuresMessage();

        if (IsMiniDumpFile())
        {
            ExtOut("In a minidump without full memory, most gc heap structures will not be valid.\n");
            ExtOut("If you need this functionality, get a full memory dump with \".dump /ma mydump.dmp\"\n");
        }

#ifndef FEATURE_PAL
        if (IsWindowsTarget() && (mLive || mDead))
        {
            GCRootImpl gcroot;
            mLiveness = gcroot.GetLiveObjects();
        }
#endif

        // Some of the "specialty" versions of DumpHeap have slightly
        // different implementations than the standard version of DumpHeap.
        // We seperate them out to not clutter the standard DumpHeap function.
        if (mShort)
            DumpHeapShort(gcheap);
        else if (mThinlock)
            DumpHeapThinlock(gcheap);
        else if (mStrings)
            DumpHeapStrings(gcheap);
        else
            DumpHeap(gcheap);

        if (mVerify)
            ValidateSyncTable(gcheap);
    }

    // Checks every in-use syncblock: the object it points at must be valid,
    // and that object's header must point back at the same syncblock index.
    // Returns false (after reporting each problem) if any check fails.
    static bool ValidateSyncTable(sos::GCHeap &gcheap)
    {
        bool succeeded = true;
        for (sos::SyncBlkIterator itr; itr; ++itr)
        {
            sos::CheckInterrupt();

            if (!itr->IsFree())
            {
                if (!sos::IsObject(itr->GetObject(), true))
                {
                    ExtOut("SyncBlock %d corrupted, points to invalid object %p\n",
                            itr->GetIndex(), SOS_PTR(itr->GetObject()));
                        succeeded = false;
                }
                else
                {
                    // Does the object header point to this syncblock index?
                    sos::Object obj = itr->GetObject();
                    ULONG header = 0;
                    if (!obj.TryGetHeader(header))
                    {
                        ExtOut("Failed to get object header for object %p while inspecting syncblock at index %d.\n",
                                SOS_PTR(itr->GetObject()), itr->GetIndex());
                        succeeded = false;
                    }
                    else
                    {
                        bool valid = false;
                        if ((header & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) != 0 && (header & BIT_SBLK_IS_HASHCODE) == 0)
                        {
                            ULONG index = header & MASK_SYNCBLOCKINDEX;
                            valid = (ULONG)itr->GetIndex() == index;
                        }

                        if (!valid)
                        {
                            ExtOut("Object header for %p should have a SyncBlock index of %d.\n",
                                    SOS_PTR(itr->GetObject()), itr->GetIndex());
                            succeeded = false;
                        }
                    }
                }
            }
        }

        return succeeded;
    }
private:
    DumpHeapImpl(const DumpHeapImpl &);

    // Verifies the current object when -verify was requested; returns false
    // (after printing the verifier's message) when corruption is found.
    bool Verify(const sos::ObjectIterator &itr)
    {
        if (mVerify)
        {
            char buffer[1024];
            if (!itr.Verify(buffer, _countof(buffer)))
            {
                // Print through "%s": the verifier's message may contain '%'
                // characters which must not be treated as format specifiers.
                ExtOut("%s", buffer);
                return false;
            }
        }

        return true;
    }

    // True when the object passes the -mt / -type filter (or no filter given).
    bool IsCorrectType(const sos::Object &obj)
    {
        if (mMT != NULL)
            return mMT == obj.GetMT();

        if (mType != NULL)
        {
            WString name = obj.GetTypeName();
            return _wcsstr(name.c_str(), mType) != NULL;
        }

        return true;
    }

    // True when the object's size is within the -min/-max bounds.
    bool IsCorrectSize(const sos::Object &obj)
    {
        size_t size = obj.GetSize();
        return size >= mMinSize && size <= mMaxSize;
    }

    // True when the object passes the -live / -dead filter.  Liveness is
    // only computed on Windows targets; elsewhere everything passes.
    bool IsCorrectLiveness(const sos::Object &obj)
    {
#ifndef FEATURE_PAL
        if (IsWindowsTarget() && mLive && mLiveness.find(obj.GetAddress()) == mLiveness.end())
            return false;

        if (IsWindowsTarget() && mDead && (mLiveness.find(obj.GetAddress()) != mLiveness.end() || obj.IsFree()))
            return false;
#endif
        return true;
    }

    inline void PrintHeader()
    {
        ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %8s\n", "Address", "MT", "Size");
    }

    // Default mode: lists each matching object, accumulates per-MT stats,
    // and tracks free blocks (outside the LOH) for the fragmentation report.
    void DumpHeap(sos::GCHeap &gcheap)
    {
        HeapStat stats;

        // For heap fragmentation tracking.
        TADDR lastFreeObj = NULL;
        size_t lastFreeSize = 0;

        if (!mStat)
            PrintHeader();

        for (sos::ObjectIterator itr = gcheap.WalkHeap(mStart, mStop); itr; ++itr)
        {
            if (!Verify(itr))
                return;

            bool onLOH = itr.IsCurrObjectOnLOH();

            // Check for free objects to report fragmentation
            if (lastFreeObj != NULL)
                ReportFreeObject(lastFreeObj, lastFreeSize, itr->GetAddress(), itr->GetMT());

            if (!onLOH && itr->IsFree())
            {
                lastFreeObj = *itr;
                lastFreeSize = itr->GetSize();
            }
            else
            {
                lastFreeObj = NULL;
            }

            if (IsCorrectType(*itr) && IsCorrectSize(*itr) && IsCorrectLiveness(*itr))
            {
                stats.Add((DWORD_PTR)itr->GetMT(), (DWORD)itr->GetSize());
                if (!mStat)
                    DMLOut("%s %s %8d%s\n", DMLObject(itr->GetAddress()), DMLDumpHeapMT(itr->GetMT()), itr->GetSize(),
                                            itr->IsFree() ? " Free":"     ");
            }
        }

        if (!mStat)
            ExtOut("\n");

        stats.Sort();
        stats.Print();

        PrintFragmentationReport();
    }

    // One entry per distinct string value seen on the heap, keyed by the
    // (truncated) string text; count/size are mutable so the std::set
    // element can be updated in place without re-keying.
    struct StringSetEntry
    {
        StringSetEntry() : count(0), size(0)
        {
            str[0] = 0;
        }

        StringSetEntry(__in_ecount(64) WCHAR tmp[64], size_t _size)
            : count(1), size(_size)
        {
            memcpy(str, tmp, sizeof(str));
        }

        void Add(size_t _size) const
        {
            count++;
            size += _size;
        }

        mutable size_t count;
        mutable size_t size;
        WCHAR str[64];

        bool operator<(const StringSetEntry &rhs) const
        {
            return _wcscmp(str, rhs.str) < 0;
        }
    };


    static bool StringSetCompare(const StringSetEntry &a1, const StringSetEntry &a2)
    {
        return a1.size < a2.size;
    }

    // -strings mode: groups string objects by their (first 63 chars of)
    // text and prints total size / instance count per distinct value,
    // sorted by total size.  Windows targets only.
    void DumpHeapStrings(sos::GCHeap &gcheap)
    {
#ifdef FEATURE_PAL
        ExtOut("Not implemented.\n");
#else
        if (!IsWindowsTarget())
        {
            ExtOut("Not implemented.\n");
            return;
        }
        const int offset = sos::Object::GetStringDataOffset();
        typedef std::set<StringSetEntry> Set;
        Set set;            // A set keyed off of the string's text
        StringSetEntry tmp; // Temp string used to keep track of the set
        ULONG fetched = 0;

        TableOutput out(3, POINTERSIZE_HEX, AlignRight);
        for (sos::ObjectIterator itr = gcheap.WalkHeap(mStart, mStop); itr; ++itr)
        {
            if (IsInterrupt())
                break;

            if (itr->IsString() && IsCorrectSize(*itr) && IsCorrectLiveness(*itr))
            {
                CLRDATA_ADDRESS addr = itr->GetAddress();
                size_t size = itr->GetSize();

                if (!mStat)
                    out.WriteRow(ObjectPtr(addr), Pointer(itr->GetMT()), Decimal(size));

                // Don't bother calculating the size of the string, just read the full 64 characters of the buffer.  The null
                // terminator we read will terminate the string.
                HRESULT hr = g_ExtData->ReadVirtual(TO_CDADDR(addr+offset), tmp.str, sizeof(WCHAR)*(_countof(tmp.str)-1), &fetched);
                if (SUCCEEDED(hr))
                {
                    // Ensure we null terminate the string.  Note that this will not overrun the buffer as we only
                    // wrote a max of 63 characters into the 64 character buffer.
                    tmp.str[fetched/sizeof(WCHAR)] = 0;
                    Set::iterator sitr = set.find(tmp);
                    if (sitr == set.end())
                    {
                        tmp.size = size;
                        tmp.count = 1;
                        set.insert(tmp);
                    }
                    else
                    {
                        sitr->Add(size);
                    }
                }
            }
        }

        ExtOut("\n");

        // Now flatten the set into a vector.  This is much faster than keeping two sets, or using a multimap.
        typedef std::vector<StringSetEntry> Vect;
        Vect v(set.begin(), set.end());
        std::sort(v.begin(), v.end(), &DumpHeapImpl::StringSetCompare);

        // Now print out the data.  The call to Flatten ensures that we don't print newlines to break up the
        // output in strange ways.
        for (Vect::iterator vitr = v.begin(); vitr != v.end(); ++vitr)
        {
            if (IsInterrupt())
                break;

            Flatten(vitr->str, (unsigned int)_wcslen(vitr->str));
            out.WriteRow(Decimal(vitr->size), Decimal(vitr->count), vitr->str);
        }
#endif // FEATURE_PAL
    }

    // -short mode: prints only the address of each matching object, one per
    // line (convenient for scripting).
    void DumpHeapShort(sos::GCHeap &gcheap)
    {
        for (sos::ObjectIterator itr = gcheap.WalkHeap(mStart, mStop); itr; ++itr)
        {
            if (!Verify(itr))
                return;

            if (IsCorrectType(*itr) && IsCorrectSize(*itr) && IsCorrectLiveness(*itr))
                DMLOut("%s\n", DMLObject(itr->GetAddress()));
        }
    }

    // -thinlock mode: prints only objects whose header holds a thinlock,
    // along with the owning thread and recursion count.
    void DumpHeapThinlock(sos::GCHeap &gcheap)
    {
        int count = 0;

        PrintHeader();
        for (sos::ObjectIterator itr = gcheap.WalkHeap(mStart, mStop); itr; ++itr)
        {
            if (!Verify(itr))
                return;

            sos::ThinLockInfo lockInfo;
            if (IsCorrectType(*itr) && itr->GetThinLock(lockInfo))
            {
                DMLOut("%s %s %8d", DMLObject(itr->GetAddress()), DMLDumpHeapMT(itr->GetMT()), itr->GetSize());
                ExtOut(" ThinLock owner %x (%p) Recursive %x\n", lockInfo.ThreadId,
                                        SOS_PTR(lockInfo.ThreadPtr), lockInfo.Recursion);

                count++;
            }
        }

        ExtOut("Found %d objects.\n", count);
    }

private:
    TADDR mStart,
          mStop,
          mMT,
          mMinSize,
          mMaxSize;

    BOOL mStat,
         mStrings,
         mVerify,
         mThinlock,
         mShort,
         mDML,
         mLive,
         mDead;


    WCHAR *mType;

private:
#if !defined(FEATURE_PAL)
    // Windows only
    std::unordered_set<TADDR> mLiveness;
    typedef std::list<sos::FragmentationBlock> FragmentationList;
    FragmentationList mFrag;

    void InitFragmentationList()
    {
        if (!IsWindowsTarget())
        {
            return;
        }
        mFrag.clear();
    }

    // Records a free block if it is large enough to be interesting
    // (>= MIN_FRAGMENTATIONBLOCK_BYTES).
    void ReportFreeObject(TADDR addr, size_t size, TADDR next, TADDR mt)
    {
        if (!IsWindowsTarget())
        {
            return;
        }
        if (size >= MIN_FRAGMENTATIONBLOCK_BYTES)
            mFrag.push_back(sos::FragmentationBlock(addr, size, next, mt));
    }

    // Prints the accumulated list of large free blocks, if any.
    void PrintFragmentationReport()
    {
        if (!IsWindowsTarget())
        {
            return;
        }
        if (mFrag.size() > 0)
        {
            ExtOut("Fragmented blocks larger than 0.5 MB:\n");
            ExtOut("%" POINTERSIZE "s %8s %16s\n", "Addr", "Size", "Followed by");

            for (FragmentationList::const_iterator itr = mFrag.begin(); itr != mFrag.end(); ++itr)
            {
                sos::MethodTable mt = itr->GetNextMT();
                ExtOut("%p %6.1fMB " WIN64_8SPACES "%p %S\n",
                            SOS_PTR(itr->GetAddress()),
                            ((double)itr->GetSize()) / 1024.0 / 1024.0,
                            SOS_PTR(itr->GetNextObject()),
                            mt.GetName());
            }
        }
    }
#else
    void InitFragmentationList() {}
    // Fix: parameter order now matches the Windows implementation and the
    // call site: (addr, size, next, mt).  The old declaration listed the
    // types in a different order and only compiled because TADDR and
    // size_t are the same underlying type on PAL platforms.
    void ReportFreeObject(TADDR, size_t, TADDR, TADDR) {}
    void PrintFragmentationReport() {}
#endif
};
/**********************************************************************\
* Routine Description:                                                 *
*                                                                      *
*    The DumpAsync command (implemented below, together with these     *
*    helper routines) dumps async state machines on the GC heap,       *
*    displaying details about each async operation found.              *
*    (May not work if GC is in progress.)                              *
*                                                                      *
\**********************************************************************/
// Follows a continuation object to the most interesting underlying object
// for display purposes, updating *contAddr in place.  Handles, in order:
// async method boxes (left as-is), standard task continuations (m_task),
// action wrappers (m_action -> _target), and ContinuationWrapper
// (_continuation -> _target).  At every hop the candidate is validated
// with sos::IsObject before it is followed, so *contAddr always ends up
// pointing at a valid object.
void ResolveContinuation(CLRDATA_ADDRESS* contAddr)
{
    // Ideally this continuation is itself an async method box.
    sos::Object contObj = TO_TADDR(*contAddr);
    if (GetObjFieldOffset(contObj.GetAddress(), contObj.GetMT(), W("StateMachine")) == 0)
    {
        // It was something else.

        // If it's a standard task continuation, get its task field.
        int offset;
        if ((offset = GetObjFieldOffset(contObj.GetAddress(), contObj.GetMT(), W("m_task"))) != 0)
        {
            MOVE(*contAddr, contObj.GetAddress() + offset);
            if (sos::IsObject(*contAddr, false))
            {
                contObj = TO_TADDR(*contAddr);
            }
        }
        else
        {
            // If it's storing an action wrapper, try to follow to that action's target.
            if ((offset = GetObjFieldOffset(contObj.GetAddress(), contObj.GetMT(), W("m_action"))) != 0)
            {
                MOVE(*contAddr, contObj.GetAddress() + offset);
                if (sos::IsObject(*contAddr, false))
                {
                    contObj = TO_TADDR(*contAddr);
                }
            }

            // If we now have an Action, try to follow through to the delegate's target.
            if ((offset = GetObjFieldOffset(contObj.GetAddress(), contObj.GetMT(), W("_target"))) != 0)
            {
                MOVE(*contAddr, contObj.GetAddress() + offset);
                if (sos::IsObject(*contAddr, false))
                {
                    contObj = TO_TADDR(*contAddr);

                    // In some cases, the delegate's target might be a ContinuationWrapper, in which case we want to unwrap that as well.
                    if (_wcsncmp(contObj.GetTypeName(), W("System.Runtime.CompilerServices.AsyncMethodBuilderCore+ContinuationWrapper"), 74) == 0 &&
                        (offset = GetObjFieldOffset(contObj.GetAddress(), contObj.GetMT(), W("_continuation"))) != 0)
                    {
                        MOVE(*contAddr, contObj.GetAddress() + offset);
                        if (sos::IsObject(*contAddr, false))
                        {
                            contObj = TO_TADDR(*contAddr);
                            if ((offset = GetObjFieldOffset(contObj.GetAddress(), contObj.GetMT(), W("_target"))) != 0)
                            {
                                MOVE(*contAddr, contObj.GetAddress() + offset);
                                if (sos::IsObject(*contAddr, false))
                                {
                                    contObj = TO_TADDR(*contAddr);
                                }
                            }
                        }
                    }
                }
            }
        }

        // Use whatever object we ended with.
        *contAddr = contObj.GetAddress();
    }
}
// Reads a task's m_continuationObject field.  On success, stores the
// resolved continuation (see ResolveContinuation) into *contAddr and
// returns true.  Returns false when the field is absent or does not
// reference a valid object.
bool TryGetContinuation(CLRDATA_ADDRESS addr, CLRDATA_ADDRESS mt, CLRDATA_ADDRESS* contAddr)
{
    // Get the continuation field from the task.
    int fieldOffset = GetObjFieldOffset(addr, mt, W("m_continuationObject"));
    if (fieldOffset == 0)
        return false;

    DWORD_PTR continuation;
    MOVE(continuation, addr + fieldOffset);
    if (!sos::IsObject(continuation, false))
        return false;

    *contAddr = TO_CDADDR(continuation);
    ResolveContinuation(contAddr);
    return true;
}
// Aggregated information about one async-related heap object: either an
// async state machine box or (with -tasks) any Task-derived object.
// Populated by DumpAsync while walking the heap.
struct AsyncRecord
{
    CLRDATA_ADDRESS Address;                    // address of the heap object itself
    CLRDATA_ADDRESS MT;                         // method table of the heap object
    DWORD Size;                                 // object size in bytes
    CLRDATA_ADDRESS StateMachineAddr;           // address of the state machine (interior pointer for value types)
    CLRDATA_ADDRESS StateMachineMT;             // method table of the state machine type
    BOOL FilteredByOptions;                     // passes the user's -addr/-mt/-type filter
    BOOL IsStateMachine;                        // object has a StateMachine field (async method box)
    BOOL IsValueType;                           // state machine is a struct (vs. class in debug builds)
    BOOL IsTopLevel;                            // not referenced as a continuation of another record
    int TaskStateFlags;                         // Task.m_stateFlags value (0 if field not found)
    int StateValue;                             // compiler-generated <>1__state value
    std::vector<CLRDATA_ADDRESS> Continuations; // resolved continuation objects registered with this task
};
// Returns true when the task has reached any terminal state:
// RanToCompletion, Faulted, or Canceled.
bool AsyncRecordIsCompleted(AsyncRecord& ar)
{
    const int TASK_STATE_RAN_TO_COMPLETION = 0x1000000;
    const int TASK_STATE_FAULTED = 0x200000;
    const int TASK_STATE_CANCELED = 0x400000;

    // The union of the three terminal bits is the completed mask (0x1600000).
    const int completedMask = TASK_STATE_RAN_TO_COMPLETION | TASK_STATE_FAULTED | TASK_STATE_CANCELED;
    return (ar.TaskStateFlags & completedMask) != 0;
}
// Maps the task's state flags to a short status label.  The terminal
// states are checked in priority order; anything non-terminal is "Pending".
const char* GetAsyncRecordStatusDescription(AsyncRecord& ar)
{
    static const struct { int flag; const char* label; } states[] =
    {
        { 0x1000000, "Success"  },  // TASK_STATE_RAN_TO_COMPLETION
        { 0x200000,  "Failed"   },  // TASK_STATE_FAULTED
        { 0x400000,  "Canceled" },  // TASK_STATE_CANCELED
    };

    for (size_t i = 0; i < _countof(states); i++)
    {
        if ((ar.TaskStateFlags & states[i].flag) != 0)
            return states[i].label;
    }

    return "Pending";
}
// If the task wraps a delegate (m_action field), prints "(<method name>) "
// for the delegate's target method; otherwise prints nothing.
void ExtOutTaskDelegateMethod(sos::Object& obj)
{
    DacpFieldDescData actionField;
    int actionOffset = GetObjFieldOffset(obj.GetAddress(), obj.GetMT(), W("m_action"), TRUE, &actionField);
    if (actionOffset == 0)
        return;

    CLRDATA_ADDRESS delegateAddr;
    MOVE(delegateAddr, obj.GetAddress() + actionOffset);

    CLRDATA_ADDRESS delegateMD;
    if (delegateAddr == NULL || !TryGetMethodDescriptorForDelegate(delegateAddr, &delegateMD))
        return;

    NameForMD_s((DWORD_PTR)delegateMD, g_mdName, mdNameLen);
    ExtOut("(%S) ", g_mdName);
}
void ExtOutTaskStateFlagsDescription(int stateFlags)
{
if (stateFlags == 0) return;
ExtOut("State Flags: ");
// TaskCreationOptions.*
if ((stateFlags & 0x01) != 0) ExtOut("PreferFairness ");
if ((stateFlags & 0x02) != 0) ExtOut("LongRunning ");
if ((stateFlags & 0x04) != 0) ExtOut("AttachedToParent ");
if ((stateFlags & 0x08) != 0) ExtOut("DenyChildAttach ");
if ((stateFlags & 0x10) != 0) ExtOut("HideScheduler ");
if ((stateFlags & 0x40) != 0) ExtOut("RunContinuationsAsynchronously ");
// InternalTaskOptions.*
if ((stateFlags & 0x0200) != 0) ExtOut("ContinuationTask ");
if ((stateFlags & 0x0400) != 0) ExtOut("PromiseTask ");
if ((stateFlags & 0x1000) != 0) ExtOut("LazyCancellation ");
if ((stateFlags & 0x2000) != 0) ExtOut("QueuedByRuntime ");
if ((stateFlags & 0x4000) != 0) ExtOut("DoNotDispose ");
// TASK_STATE_*
if ((stateFlags & 0x10000) != 0) ExtOut("STARTED ");
if ((stateFlags & 0x20000) != 0) ExtOut("DELEGATE_INVOKED ");
if ((stateFlags & 0x40000) != 0) ExtOut("DISPOSED ");
if ((stateFlags & 0x80000) != 0) ExtOut("EXCEPTIONOBSERVEDBYPARENT ");
if ((stateFlags & 0x100000) != 0) ExtOut("CANCELLATIONACKNOWLEDGED ");
if ((stateFlags & 0x200000) != 0) ExtOut("FAULTED ");
if ((stateFlags & 0x400000) != 0) ExtOut("CANCELED ");
if ((stateFlags & 0x800000) != 0) ExtOut("WAITING_ON_CHILDREN ");
if ((stateFlags & 0x1000000) != 0) ExtOut("RAN_TO_COMPLETION ");
if ((stateFlags & 0x2000000) != 0) ExtOut("WAITINGFORACTIVATION ");
if ((stateFlags & 0x4000000) != 0) ExtOut("COMPLETION_RESERVED ");
if ((stateFlags & 0x8000000) != 0) ExtOut("THREAD_WAS_ABORTED ");
if ((stateFlags & 0x10000000) != 0) ExtOut("WAIT_COMPLETION_NOTIFICATION ");
if ((stateFlags & 0x20000000) != 0) ExtOut("EXECUTIONCONTEXT_IS_NULL ");
if ((stateFlags & 0x40000000) != 0) ExtOut("TASKSCHEDULED_WAS_FIRED ");
ExtOut("\n");
}
// Displays the fields of an async record's state machine via the DAC,
// honoring value-type vs. reference-type layout (ar.IsValueType).
// Silently prints nothing if the MT data cannot be fetched or the state
// machine has no fields.
void ExtOutStateMachineFields(AsyncRecord& ar)
{
    DacpMethodTableData mtabledata;
    DacpMethodTableFieldData vMethodTableFields;
    // Only display when both DAC requests succeed and there is at least
    // one instance or static field to show.
    if (mtabledata.Request(g_sos, ar.StateMachineMT) == S_OK &&
        vMethodTableFields.Request(g_sos, ar.StateMachineMT) == S_OK &&
        vMethodTableFields.wNumInstanceFields + vMethodTableFields.wNumStaticFields > 0)
    {
        DisplayFields(ar.StateMachineMT, &mtabledata, &vMethodTableFields, (DWORD_PTR)ar.StateMachineAddr, TRUE, ar.IsValueType);
    }
}
// Locates the metadata tokens DumpAsync needs to identify async objects on
// the heap: the AsyncStateMachineBox`1 and DebugFinalizableAsyncStateMachineBox`1
// nested types and System.Threading.Tasks.Task, all resolved against
// System.Private.CoreLib.dll.  On failure every output is zeroed so callers
// never consume uninitialized tokens.
void FindStateMachineTypes(DWORD_PTR* corelibModule, mdTypeDef* stateMachineBox, mdTypeDef* debugStateMachineBox, mdTypeDef* task)
{
    // Zero all outputs up front so they are well-defined on every path.
    // (Previously *task was left uninitialized when the module lookup
    // failed, and DumpAsync would then pass garbage to IsDerivedFrom.)
    *corelibModule = 0;
    *stateMachineBox = 0;
    *debugStateMachineBox = 0;
    *task = 0;

    int numModule;
    ArrayHolder<DWORD_PTR> moduleList = ModuleFromName(const_cast<LPSTR>("System.Private.CoreLib.dll"), &numModule);
    if (moduleList != NULL && numModule == 1)
    {
        *corelibModule = moduleList[0];
        GetInfoFromName(*corelibModule, "System.Runtime.CompilerServices.AsyncTaskMethodBuilder`1+AsyncStateMachineBox`1", stateMachineBox);
        GetInfoFromName(*corelibModule, "System.Runtime.CompilerServices.AsyncTaskMethodBuilder`1+DebugFinalizableAsyncStateMachineBox`1", debugStateMachineBox);
        GetInfoFromName(*corelibModule, "System.Threading.Tasks.Task", task);
    }
}
DECLARE_API(DumpAsync)
{
INIT_API();
MINIDUMP_NOT_SUPPORTED();
if (!g_snapshot.Build())
{
ExtOut("Unable to build snapshot of the garbage collector state\n");
return E_FAIL;
}
try
{
// Process command-line arguments.
size_t nArg = 0;
TADDR mt = NULL, addr = NULL;
ArrayHolder<char> ansiType = NULL;
ArrayHolder<WCHAR> type = NULL;
BOOL dml = FALSE, includeCompleted = FALSE, includeStacks = FALSE, includeRoots = FALSE, includeAllTasks = FALSE, dumpFields = FALSE;
CMDOption option[] =
{ // name, vptr, type, hasValue
{ "-addr", &addr, COHEX, TRUE }, // dump only the async object at the specified address
{ "-mt", &mt, COHEX, TRUE }, // dump only async objects with a given MethodTable
{ "-type", &ansiType, COSTRING, TRUE }, // dump only async objects that contain the specified type substring
{ "-tasks", &includeAllTasks, COBOOL, FALSE }, // include all tasks that can be found on the heap, not just async methods
{ "-completed", &includeCompleted, COBOOL, FALSE }, // include async objects that are in a completed state
{ "-fields", &dumpFields, COBOOL, FALSE }, // show relevant fields of found async objects
{ "-stacks", &includeStacks, COBOOL, FALSE }, // gather and output continuation/stack information
{ "-roots", &includeRoots, COBOOL, FALSE }, // gather and output GC root information
#ifndef FEATURE_PAL
{ "/d", &dml, COBOOL, FALSE }, // Debugger Markup Language
#endif
};
if (!GetCMDOption(args, option, _countof(option), NULL, 0, &nArg) || nArg != 0)
{
sos::Throw<sos::Exception>(
"Usage: DumpAsync [-addr ObjectAddr] [-mt MethodTableAddr] [-type TypeName] [-tasks] [-completed] [-fields] [-stacks] [-roots]\n"
"[-addr ObjectAddr] => Only display the async object at the specified address.\n"
"[-mt MethodTableAddr] => Only display top-level async objects with the specified method table address.\n"
"[-type TypeName] => Only display top-level async objects whose type name includes the specified substring.\n"
"[-tasks] => Include Task and Task-derived objects, in addition to any state machine objects found.\n"
"[-completed] => Include async objects that represent completed operations but that are still on the heap.\n"
"[-fields] => Show the fields of state machines.\n"
"[-stacks] => Gather, output, and consolidate based on continuation chains / async stacks for discovered async objects.\n"
"[-roots] => Perform a gcroot on each rendered async object.\n"
);
}
if (ansiType != NULL)
{
size_t ansiTypeLen = strlen(ansiType) + 1;
type = new WCHAR[ansiTypeLen];
MultiByteToWideChar(CP_ACP, 0, ansiType, -1, type, (int)ansiTypeLen);
}
EnableDMLHolder dmlHolder(dml);
BOOL hasTypeFilter = mt != NULL || ansiType != NULL || addr != NULL;
// Display a message if the heap isn't verified.
sos::GCHeap gcheap;
if (!gcheap.AreGCStructuresValid())
{
DisplayInvalidStructuresMessage();
}
// Find the state machine types
DWORD_PTR corelibModule;
mdTypeDef stateMachineBoxMd, debugStateMachineBoxMd, taskMd;
FindStateMachineTypes(&corelibModule, &stateMachineBoxMd, &debugStateMachineBoxMd, &taskMd);
// Walk each heap object looking for async state machine objects. As we're targeting .NET Core 2.1+, all such objects
// will be Task or Task-derived types.
std::map<CLRDATA_ADDRESS, AsyncRecord> asyncRecords;
for (sos::ObjectIterator itr = gcheap.WalkHeap(); !IsInterrupt() && itr != NULL; ++itr)
{
// Skip objects too small to be state machines or tasks, avoiding some compiler-generated caching data structures.
if (itr->GetSize() <= 24)
{
continue;
}
// Match only async objects.
if (includeAllTasks)
{
// If the user has selected to include all tasks and not just async state machine boxes, we simply need to validate
// that this is Task or Task-derived, and if it's not, skip it.
if (!IsDerivedFrom(itr->GetMT(), corelibModule, taskMd))
{
continue;
}
}
else
{
// Otherwise, we only care about AsyncStateMachineBox`1 as well as the DebugFinalizableAsyncStateMachineBox`1
// that's used when certain ETW events are set.
DacpMethodTableData mtdata;
if (mtdata.Request(g_sos, TO_TADDR(itr->GetMT())) != S_OK ||
mtdata.Module != corelibModule ||
(mtdata.cl != stateMachineBoxMd && mtdata.cl != debugStateMachineBoxMd))
{
continue;
}
}
// Create an AsyncRecord to store the state for this instance. We're likely going to keep the object at this point,
// though we may still discard/skip it with a few checks later; to do that, though, we'll need some of the info
// gathered here, so we construct the record to store the data.
AsyncRecord ar;
ar.Address = itr->GetAddress();
ar.MT = itr->GetMT();
ar.Size = (DWORD)itr->GetSize();
ar.StateMachineAddr = itr->GetAddress();
ar.StateMachineMT = itr->GetMT();
ar.IsValueType = false;
ar.IsTopLevel = true;
ar.IsStateMachine = false;
ar.TaskStateFlags = 0;
ar.StateValue = 0;
ar.FilteredByOptions = // we process all objects to support forming proper chains, but then only display ones that match the user's request
(mt == NULL || mt == itr->GetMT()) && // Match only MTs the user requested.
(type == NULL || _wcsstr(itr->GetTypeName(), type) != NULL) && // Match only type name substrings the user requested.
(addr == NULL || addr == itr->GetAddress()); // Match only the object at the specified address.
// Get the state flags for the task. This is used to determine whether async objects are completed (and thus should
// be culled by default). It avoids our needing to depend on interpreting the compiler's "<>1__state" field, and also lets
// us display state information for non-async state machine objects.
DacpFieldDescData stateFlagsField;
int offset = GetObjFieldOffset(ar.Address, ar.MT, W("m_stateFlags"), TRUE, &stateFlagsField);
if (offset != 0)
{
MOVE(ar.TaskStateFlags, ar.Address + offset);
}
// Get the async state machine object's StateMachine field.
DacpFieldDescData stateMachineField;
int stateMachineFieldOffset = GetObjFieldOffset(TO_CDADDR(itr->GetAddress()), itr->GetMT(), W("StateMachine"), TRUE, &stateMachineField);
if (stateMachineFieldOffset != 0)
{
ar.IsStateMachine = true;
ar.IsValueType = stateMachineField.Type == ELEMENT_TYPE_VALUETYPE;
// Get the address and method table of the state machine. While it'll generally be a struct, it is valid for it to be a
// class (the C# compiler generates a class in debug builds to better support Edit-And-Continue), so we accommodate both.
DacpFieldDescData stateField;
int stateFieldOffset = -1;
if (ar.IsValueType)
{
ar.StateMachineAddr = itr->GetAddress() + stateMachineFieldOffset;
ar.StateMachineMT = stateMachineField.MTOfType;
stateFieldOffset = GetValueFieldOffset(ar.StateMachineMT, W("<>1__state"), &stateField);
}
else
{
MOVE(ar.StateMachineAddr, itr->GetAddress() + stateMachineFieldOffset);
DacpObjectData objData;
if (objData.Request(g_sos, ar.StateMachineAddr) == S_OK)
{
ar.StateMachineMT = objData.MethodTable; // update from Canon to actual type
stateFieldOffset = GetObjFieldOffset(ar.StateMachineAddr, ar.StateMachineMT, W("<>1__state"), TRUE, &stateField);
}
}
if (stateFieldOffset >= 0 && (ar.IsValueType || stateFieldOffset != 0))
{
MOVE(ar.StateValue, ar.StateMachineAddr + stateFieldOffset);
}
}
// If we only want to include incomplete async objects, skip this one if it's completed.
if (!includeCompleted && AsyncRecordIsCompleted(ar))
{
continue;
}
// If the user has asked to include "async stacks" information, resolve any continuation
// that might be registered with it. This could be a single continuation, or it could
// be a list of continuations in the case of the same task being awaited multiple times.
CLRDATA_ADDRESS nextAddr;
if (includeStacks && TryGetContinuation(itr->GetAddress(), itr->GetMT(), &nextAddr))
{
sos::Object contObj = TO_TADDR(nextAddr);
if (_wcsncmp(contObj.GetTypeName(), W("System.Collections.Generic.List`1"), 33) == 0)
{
// The continuation is a List<object>. Iterate through its internal object[]
// looking for non-null objects, and adding each one as a continuation.
int itemsOffset = GetObjFieldOffset(contObj.GetAddress(), contObj.GetMT(), W("_items"));
if (itemsOffset != 0)
{
DWORD_PTR listItemsPtr;
MOVE(listItemsPtr, contObj.GetAddress() + itemsOffset);
if (sos::IsObject(listItemsPtr, false))
{
DacpObjectData objData;
if (objData.Request(g_sos, TO_CDADDR(listItemsPtr)) == S_OK && objData.ObjectType == OBJ_ARRAY)
{
for (int i = 0; i < objData.dwNumComponents; i++)
{
CLRDATA_ADDRESS elementPtr;
MOVE(elementPtr, TO_CDADDR(objData.ArrayDataPtr + (i * objData.dwComponentSize)));
if (elementPtr != NULL && sos::IsObject(elementPtr, false))
{
ResolveContinuation(&elementPtr);
ar.Continuations.push_back(elementPtr);
}
}
}
}
}
}
else
{
ar.Continuations.push_back(contObj.GetAddress());
}
}
// We've gathered all of the needed information for this heap object. Add it to our list of async records.
asyncRecords.insert(std::pair<CLRDATA_ADDRESS, AsyncRecord>(ar.Address, ar));
}
// As with DumpHeap, output a summary table about all of the objects we found. In contrast, though, his is based on the filtered
// list of async records we gathered rather than everything on the heap.
if (addr == NULL) // no point in stats if we're only targeting a single object
{
HeapStat stats;
for (std::map<CLRDATA_ADDRESS, AsyncRecord>::iterator arIt = asyncRecords.begin(); arIt != asyncRecords.end(); ++arIt)
{
if (!hasTypeFilter || arIt->second.FilteredByOptions)
{
stats.Add((DWORD_PTR)arIt->second.MT, (DWORD)arIt->second.Size);
}
}
stats.Sort();
stats.Print();
}
// If the user has asked for "async stacks" and if there's not MT/type name filter, look through all of our async records
// to find the "top-level" nodes that start rather than that are a part of a continuation chain. When we then iterate through
// async records, we only print ones out that are still classified as top-level. We don't do this if there's a type filter
// because in that case we consider those and only those objects to be top-level.
if (includeStacks && !hasTypeFilter)
{
size_t uniqueChains = asyncRecords.size();
for (std::map<CLRDATA_ADDRESS, AsyncRecord>::iterator arIt = asyncRecords.begin(); arIt != asyncRecords.end(); ++arIt)
{
for (std::vector<CLRDATA_ADDRESS>::iterator contIt = arIt->second.Continuations.begin(); contIt != arIt->second.Continuations.end(); ++contIt)
{
std::map<CLRDATA_ADDRESS, AsyncRecord>::iterator found = asyncRecords.find(*contIt);
if (found != asyncRecords.end())
{
if (found->second.IsTopLevel)
{
found->second.IsTopLevel = false;
uniqueChains--;
}
}
}
}
ExtOut("In %d chains.\n", uniqueChains);
}
// Print out header for the main line of each result.
ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %8s ", "Address", "MT", "Size");
if (includeCompleted) ExtOut("%8s ", "Status");
ExtOut("%10s %s\n", "State", "Description");
// Output each top-level async record.
int counter = 0;
for (std::map<CLRDATA_ADDRESS, AsyncRecord>::iterator arIt = asyncRecords.begin(); arIt != asyncRecords.end(); ++arIt)
{
if (!arIt->second.IsTopLevel || (hasTypeFilter && !arIt->second.FilteredByOptions))
{
continue;
}
// Output the state machine's details as a single line.
sos::Object obj = TO_TADDR(arIt->second.Address);
if (arIt->second.IsStateMachine)
{
// This has a StateMachine. Output its details.
sos::MethodTable mt = TO_TADDR(arIt->second.StateMachineMT);
DMLOut("%s %s %8d ", DMLAsync(obj.GetAddress()), DMLDumpHeapMT(obj.GetMT()), obj.GetSize());
if (includeCompleted) ExtOut("%8s ", GetAsyncRecordStatusDescription(arIt->second));
ExtOut("%10d %S\n", arIt->second.StateValue, mt.GetName());
if (dumpFields) ExtOutStateMachineFields(arIt->second);
}
else
{
// This does not have a StateMachine. Output the details of the Task itself.
DMLOut("%s %s %8d ", DMLAsync(obj.GetAddress()), DMLDumpHeapMT(obj.GetMT()), obj.GetSize());
if (includeCompleted) ExtOut("%8s ", GetAsyncRecordStatusDescription(arIt->second));
ExtOut("[%08x] %S ", arIt->second.TaskStateFlags, obj.GetTypeName());
ExtOutTaskDelegateMethod(obj);
ExtOut("\n");
if (dumpFields) ExtOutTaskStateFlagsDescription(arIt->second.TaskStateFlags);
}
// If we gathered any continuations for this record, output the chains now.
if (includeStacks && arIt->second.Continuations.size() > 0)
{
ExtOut(includeAllTasks ? "Continuation chains:\n" : "Async \"stack\":\n");
std::vector<std::pair<int, CLRDATA_ADDRESS>> continuationChainToExplore;
continuationChainToExplore.push_back(std::pair<int, CLRDATA_ADDRESS>(1, obj.GetAddress()));
// Do a depth-first traversal of continuations, outputting each continuation found and then
// looking in our gathered objects list for its continuations.
std::set<CLRDATA_ADDRESS> seen;
while (continuationChainToExplore.size() > 0)
{
// Pop the next continuation from the stack.
std::pair<int, CLRDATA_ADDRESS> cur = continuationChainToExplore.back();
continuationChainToExplore.pop_back();
// Get the async record for this continuation. It should be one we already know about.
std::map<CLRDATA_ADDRESS, AsyncRecord>::iterator curAsyncRecord = asyncRecords.find(cur.second);
if (curAsyncRecord == asyncRecords.end())
{
continue;
}
// Make sure to avoid cycles in the rare case where async records may refer to each other.
if (seen.find(cur.second) != seen.end())
{
continue;
}
seen.insert(cur.second);
// Iterate through all continuations from this object.
for (std::vector<CLRDATA_ADDRESS>::iterator contIt = curAsyncRecord->second.Continuations.begin(); contIt != curAsyncRecord->second.Continuations.end(); ++contIt)
{
sos::Object cont = TO_TADDR(*contIt);
// Print out the depth of the continuation with dots, then its address.
for (int i = 0; i < cur.first; i++) ExtOut(".");
DMLOut("%s ", DMLObject(cont.GetAddress()));
// Print out the name of the method for this task's delegate if it has one (state machines won't, but others tasks may).
ExtOutTaskDelegateMethod(cont);
// Find the async record for this continuation, and output its name. If it's a state machine,
// also output its current state value so that a user can see at a glance its status.
std::map<CLRDATA_ADDRESS, AsyncRecord>::iterator contAsyncRecord = asyncRecords.find(cont.GetAddress());
if (contAsyncRecord != asyncRecords.end())
{
sos::MethodTable contMT = TO_TADDR(contAsyncRecord->second.StateMachineMT);
if (contAsyncRecord->second.IsStateMachine) ExtOut("(%d) ", contAsyncRecord->second.StateValue);
ExtOut("%S\n", contMT.GetName());
if (contAsyncRecord->second.IsStateMachine && dumpFields) ExtOutStateMachineFields(contAsyncRecord->second);
}
else
{
ExtOut("%S\n", cont.GetTypeName());
}
// Add this continuation to the stack to explore.
continuationChainToExplore.push_back(std::pair<int, CLRDATA_ADDRESS>(cur.first + 1, *contIt));
}
}
}
// Finally, output gcroots, as they can serve as alternative/more detailed "async stacks", and also help to highlight
// state machines that aren't being kept alive. However, they're more expensive to compute, so they're opt-in.
if (includeRoots)
{
ExtOut("GC roots:\n");
IncrementIndent();
GCRootImpl gcroot;
int numRoots = gcroot.PrintRootsForObject(obj.GetAddress(), FALSE, FALSE);
DecrementIndent();
if (numRoots == 0 && !AsyncRecordIsCompleted(arIt->second))
{
ExtOut("Incomplete state machine or task with 0 roots.\n");
}
}
// If we're rendering more than one line per entry, output a separator to help distinguish the entries.
if (dumpFields || includeStacks || includeRoots)
{
ExtOut("--------------------------------------------------------------------------------\n");
}
}
return S_OK;
}
catch (const sos::Exception &e)
{
ExtOut("%s\n", e.what());
return E_FAIL;
}
}
/**********************************************************************\
* Routine Description: *
* *
* This function dumps all objects on GC heap. It also displays *
* statistics of objects. If GC heap is corrupted, it will stop at
* the bad place. (May not work if GC is in progress.) *
* *
\**********************************************************************/
DECLARE_API(DumpHeap)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    // Walking the heap requires a snapshot of the GC state; bail out early
    // if it cannot be built (e.g. GC data structures are unavailable).
    if (!g_snapshot.Build())
    {
        ExtOut("Unable to build snapshot of the garbage collector state\n");
        return E_FAIL;
    }

    // DumpHeapImpl parses the argument string itself and performs the walk;
    // sos::Exception is its error-reporting channel.
    HRESULT hr = S_OK;
    try
    {
        DumpHeapImpl impl(args);
        impl.Run();
    }
    catch (const sos::Exception &e)
    {
        ExtOut("%s\n", e.what());
        hr = E_FAIL;
    }
    return hr;
}
DECLARE_API(VerifyHeap)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    // Walks every object on the GC heap, validating each one, then validates
    // the sync block table. Reports nothing per-object unless corruption is
    // found; prints a summary line when the heap is clean.
    if (!g_snapshot.Build())
    {
        ExtOut("Unable to build snapshot of the garbage collector state\n");
        return E_FAIL;
    }
    try
    {
        bool succeeded = true;
        char buffer[1024];
        sos::GCHeap gcheap;
        sos::ObjectIterator itr = gcheap.WalkHeap();
        while (itr)
        {
            if (itr.Verify(buffer, _countof(buffer)))
            {
                ++itr;
            }
            else
            {
                succeeded = false;
                // The diagnostic text may contain '%' characters (addresses,
                // type names); pass it as an argument, never as the format
                // string, to avoid misformatting/undefined behavior.
                ExtOut("%s", buffer);
                // The current object is corrupt, so normal advancement may be
                // unreliable; resynchronize conservatively.
                itr.MoveToNextObjectCarefully();
            }
        }
        if (!DumpHeapImpl::ValidateSyncTable(gcheap))
            succeeded = false;
        if (succeeded)
            ExtOut("No heap corruption detected.\n");
        return S_OK;
    }
    catch(const sos::Exception &e)
    {
        ExtOut("%s\n", e.what());
        return E_FAIL;
    }
}
#ifndef FEATURE_PAL
// Reason codes for a failed GC memory acquisition, reported in DacpOomData::fgm.
// NOTE(review): these values mirror the runtime GC's failure_get_memory enum --
// keep them in sync with the runtime definition.
enum failure_get_memory
{
    fgm_no_failure = 0,
    fgm_reserve_segment = 1,
    fgm_commit_segment_beg = 2,
    fgm_commit_eph_segment = 3,
    fgm_grow_table = 4,
    fgm_commit_table = 5
};
// High-level reason for a managed OOM, reported in DacpOomData::reason.
// NOTE(review): these values mirror the runtime GC's oom_reason enum --
// keep them in sync with the runtime definition.
enum oom_reason
{
    oom_no_failure = 0,
    oom_budget = 1,
    oom_cant_commit = 2,
    oom_cant_reserve = 3,
    oom_loh = 4,
    oom_low_mem = 5,
    oom_unproductive_full_gc = 6
};
// Human-readable descriptions indexed by oom_reason; order must match the enum.
static const char *const str_oom[] =
{
    "There was no managed OOM due to allocations on the GC heap", // oom_no_failure
    "This is likely to be a bug in GC", // oom_budget
    "Didn't have enough memory to commit", // oom_cant_commit
    "This is likely to be a bug in GC", // oom_cant_reserve
    "Didn't have enough memory to allocate an LOH segment", // oom_loh
    "Low on memory during GC", // oom_low_mem
    "Could not do a full GC" // oom_unproductive_full_gc
};
// Human-readable descriptions indexed by failure_get_memory; order must match the enum.
static const char *const str_fgm[] =
{
    "There was no failure to allocate memory", // fgm_no_failure
    "Failed to reserve memory", // fgm_reserve_segment
    "Didn't have enough memory to commit beginning of the segment", // fgm_commit_segment_beg
    "Didn't have enough memory to commit the new ephemeral segment", // fgm_commit_eph_segment
    "Didn't have enough memory to grow the internal GC data structures", // fgm_grow_table
    "Didn't have enough memory to commit the internal GC data structures", // fgm_commit_table
};
// Prints a human-readable report for one heap's OOM record: the GC index and
// requested allocation size, the high-level reason (from str_oom), and -- when
// a memory-acquisition failure was recorded -- the detailed fgm description
// plus the available commit space at GC entry, if known.
void PrintOOMInfo(DacpOomData* oomData)
{
    ExtOut("Managed OOM occurred after GC #%d (Requested to allocate %d bytes)\n",
        oomData->gc_index, oomData->alloc_size);
    // oom_budget and oom_cant_reserve both indicate a likely GC bug, so steer
    // the user toward support rather than just printing the reason.
    if ((oomData->reason == oom_budget) ||
        (oomData->reason == oom_cant_reserve))
    {
        // TODO: This message needs to be updated with more precise info.
        ExtOut("%s, please contact PSS\n", str_oom[oomData->reason]);
    }
    else
    {
        ExtOut("Reason: %s\n", str_oom[oomData->reason]);
    }
    // Now print out the more detailed memory info if any.
    if (oomData->fgm != fgm_no_failure)
    {
        ExtOut("Detail: %s: %s (%d bytes)",
            (oomData->loh_p ? "LOH" : "SOH"),
            str_fgm[oomData->fgm],
            oomData->size);
        if ((oomData->fgm == fgm_commit_segment_beg) ||
            (oomData->fgm == fgm_commit_eph_segment) ||
            (oomData->fgm == fgm_grow_table) ||
            (oomData->fgm == fgm_commit_table))
        {
            // If it's a commit error (fgm_grow_table can indicate a reserve
            // or a commit error since we make one VirtualAlloc call to
            // reserve and commit), we indicate the available commit
            // space if we recorded it.
            if (oomData->available_pagefile_mb)
            {
                ExtOut(" - on GC entry available commit space was %d MB",
                    oomData->available_pagefile_mb);
            }
        }
        ExtOut("\n");
    }
}
// !AnalyzeOOM: reports the most recent managed OOM (if any) recorded by the GC.
// Workstation GC has a single OOM record; server GC is queried per heap, with
// a per-heap banner printed for each heap that recorded a failure.
DECLARE_API(AnalyzeOOM)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
#ifndef FEATURE_PAL
    if (!InitializeHeapData ())
    {
        ExtOut("GC Heap not initialized yet.\n");
        return S_OK;
    }
    BOOL bHasManagedOOM = FALSE;
    DacpOomData oomData;
    memset (&oomData, 0, sizeof(oomData));
    if (!IsServerBuild())
    {
        // Workstation GC: a single global OOM record.
        if (oomData.Request(g_sos) != S_OK)
        {
            ExtOut("Error requesting OOM data\n");
            return E_FAIL;
        }
        if (oomData.reason != oom_no_failure)
        {
            bHasManagedOOM = TRUE;
            PrintOOMInfo(&oomData);
        }
    }
    else
    {
        // Server GC: query each heap's OOM record individually.
        DWORD dwNHeaps = GetGcHeapCount();
        DWORD dwAllocSize;
        if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
        {
            ExtOut("Failed to get GCHeaps: integer overflow\n");
            return Status;
        }
        CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
        if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
        {
            ExtOut("Failed to get GCHeaps\n");
            return Status;
        }
        for (DWORD n = 0; n < dwNHeaps; n ++)
        {
            if (oomData.Request(g_sos, heapAddrs[n]) != S_OK)
            {
                ExtOut("Heap %d: Error requesting OOM data\n", n);
                return E_FAIL;
            }
            if (oomData.reason != oom_no_failure)
            {
                if (!bHasManagedOOM)
                {
                    bHasManagedOOM = TRUE;
                }
                ExtOut("---------Heap %#-2d---------\n", n);
                PrintOOMInfo(&oomData);
            }
        }
    }
    if (!bHasManagedOOM)
    {
        // No heap recorded a failure; oomData.reason is oom_no_failure here
        // (either zero-initialized or from the last successful Request).
        ExtOut("%s\n", str_oom[oomData.reason]);
    }
    return S_OK;
#else
    _ASSERTE(false);
    return E_FAIL;
#endif // FEATURE_PAL
}
// !VerifyObj <address> [/d]: validates a single object -- checks that it has a
// valid method table, then runs the full object verifier against the heap that
// contains it. Prints a confirmation line only when the object is valid;
// VerifyObject itself reports the details of any corruption found.
DECLARE_API(VerifyObj)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    TADDR taddrObj = 0;
    TADDR taddrMT;
    size_t objSize;
    BOOL bValid = FALSE;
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&taddrObj, COHEX}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    BOOL bContainsPointers;
    // Quick sanity check: the object must at least carry a readable, valid MT.
    if (FAILED(GetMTOfObject(taddrObj, &taddrMT)) ||
        !GetSizeEfficient(taddrObj, taddrMT, FALSE, objSize, bContainsPointers))
    {
        ExtOut("object %#p does not have valid method table\n", SOS_PTR(taddrObj));
        goto Exit;
    }
    // we need to build g_snapshot as it is later used in GetGeneration
    if (!g_snapshot.Build())
    {
        ExtOut("Unable to build snapshot of the garbage collector state\n");
        goto Exit;
    }
    {
        // Full verification against the owning heap (last arg TRUE => verbose).
        DacpGcHeapDetails *pheapDetails = g_snapshot.GetHeap(taddrObj);
        bValid = VerifyObject(*pheapDetails, taddrObj, taddrMT, objSize, TRUE);
    }
    Exit:
    if (bValid)
    {
        ExtOut("object %#p is a valid object\n", SOS_PTR(taddrObj));
    }
    return Status;
}
// Renders one ListNearObj result row: tag, DML-linked object address, the size
// in decimal and hex, and the object's type name.
void LNODisplayOutput(LPCWSTR tag, TADDR pMT, TADDR currentObj, size_t size)
{
    sos::Object displayed(currentObj, pMT);
    const WCHAR *typeName = displayed.GetTypeName();
    DMLOut("%S %s %12d (0x%x)\t%S\n", tag, DMLObject(currentObj), size, size, typeName);
}
// !ListNearObj <address>: locates the valid objects immediately before and
// after the given address on the managed heap, prints all three (before /
// current / after), and reports whether they tile the region consistently --
// a quick local heap-corruption check around one address.
DECLARE_API(ListNearObj)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
#if !defined(FEATURE_PAL)
    TADDR taddrArg = 0;
    TADDR taddrObj = 0;
    // we may want to provide a more exact version of searching for the
    // previous object in the heap, using the brick table, instead of
    // looking for what may be valid method tables...
    //BOOL bExact;
    //CMDOption option[] =
    //{
    //    // name, vptr, type, hasValue
    //    {"-exact", &bExact, COBOOL, FALSE}
    //};
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {
        // vptr, type
        {&taddrArg, COHEX}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg) || nArg != 1)
    {
        ExtOut("Usage: !ListNearObj <obj_address>\n");
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    if (!g_snapshot.Build())
    {
        ExtOut("Unable to build snapshot of the garbage collector state\n");
        return Status;
    }
    taddrObj = Align(taddrArg);
    DacpGcHeapDetails *heap = g_snapshot.GetHeap(taddrArg);
    if (heap == NULL)
    {
        ExtOut("Address %p does not lie in the managed heap\n", SOS_PTR(taddrObj));
        return Status;
    }
    TADDR_SEGINFO trngSeg  = {0, 0, 0};
    TADDR_RANGE   allocCtx = {0, 0};
    BOOL bLarge;
    int gen;
    if (!GCObjInHeap(taddrObj, *heap, trngSeg, gen, allocCtx, bLarge))
    {
        ExtOut("Failed to find the segment of the managed heap where the object %p resides\n",
            SOS_PTR(taddrObj));
        return Status;
    }
    // obj  = the (aligned) address the user asked about
    // cur  = best candidate object preceding it
    // nxt  = first valid object following it
    TADDR objMT = NULL;
    size_t objSize = 0;
    BOOL bObj = FALSE;
    TADDR taddrCur;
    TADDR curMT = 0;
    size_t curSize = 0;
    BOOL bCur = FALSE;
    TADDR taddrNxt;
    TADDR nxtMT = 0;
    size_t nxtSize = 0;
    BOOL bNxt = FALSE;
    BOOL bContainsPointers;
    std::vector<TADDR> candidate;
    candidate.reserve(10);
    // since we'll be reading back I'll prime the read cache to a buffer before the current address
    MOVE(taddrCur, _max(trngSeg.start, taddrObj-DT_OS_PAGE_SIZE));
    // ===== Look for a good candidate preceding taddrObj
    for (taddrCur = taddrObj - sizeof(TADDR); taddrCur >= trngSeg.start; taddrCur -= sizeof(TADDR))
    {
        // currently we don't pay attention to allocation contexts. if this
        // proves to be an issue we need to reconsider the code below
        if (SUCCEEDED(GetMTOfObject(taddrCur, &curMT)) &&
            GetSizeEfficient(taddrCur, curMT, bLarge, curSize, bContainsPointers))
        {
            // remember this as one of the possible "good" objects preceding taddrObj
            candidate.push_back(taddrCur);
            std::vector<TADDR>::iterator it =
                std::find(candidate.begin(), candidate.end(), taddrCur+curSize);
            if (it != candidate.end())
            {
                // We found a chain of two objects preceding taddrObj. We'll
                // trust this is a good indication that the two objects are valid.
                // What is not valid is possibly the object following the second
                // one...
                taddrCur = *it;
                GetMTOfObject(taddrCur, &curMT);
                GetSizeEfficient(taddrCur, curMT, bLarge, curSize, bContainsPointers);
                bCur = TRUE;
                break;
            }
        }
    }
    if (!bCur && !candidate.empty())
    {
        // pick the closest object to taddrObj
        taddrCur = *(candidate.begin());
        GetMTOfObject(taddrCur, &curMT);
        GetSizeEfficient(taddrCur, curMT, bLarge, curSize, bContainsPointers);
        // we have a candidate, even if not confirmed
        bCur = TRUE;
    }
    taddrNxt = taddrObj;
    if (taddrArg == taddrObj)
    {
        taddrNxt += sizeof(TADDR);
    }
    // ===== Now look at taddrObj
    if (taddrObj == taddrArg)
    {
        // only look at taddrObj if it's the same as what user passed in, meaning it's aligned.
        if (SUCCEEDED(GetMTOfObject(taddrObj, &objMT)) &&
            GetSizeEfficient(taddrObj, objMT, bLarge, objSize, bContainsPointers))
        {
            bObj = TRUE;
            taddrNxt = taddrObj+objSize;
        }
    }
    // If the preceding object extends past the requested address, the "next"
    // object search should begin right after it.
    // BUGFIX: the size check below previously validated taddrObj with objMT/
    // objSize (a copy-paste error; objMT may even still be 0 here), instead of
    // validating the object at taddrCur + curSize.
    if ((taddrCur + curSize > taddrArg) && taddrCur + curSize < trngSeg.end)
    {
        if (SUCCEEDED(GetMTOfObject(taddrCur + curSize, &nxtMT)) &&
            GetSizeEfficient(taddrCur + curSize, nxtMT, bLarge, nxtSize, bContainsPointers))
        {
            taddrNxt = taddrCur+curSize;
        }
    }
    // ===== And finally move on to elements following taddrObj
    for (; taddrNxt < trngSeg.end; taddrNxt += sizeof(TADDR))
    {
        if (SUCCEEDED(GetMTOfObject(taddrNxt, &nxtMT)) &&
            GetSizeEfficient(taddrNxt, nxtMT, bLarge, nxtSize, bContainsPointers))
        {
            bNxt = TRUE;
            break;
        }
    }
    if (bCur)
        LNODisplayOutput(W("Before: "), curMT, taddrCur, curSize);
    else
        ExtOut("Before: couldn't find any object between %#p and %#p\n",
            SOS_PTR(trngSeg.start), SOS_PTR(taddrArg));
    if (bObj)
        LNODisplayOutput(W("Current:"), objMT, taddrObj, objSize);
    if (bNxt)
        LNODisplayOutput(W("After: "), nxtMT, taddrNxt, nxtSize);
    else
        ExtOut("After: couldn't find any object between %#p and %#p\n",
            SOS_PTR(taddrArg), SOS_PTR(trngSeg.end));
    // Consistency holds when before/current/after tile contiguously, or when
    // before runs directly into after (no valid "current" object in between).
    if (bCur && bNxt &&
        (((taddrCur+curSize == taddrObj) && (taddrObj+objSize == taddrNxt)) || (taddrCur+curSize == taddrNxt)))
    {
        ExtOut("Heap local consistency confirmed.\n");
    }
    else
    {
        ExtOut("Heap local consistency not confirmed.\n");
    }
    return Status;
#else
    _ASSERTE(false);
    return E_FAIL;
#endif // FEATURE_PAL
}
// !GCHeapStat [-inclUnrooted|-iu] [/d]: per-heap byte counts for gen0/gen1/
// gen2/LOH -- allocated, free (with SOH/LOH free percentages), and optionally
// unrooted objects. Workstation GC prints one heap; server GC prints each
// heap plus an aggregate total row.
DECLARE_API(GCHeapStat)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
#ifndef FEATURE_PAL
    BOOL bIncUnreachable = FALSE;
    BOOL dml = FALSE;
    CMDOption option[] = {
        // name, vptr, type, hasValue
        {"-inclUnrooted", &bIncUnreachable, COBOOL, FALSE},
        {"-iu", &bIncUnreachable, COBOOL, FALSE},
        {"/d", &dml, COBOOL, FALSE}
    };
    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    ExtOut("%-8s %12s %12s %12s %12s\n", "Heap", "Gen0", "Gen1", "Gen2", "LOH");
    if (!IsServerBuild())
    {
        // Workstation GC: a single heap, queried directly.
        float tempf;
        DacpGcHeapDetails heapDetails;
        if (heapDetails.Request(g_sos) != S_OK)
        {
            ExtErr("Error requesting gc heap details\n");
            return Status;
        }
        HeapUsageStat hpUsage;
        if (GCHeapUsageStats(heapDetails, bIncUnreachable, &hpUsage))
        {
            // genUsage[0..2] are SOH generations, genUsage[3] is the LOH.
            ExtOut("Heap%-4d %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u\n", 0,
                hpUsage.genUsage[0].allocd, hpUsage.genUsage[1].allocd,
                hpUsage.genUsage[2].allocd, hpUsage.genUsage[3].allocd);
            ExtOut("\nFree space: Percentage\n");
            ExtOut("Heap%-4d %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u ", 0,
                hpUsage.genUsage[0].freed, hpUsage.genUsage[1].freed,
                hpUsage.genUsage[2].freed, hpUsage.genUsage[3].freed);
            // SOH percentage aggregates gen0-2; LOH is reported separately.
            tempf = ((float)(hpUsage.genUsage[0].freed+hpUsage.genUsage[1].freed+hpUsage.genUsage[2].freed)) /
                (hpUsage.genUsage[0].allocd+hpUsage.genUsage[1].allocd+hpUsage.genUsage[2].allocd);
            ExtOut("SOH:%3d%% LOH:%3d%%\n", (int)(100 * tempf),
                (int)(100*((float)hpUsage.genUsage[3].freed) / (hpUsage.genUsage[3].allocd)));
            if (bIncUnreachable)
            {
                ExtOut("\nUnrooted objects: Percentage\n");
                ExtOut("Heap%-4d %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u ", 0,
                    hpUsage.genUsage[0].unrooted, hpUsage.genUsage[1].unrooted,
                    hpUsage.genUsage[2].unrooted, hpUsage.genUsage[3].unrooted);
                tempf = ((float)(hpUsage.genUsage[0].unrooted+hpUsage.genUsage[1].unrooted+hpUsage.genUsage[2].unrooted)) /
                    (hpUsage.genUsage[0].allocd+hpUsage.genUsage[1].allocd+hpUsage.genUsage[2].allocd);
                ExtOut("SOH:%3d%% LOH:%3d%%\n", (int)(100 * tempf),
                    (int)(100*((float)hpUsage.genUsage[3].unrooted) / (hpUsage.genUsage[3].allocd)));
            }
        }
    }
    else
    {
        // Server GC: gather stats for every heap, then print per-heap rows
        // followed by a Total row for each section.
        float tempf;
        DacpGcHeapData gcheap;
        if (gcheap.Request(g_sos) != S_OK)
        {
            ExtErr("Error requesting GC Heap data\n");
            return Status;
        }
        DWORD dwAllocSize;
        DWORD dwNHeaps = gcheap.HeapCount;
        if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
        {
            ExtErr("Failed to get GCHeaps: integer overflow\n");
            return Status;
        }
        CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
        if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
        {
            ExtErr("Failed to get GCHeaps\n");
            return Status;
        }
        ArrayHolder<HeapUsageStat> hpUsage = new NOTHROW HeapUsageStat[dwNHeaps];
        if (hpUsage == NULL)
        {
            ReportOOM();
            return Status;
        }
        // aggregate stats across heaps / generation
        GenUsageStat genUsageStat[4] = {0, 0, 0, 0};
        for (DWORD n = 0; n < dwNHeaps; n ++)
        {
            DacpGcHeapDetails heapDetails;
            if (heapDetails.Request(g_sos, heapAddrs[n]) != S_OK)
            {
                ExtErr("Error requesting gc heap details\n");
                return Status;
            }
            if (GCHeapUsageStats(heapDetails, bIncUnreachable, &hpUsage[n]))
            {
                for (int i = 0; i < 4; ++i)
                {
                    genUsageStat[i].allocd += hpUsage[n].genUsage[i].allocd;
                    genUsageStat[i].freed  += hpUsage[n].genUsage[i].freed;
                    if (bIncUnreachable)
                    {
                    genUsageStat[i].unrooted += hpUsage[n].genUsage[i].unrooted;
                    }
                }
            }
        }
        // Allocated-bytes section.
        for (DWORD n = 0; n < dwNHeaps; n ++)
        {
            ExtOut("Heap%-4d %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u\n", n,
                hpUsage[n].genUsage[0].allocd, hpUsage[n].genUsage[1].allocd,
                hpUsage[n].genUsage[2].allocd, hpUsage[n].genUsage[3].allocd);
        }
        ExtOut("Total    %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u\n",
            genUsageStat[0].allocd, genUsageStat[1].allocd,
            genUsageStat[2].allocd, genUsageStat[3].allocd);
        // Free-space section with SOH/LOH percentages.
        ExtOut("\nFree space: Percentage\n");
        for (DWORD n = 0; n < dwNHeaps; n ++)
        {
            ExtOut("Heap%-4d %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u ", n,
                hpUsage[n].genUsage[0].freed, hpUsage[n].genUsage[1].freed,
                hpUsage[n].genUsage[2].freed, hpUsage[n].genUsage[3].freed);
            tempf = ((float)(hpUsage[n].genUsage[0].freed+hpUsage[n].genUsage[1].freed+hpUsage[n].genUsage[2].freed)) /
                (hpUsage[n].genUsage[0].allocd+hpUsage[n].genUsage[1].allocd+hpUsage[n].genUsage[2].allocd);
            ExtOut("SOH:%3d%% LOH:%3d%%\n", (int)(100 * tempf),
                (int)(100*((float)hpUsage[n].genUsage[3].freed) / (hpUsage[n].genUsage[3].allocd))
                );
        }
        ExtOut("Total    %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u\n",
            genUsageStat[0].freed, genUsageStat[1].freed,
            genUsageStat[2].freed, genUsageStat[3].freed);
        // Optional unrooted-objects section.
        if (bIncUnreachable)
        {
            ExtOut("\nUnrooted objects: Percentage\n");
            for (DWORD n = 0; n < dwNHeaps; n ++)
            {
                ExtOut("Heap%-4d %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u ", n,
                    hpUsage[n].genUsage[0].unrooted, hpUsage[n].genUsage[1].unrooted,
                    hpUsage[n].genUsage[2].unrooted, hpUsage[n].genUsage[3].unrooted);
                tempf = ((float)(hpUsage[n].genUsage[0].unrooted+hpUsage[n].genUsage[1].unrooted+hpUsage[n].genUsage[2].unrooted)) /
                    (hpUsage[n].genUsage[0].allocd+hpUsage[n].genUsage[1].allocd+hpUsage[n].genUsage[2].allocd);
                ExtOut("SOH:%3d%% LOH:%3d%%\n", (int)(100 * tempf),
                    (int)(100*((float)hpUsage[n].genUsage[3].unrooted) / (hpUsage[n].genUsage[3].allocd)));
            }
            ExtOut("Total    %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u %12" POINTERSIZE_TYPE "u\n",
                genUsageStat[0].unrooted, genUsageStat[1].unrooted,
                genUsageStat[2].unrooted, genUsageStat[3].unrooted);
        }
    }
    return Status;
#else
    _ASSERTE(false);
    return E_FAIL;
#endif // FEATURE_PAL
}
#endif // FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* This function dumps what is in the syncblock cache. By default *
*    it dumps all active syncblocks. Use -all to dump all syncblocks.
* *
\**********************************************************************/
// !SyncBlk [-all] [/d] [index]: dumps the sync block cache. By default only
// "interesting" blocks are printed (held monitors); -all prints everything,
// and a numeric argument restricts output to that single sync block index.
// Also tallies free blocks and (COM interop builds) CCW/RCW/ComClassFactory
// counts across the whole table.
DECLARE_API(SyncBlk)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    BOOL bDumpAll = FALSE;
    size_t nbAsked = 0;
    BOOL dml = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-all", &bDumpAll, COBOOL, FALSE},
        {"/d", &dml, COBOOL, FALSE}
    };
    CMDValue arg[] =
    {   // vptr, type
        {&nbAsked, COSIZE_T}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    DacpSyncBlockData syncBlockData;
    // Request block 1 first just to learn the total count.
    if (syncBlockData.Request(g_sos,1) != S_OK)
    {
        ExtOut("Error requesting SyncBlk data\n");
        return Status;
    }
    DWORD dwCount = syncBlockData.SyncBlockCount;
    ExtOut("Index" WIN64_8SPACES " SyncBlock MonitorHeld Recursion Owning Thread Info" WIN64_8SPACES "  SyncBlock Owner\n");
    ULONG freeCount = 0;
    ULONG CCWCount = 0;
    ULONG RCWCount = 0;
    ULONG CFCount = 0;
    // Sync block indices are 1-based.
    for (DWORD nb = 1; nb <= dwCount; nb++)
    {
        if (IsInterrupt())
            return Status;
        if (nbAsked && nb != nbAsked)
        {
            continue;
        }
        if (syncBlockData.Request(g_sos,nb) != S_OK)
        {
            ExtOut("SyncBlock %d is invalid%s\n", nb,
                (nb != nbAsked) ? ", continuing..." : "");
            continue;
        }
        // Print when: -all was given, this exact index was requested, or the
        // block is in use with a held monitor.
        BOOL bPrint = (bDumpAll || nb == nbAsked || (syncBlockData.MonitorHeld > 0 && !syncBlockData.bFree));
        if (bPrint)
        {
            ExtOut("%5d ", nb);
            if (!syncBlockData.bFree || nb != nbAsked)
            {
                ExtOut("%p  ", syncBlockData.SyncBlockPointer);
                ExtOut("%11d ", syncBlockData.MonitorHeld);
                ExtOut("%9d ", syncBlockData.Recursion);
                ExtOut("%p ", syncBlockData.HoldingThread);
                // HoldingThread of all-ones marks an orphaned monitor (owner died).
                if (syncBlockData.HoldingThread == ~0ul)
                {
                    ExtOut(" orphaned ");
                }
                else if (syncBlockData.HoldingThread != NULL)
                {
                    DacpThreadData Thread;
                    if ((Status = Thread.Request(g_sos, syncBlockData.HoldingThread)) != S_OK)
                    {
                        ExtOut("Failed to request Thread at %p\n", syncBlockData.HoldingThread);
                        return Status;
                    }
                    DMLOut(DMLThreadID(Thread.osThreadId));
                    ULONG id;
                    // Map OS thread id to the debugger's thread index when possible.
                    if (g_ExtSystem->GetThreadIdBySystemId(Thread.osThreadId, &id) == S_OK)
                    {
                        ExtOut("%4d ", id);
                    }
                    else
                    {
                        ExtOut(" XXX ");
                    }
                }
                else
                {
                    ExtOut("    none  ");
                }
                if (syncBlockData.bFree)
                {
                    ExtOut("  %8d", 0);    // TODO: do we need to print the free synctable list?
                }
                else
                {
                    sos::Object obj = TO_TADDR(syncBlockData.Object);
                    DMLOut("  %s %S", DMLObject(syncBlockData.Object), obj.GetTypeName());
                }
            }
        }
        // Bookkeeping runs for every block, printed or not.
        if (syncBlockData.bFree)
        {
            freeCount ++;
            if (bPrint) {
                ExtOut(" Free");
            }
        }
        else
        {
#ifdef FEATURE_COMINTEROP
            if (syncBlockData.COMFlags) {
                switch (syncBlockData.COMFlags) {
                case SYNCBLOCKDATA_COMFLAGS_CCW:
                    CCWCount ++;
                    break;
                case SYNCBLOCKDATA_COMFLAGS_RCW:
                    RCWCount ++;
                    break;
                case SYNCBLOCKDATA_COMFLAGS_CF:
                    CFCount ++;
                    break;
                }
            }
#endif // FEATURE_COMINTEROP
        }
        if (syncBlockData.MonitorHeld > 1)
        {
            // TODO: implement this
            /*
            ExtOut(" ");
            DWORD_PTR pHead = (DWORD_PTR)vSyncBlock.m_Link.m_pNext;
            DWORD_PTR pNext = pHead;
            Thread vThread;
            while (1)
            {
                if (IsInterrupt())
                    return Status;
                DWORD_PTR pWaitEventLink = pNext - offsetLinkSB;
                WaitEventLink vWaitEventLink;
                vWaitEventLink.Fill(pWaitEventLink);
                if (!CallStatus) {
                    break;
                }
                DWORD_PTR dwAddr = (DWORD_PTR)vWaitEventLink.m_Thread;
                ExtOut("%x ", dwAddr);
                vThread.Fill (dwAddr);
                if (!CallStatus) {
                    break;
                }
                if (bPrint)
                    DMLOut("%s,", DMLThreadID(vThread.m_OSThreadId));
                pNext = (DWORD_PTR)vWaitEventLink.m_LinkSB.m_pNext;
                if (pNext == 0)
                    break;
            }
            */
        }
        if (bPrint)
            ExtOut("\n");
    }
    ExtOut("-----------------------------\n");
    ExtOut("Total           %d\n", dwCount);
#ifdef FEATURE_COMINTEROP
    ExtOut("CCW             %d\n", CCWCount);
    ExtOut("RCW             %d\n", RCWCount);
    ExtOut("ComClassFactory %d\n", CFCount);
#endif
    ExtOut("Free            %d\n", freeCount);
    return Status;
}
#ifdef FEATURE_COMINTEROP
// Accumulator passed (as the opaque token) through TraverseRCWCleanupList to
// the VisitRcw callback.
struct VisitRcwArgs
{
    BOOL bDetail;   // TRUE => print a line per RCW, not just the final counts
    UINT MTACount;  // RCWs owned by an MTA context
    UINT STACount;  // RCWs owned by an STA thread
    ULONG FTMCount; // free-threaded RCWs (no apartment affinity)
};
// Callback invoked for each RCW on the cleanup list. Classifies the RCW by
// apartment (free-threaded / MTA / STA), bumps the matching counter in the
// VisitRcwArgs accumulator, and -- in detail mode -- prints one line per RCW
// (with a header before the first one).
// BUGFIX: the counters must be updated unconditionally; previously they were
// only incremented when bDetail was set, so callers traversing with
// bDetail == FALSE (e.g. !FinalizeQueue without -detail) always reported
// zero FTM/MTA/STA interfaces.
void VisitRcw(CLRDATA_ADDRESS RCW,CLRDATA_ADDRESS Context,CLRDATA_ADDRESS Thread, BOOL bIsFreeThreaded, LPVOID token)
{
    VisitRcwArgs *pArgs = (VisitRcwArgs *) token;
    if (pArgs->bDetail)
    {
        if (pArgs->MTACount == 0 && pArgs->STACount == 0 && pArgs->FTMCount == 0)
        {
            // First time, print a header
            ExtOut("RuntimeCallableWrappers (RCW) to be cleaned:\n");
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s Apartment\n",
                "RCW", "CONTEXT", "THREAD");
        }
    }
    // Classify and count regardless of whether we are printing details.
    LPCSTR szThreadApartment;
    if (bIsFreeThreaded)
    {
        szThreadApartment = "(FreeThreaded)";
        pArgs->FTMCount++;
    }
    else if (Thread == NULL)
    {
        szThreadApartment = "(MTA)";
        pArgs->MTACount++;
    }
    else
    {
        szThreadApartment = "(STA)";
        pArgs->STACount++;
    }
    if (pArgs->bDetail)
    {
        ExtOut("%" POINTERSIZE "p %" POINTERSIZE "p %" POINTERSIZE "p %9s\n",
            SOS_PTR(RCW),
            SOS_PTR(Context),
            SOS_PTR(Thread),
            szThreadApartment);
    }
}
// !RCWCleanupList [address]: walks an RCW cleanup list (the default list when
// no address is given) and prints each entry plus per-apartment totals.
DECLARE_API(RCWCleanupList)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    DWORD_PTR p_CleanupList = GetExpression(args);

    VisitRcwArgs travArgs;
    ZeroMemory(&travArgs, sizeof(VisitRcwArgs));
    travArgs.bDetail = TRUE;

    // A zero result is only an error when the user actually supplied an
    // argument: an empty argument list legitimately evaluates to 0 and
    // means "walk the default cleanup list".
    if (p_CleanupList == 0 && strlen(args) != 0)
    {
        ExtOut("Invalid parameter %s\n", args);
        return Status;
    }

    HRESULT hr = g_sos->TraverseRCWCleanupList(p_CleanupList, (VISITRCWFORCLEANUP)VisitRcw, &travArgs);
    if (FAILED(hr))
    {
        ExtOut("An error occurred while traversing the cleanup list.\n");
        return Status;
    }

    ExtOut("Free-Threaded Interfaces to be released: %d\n", travArgs.FTMCount);
    ExtOut("MTA Interfaces to be released: %d\n", travArgs.MTACount);
    ExtOut("STA Interfaces to be released: %d\n", travArgs.STACount);
    return Status;
}
#endif // FEATURE_COMINTEROP
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of the finalizer *
* queue. *
* *
\**********************************************************************/
// !FinalizeQueue [-detail] [-allReady] [-short] [-mt <MT>] [/d]: dumps the
// finalizer queue. Unless -short, first reports sync blocks and (COM builds)
// RCWs awaiting cleanup by the finalizer thread, then gathers per-heap
// finalization statistics (optionally only no-longer-rooted objects with
// -allReady) and prints them.
DECLARE_API(FinalizeQueue)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    BOOL bDetail = FALSE;
    BOOL bAllReady = FALSE;
    BOOL bShort = FALSE;
    BOOL dml = FALSE;
    TADDR taddrMT = 0;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-detail",   &bDetail,   COBOOL, FALSE},
        {"-allReady", &bAllReady, COBOOL, FALSE},
        {"-short",    &bShort,    COBOOL, FALSE},
        {"/d",        &dml,       COBOOL, FALSE},
        {"-mt",       &taddrMT,   COHEX,  TRUE},
    };
    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    if (!bShort)
    {
        // Walk the sync block cleanup list; -detail prints each entry.
        DacpSyncBlockCleanupData dsbcd;
        CLRDATA_ADDRESS sbCurrent = NULL;
        ULONG cleanCount = 0;
        while ((dsbcd.Request(g_sos,sbCurrent) == S_OK) && dsbcd.SyncBlockPointer)
        {
            if (bDetail)
            {
                if (cleanCount == 0) // print first time only
                {
                    ExtOut("SyncBlocks to be cleaned by the finalizer thread:\n");
                    ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
                        "SyncBlock", "RCW", "CCW", "ComClassFactory");
                }
                ExtOut("%" POINTERSIZE "p %" POINTERSIZE "p %" POINTERSIZE "p %" POINTERSIZE "p\n",
                    (ULONG64) dsbcd.SyncBlockPointer,
                    (ULONG64) dsbcd.blockRCW,
                    (ULONG64) dsbcd.blockCCW,
                    (ULONG64) dsbcd.blockClassFactory);
            }
            cleanCount++;
            sbCurrent = dsbcd.nextSyncBlock;
            if (sbCurrent == NULL)
            {
                break;
            }
        }
        ExtOut("SyncBlocks to be cleaned up: %d\n", cleanCount);
#ifdef FEATURE_COMINTEROP
        // Traverse the default RCW cleanup list; VisitRcw tallies the counts.
        VisitRcwArgs travArgs;
        ZeroMemory(&travArgs,sizeof(VisitRcwArgs));
        travArgs.bDetail = bDetail;
        g_sos->TraverseRCWCleanupList(0, (VISITRCWFORCLEANUP) VisitRcw, &travArgs);
        ExtOut("Free-Threaded Interfaces to be released: %d\n", travArgs.FTMCount);
        ExtOut("MTA Interfaces to be released: %d\n", travArgs.MTACount);
        ExtOut("STA Interfaces to be released: %d\n", travArgs.STACount);
#endif // FEATURE_COMINTEROP
    // noRCW:
        ExtOut("----------------------------------\n");
    }
    // GC Heap: gather finalization stats for every heap.
    DWORD dwNHeaps = GetGcHeapCount();
    HeapStat hpStat;
    if (!IsServerBuild())
    {
        DacpGcHeapDetails heapDetails;
        if (heapDetails.Request(g_sos) != S_OK)
        {
            ExtOut("Error requesting details\n");
            return Status;
        }
        GatherOneHeapFinalization(heapDetails, &hpStat, bAllReady, bShort);
    }
    else
    {
        DWORD dwAllocSize;
        if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
        {
            ExtOut("Failed to get GCHeaps: integer overflow\n");
            return Status;
        }
        CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
        if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
        {
            ExtOut("Failed to get GCHeaps\n");
            return Status;
        }
        for (DWORD n = 0; n < dwNHeaps; n ++)
        {
            DacpGcHeapDetails heapDetails;
            if (heapDetails.Request(g_sos, heapAddrs[n]) != S_OK)
            {
                ExtOut("Error requesting details\n");
                return Status;
            }
            ExtOut("------------------------------\n");
            ExtOut("Heap %d\n", n);
            GatherOneHeapFinalization(heapDetails, &hpStat, bAllReady, bShort);
        }
    }
    if (!bShort)
    {
        if (bAllReady)
        {
            PrintGCStat(&hpStat, "Statistics for all finalizable objects that are no longer rooted:\n");
        }
        else
        {
            PrintGCStat(&hpStat, "Statistics for all finalizable objects (including all objects ready for finalization):\n");
        }
    }
    return Status;
}
enum {
    // These are the values set in m_dwTransientFlags.
    // Note that none of these flags survive a prejit save/restore.
    // NOTE(review): these appear to mirror the runtime Module's transient-flag
    // bits -- confirm against the runtime definition before changing values.
    M_CRST_NOTINITIALIZED       = 0x00000001,   // Used to prevent destruction of garbage m_crst
    M_LOOKUPCRST_NOTINITIALIZED = 0x00000002,
    SUPPORTS_UPDATEABLE_METHODS = 0x00000020,
    CLASSES_FREED               = 0x00000040,
    HAS_PHONY_IL_RVAS           = 0x00000080,
    IS_EDIT_AND_CONTINUE        = 0x00000200,
};
// Callback for ISOSDacInterface::TraverseModuleMap: prints one map entry as
// "<method table> <token> <type name>". 'token' smuggles in the RID base used
// to reconstitute the metadata token from the map index.
void ModuleMapTraverse(UINT index, CLRDATA_ADDRESS methodTable, LPVOID token)
{
    ULONG32 ridBase = (ULONG32)(size_t)token;
    ULONG32 typeToken = (ULONG32)TokenFromRid(ridBase, index);
    NameForMT_s(TO_TADDR(methodTable), g_mdName, mdNameLen);
    DMLOut("%s 0x%08x %S\n", DMLMethodTable(methodTable), typeToken, g_mdName);
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a Module *
* for a given address *
* *
\**********************************************************************/
// DumpModule: prints the fields of the Module at the given address.
// Switches: -mt lists the types defined/referenced by the module,
// -prof lists methods whose IL was modified by a profiler, /d enables DML.
DECLARE_API(DumpModule)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    DWORD_PTR p_ModuleAddr = NULL;
    BOOL bMethodTables = FALSE;
    BOOL bProfilerModified = FALSE;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-mt", &bMethodTables, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
        {"-prof", &bProfilerModified, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&p_ModuleAddr, COHEX}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    if (nArg != 1)
    {
        // List every supported switch (the old message omitted -prof).
        ExtOut("Usage: DumpModule [-mt] [-prof] <Module Address>\n");
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    DacpModuleData module;
    if ((Status=module.Request(g_sos, TO_CDADDR(p_ModuleAddr)))!=S_OK)
    {
        ExtOut("Fail to fill Module %p\n", SOS_PTR(p_ModuleAddr));
        return Status;
    }

    WCHAR FileName[MAX_LONGPATH];
    FileNameForModule (&module, FileName);
    ExtOut("Name: %S\n", FileName[0] ? FileName : W("Unknown Module"));

    ExtOut("Attributes: ");
    if (module.bIsPEFile)
        ExtOut("PEFile ");
    if (module.bIsReflection)
        ExtOut("Reflection ");
    if (module.dwTransientFlags & SUPPORTS_UPDATEABLE_METHODS)
        ExtOut("SupportsUpdateableMethods");
    ExtOut("\n");

    DMLOut("Assembly: %s\n", DMLAssembly(module.Assembly));
    ExtOut("PEFile: %p\n", SOS_PTR(module.File));
    ExtOut("ModuleId: %p\n", SOS_PTR(module.dwModuleID));
    ExtOut("ModuleIndex: %p\n", SOS_PTR(module.dwModuleIndex));
    ExtOut("LoaderHeap: %p\n", SOS_PTR(module.pLookupTableHeap));
    ExtOut("TypeDefToMethodTableMap: %p\n", SOS_PTR(module.TypeDefToMethodTableMap));
    ExtOut("TypeRefToMethodTableMap: %p\n", SOS_PTR(module.TypeRefToMethodTableMap));
    ExtOut("MethodDefToDescMap: %p\n", SOS_PTR(module.MethodDefToDescMap));
    ExtOut("FieldDefToDescMap: %p\n", SOS_PTR(module.FieldDefToDescMap));
    ExtOut("MemberRefToDescMap: %p\n", SOS_PTR(module.MemberRefToDescMap));
    ExtOut("FileReferencesMap: %p\n", SOS_PTR(module.FileReferencesMap));
    ExtOut("AssemblyReferencesMap: %p\n", SOS_PTR(module.ManifestModuleReferencesMap));

    if (module.ilBase && module.metadataStart)
        ExtOut("MetaData start address: %p (%d bytes)\n", SOS_PTR(module.metadataStart), module.metadataSize);

    if (bMethodTables)
    {
        // Walk both the TypeDef and TypeRef maps; the RID base passed as the
        // traversal token lets the callback rebuild the metadata token.
        ExtOut("\nTypes defined in this module\n\n");
        ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %s\n", "MT", "TypeDef", "Name");

        ExtOut("------------------------------------------------------------------------------\n");
        g_sos->TraverseModuleMap(TYPEDEFTOMETHODTABLE, TO_CDADDR(p_ModuleAddr), ModuleMapTraverse, (LPVOID)mdTypeDefNil);

        ExtOut("\nTypes referenced in this module\n\n");
        ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %s\n", "MT", "TypeRef", "Name");

        ExtOut("------------------------------------------------------------------------------\n");
        g_sos->TraverseModuleMap(TYPEREFTOMETHODTABLE, TO_CDADDR(p_ModuleAddr), ModuleMapTraverse, (LPVOID)mdTypeDefNil);
    }

    if (bProfilerModified)
    {
        // Requires ISOSDacInterface7 (newer runtimes only).
        CLRDATA_ADDRESS methodDescs[kcMaxMethodDescsForProfiler];
        int cMethodDescs;
        ReleaseHolder<ISOSDacInterface7> sos7;
        if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface7), &sos7)) &&
            SUCCEEDED(sos7->GetMethodsWithProfilerModifiedIL(TO_CDADDR(p_ModuleAddr),
                                                             methodDescs,
                                                             kcMaxMethodDescsForProfiler,
                                                             &cMethodDescs)))
        {
            if (cMethodDescs > 0)
            {
                ExtOut("\nMethods in this module with profiler modified IL:\n");
                for (int i = 0; i < cMethodDescs; ++i)
                {
                    CLRDATA_ADDRESS md = methodDescs[i];

                    DMLOut("MethodDesc: %s ", DMLMethodDesc(md));

                    // Easiest to get full parameterized method name from ..::GetMethodName
                    if (g_sos->GetMethodDescName(md, mdNameLen, g_mdName, NULL) == S_OK)
                    {
                        ExtOut("Name: %S", g_mdName);
                    }

                    struct DacpProfilerILData ilData;
                    if (SUCCEEDED(sos7->GetProfilerModifiedILInformation(md, &ilData)))
                    {
                        if (ilData.type == DacpProfilerILData::ILModified)
                        {
                            ExtOut(" (IL Modified)");
                        }
                        else if (ilData.type == DacpProfilerILData::ReJITModified)
                        {
                            ExtOut(" (ReJIT Modified)");
                        }
                    }

                    ExtOut("\n");
                }

                if (cMethodDescs == kcMaxMethodDescsForProfiler)
                {
                    ExtOut("Profiler modified methods truncated, reached max value.\n");
                }
            }
            else
            {
                ExtOut("\nThis module has no methods with profiler modified IL.\n");
            }
        }
        else
        {
            ExtOut("\nThis runtime version does not support listing the profiler modified functions.\n");
        }
        // (Removed a stray in-body declaration of GetMethodsWithProfilerModifiedIL
        // that had no effect here.)
    }

    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a Domain *
* for a given address *
* *
\**********************************************************************/
// DumpDomain: with an address argument, dumps just that AppDomain; with no
// argument, dumps the system domain, the shared domain (when present) and
// every AppDomain in the AppDomain store. /d enables DML output.
DECLARE_API(DumpDomain)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    // Optional single argument: address of the domain to dump (0 = all).
    DWORD_PTR p_DomainAddr = 0;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&p_DomainAddr, COHEX},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);

    // The AppDomain store supplies the special domains plus the count used
    // to size the domain-list request below.
    DacpAppDomainStoreData adsData;
    if ((Status=adsData.Request(g_sos))!=S_OK)
    {
        ExtOut("Unable to get AppDomain information\n");
        return Status;
    }

    if (p_DomainAddr)
    {
        // Single-domain mode: label the domain as shared/system when its
        // address matches one of the special domains, otherwise by its id.
        DacpAppDomainData appDomain1;
        if ((Status=appDomain1.Request(g_sos, TO_CDADDR(p_DomainAddr)))!=S_OK)
        {
            ExtOut("Fail to fill AppDomain\n");
            return Status;
        }

        ExtOut("--------------------------------------\n");

        if (p_DomainAddr == adsData.sharedDomain)
        {
            DMLOut("Shared Domain: %s\n", DMLDomain(adsData.sharedDomain));
        }
        else if (p_DomainAddr == adsData.systemDomain)
        {
            DMLOut("System Domain: %s\n", DMLDomain(adsData.systemDomain));
        }
        else
        {
            DMLOut("Domain %d:%s %s\n", appDomain1.dwId, (appDomain1.dwId >= 10) ? "" : " ", DMLDomain(p_DomainAddr));
        }

        DomainInfo(&appDomain1);
        return Status;
    }

    // All-domains mode: system domain first, then shared (if any), then the
    // regular AppDomains from GetAppDomainList.
    ExtOut("--------------------------------------\n");
    DMLOut("System Domain: %s\n", DMLDomain(adsData.systemDomain));
    DacpAppDomainData appDomain;
    if ((Status=appDomain.Request(g_sos,adsData.systemDomain))!=S_OK)
    {
        ExtOut("Unable to get system domain info.\n");
        return Status;
    }
    DomainInfo(&appDomain);

    if (adsData.sharedDomain != NULL)
    {
        ExtOut("--------------------------------------\n");
        DMLOut("Shared Domain: %s\n", DMLDomain(adsData.sharedDomain));
        if ((Status=appDomain.Request(g_sos, adsData.sharedDomain))!=S_OK)
        {
            ExtOut("Unable to get shared domain info\n");
            return Status;
        }
        DomainInfo(&appDomain);
    }

    ArrayHolder<CLRDATA_ADDRESS> pArray = new NOTHROW CLRDATA_ADDRESS[adsData.DomainCount];
    if (pArray==NULL)
    {
        ReportOOM();
        return Status;
    }

    if ((Status=g_sos->GetAppDomainList(adsData.DomainCount, pArray, NULL))!=S_OK)
    {
        ExtOut("Unable to get array of AppDomains\n");
        return Status;
    }

    for (int n=0;n<adsData.DomainCount;n++)
    {
        if (IsInterrupt())
            break;

        if ((Status=appDomain.Request(g_sos, pArray[n])) != S_OK)
        {
            ExtOut("Failed to get appdomain %p, error %lx\n", SOS_PTR(pArray[n]), Status);
            return Status;
        }

        ExtOut("--------------------------------------\n");
        DMLOut("Domain %d:%s %s\n", appDomain.dwId, (appDomain.dwId >= 10) ? "" : " ", DMLDomain(pArray[n]));
        DomainInfo(&appDomain);
    }

    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a Assembly *
* for a given address *
* *
\**********************************************************************/
// DumpAssembly: prints the parent domain, name and module list (via
// AssemblyInfo) of the Assembly at the given address. /d enables DML.
DECLARE_API(DumpAssembly)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    DWORD_PTR p_AssemblyAddr = 0;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&p_AssemblyAddr, COHEX},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);

    // An assembly address is required; reject a missing/zero argument.
    if (p_AssemblyAddr == 0)
    {
        ExtOut("Invalid Assembly %s\n", args);
        return Status;
    }

    DacpAssemblyData Assembly;
    if ((Status=Assembly.Request(g_sos, TO_CDADDR(p_AssemblyAddr)))!=S_OK)
    {
        ExtOut("Fail to fill Assembly\n");
        return Status;
    }
    DMLOut("Parent Domain: %s\n", DMLDomain(Assembly.ParentDomain));
    if (g_sos->GetAssemblyName(TO_CDADDR(p_AssemblyAddr), mdNameLen, g_mdName, NULL)==S_OK)
        ExtOut("Name: %S\n", g_mdName);
    else
        ExtOut("Name: Unknown\n");

    AssemblyInfo(&Assembly);
    return Status;
}
// Builds a comma-separated list of the hosting capabilities encoded in the
// hostConfig flag bits (e.g. "Memory, Task, GC").
String GetHostingCapabilities(DWORD hostConfig)
{
    static const struct
    {
        DWORD flag;
        const char* name;
    }
    capabilities[] =
    {
        {CLRMEMORYHOSTED,       "Memory"},
        {CLRTASKHOSTED,         "Task"},
        {CLRSYNCHOSTED,         "Sync"},
        {CLRTHREADPOOLHOSTED,   "Threadpool"},
        {CLRIOCOMPLETIONHOSTED, "IOCompletion"},
        {CLRASSEMBLYHOSTED,     "Assembly"},
        {CLRGCHOSTED,           "GC"},
        {CLRSECURITYHOSTED,     "Security"},
    };

    String result;
    bool bAnythingPrinted = false;

    for (size_t i = 0; i < _countof(capabilities); ++i)
    {
        if (hostConfig & capabilities[i].flag)
        {
            if (bAnythingPrinted)
                result += ", ";
            result += capabilities[i].name;
            bAnythingPrinted = true;
        }
    }
    return result;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the managed threads *
* *
\**********************************************************************/
// Prints the managed thread table (the core of the !Threads command):
// summary counts first, then one row per thread from the thread store.
// bPrintLiveThreadsOnly skips threads whose OS thread no longer exists.
// NOTE(review): bMiniDump is not referenced in this body — confirm whether
// the parameter is still needed by callers.
HRESULT PrintThreadsFromThreadStore(BOOL bMiniDump, BOOL bPrintLiveThreadsOnly)
{
    HRESULT Status;

    DacpThreadStoreData ThreadStore;
    if ((Status = ThreadStore.Request(g_sos)) != S_OK)
    {
        Print("Failed to request ThreadStore\n");
        return Status;
    }

    // Summary counters from the thread store.
    TableOutput table(2, 17);

    table.WriteRow("ThreadCount:", Decimal(ThreadStore.threadCount));
    table.WriteRow("UnstartedThread:", Decimal(ThreadStore.unstartedThreadCount));
    table.WriteRow("BackgroundThread:", Decimal(ThreadStore.backgroundThreadCount));
    table.WriteRow("PendingThread:", Decimal(ThreadStore.pendingThreadCount));
    table.WriteRow("DeadThread:", Decimal(ThreadStore.deadThreadCount));

    // Any hosting flag beyond the bare CLRHOSTED bit means a hosted runtime;
    // list the individual capabilities in that case.
    if (ThreadStore.fHostConfig & ~CLRHOSTED)
    {
        String hosting = "yes";

        hosting += " (";
        hosting += GetHostingCapabilities(ThreadStore.fHostConfig);
        hosting += ")";

        table.WriteRow("Hosted Runtime:", hosting);
    }
    else
    {
        table.WriteRow("Hosted Runtime:", "no");
    }

    // Per-thread table; task hosting adds an extra "Fiber" column.
    const bool hosted = (ThreadStore.fHostConfig & CLRTASKHOSTED) != 0;
    table.ReInit(hosted ? 12 : 11, POINTERSIZE_HEX);
    table.SetWidths(10, 4, 4, 4, _max(9, POINTERSIZE_HEX),
                      8, 11, 1+POINTERSIZE_HEX*2, POINTERSIZE_HEX,
                      5, 3, POINTERSIZE_HEX);

    table.SetColAlignment(0, AlignRight);
    table.SetColAlignment(1, AlignRight);
    table.SetColAlignment(2, AlignRight);
    table.SetColAlignment(4, AlignRight);

    table.WriteColumn(8, "Lock");
    table.WriteRow("DBG", "ID", "OSID", "ThreadOBJ", "State", "GC Mode", "GC Alloc Context", "Domain", "Count", "Apt");

    if (hosted)
        table.WriteColumn("Fiber");

    table.WriteColumn("Exception");

    DacpThreadData Thread;
    CLRDATA_ADDRESS CurThread = ThreadStore.firstThread;
    while (CurThread)
    {
        if (IsInterrupt())
            break;

        if ((Status = Thread.Request(g_sos, CurThread)) != S_OK)
        {
            PrintLn("Failed to request Thread at ", Pointer(CurThread));
            return Status;
        }

        // A switched-out fiber has a sentinel OSID and no live OS thread.
        BOOL bSwitchedOutFiber = Thread.osThreadId == SWITCHED_OUT_FIBER_OSID;
        if (!IsKernelDebugger())
        {
            ULONG id = 0;
            if (bSwitchedOutFiber)
            {
                table.WriteColumn(0, "<<<< ");
            }
            else if (g_ExtSystem->GetThreadIdBySystemId(Thread.osThreadId, &id) == S_OK)
            {
                table.WriteColumn(0, Decimal(id));
            }
            else if (bPrintLiveThreadsOnly)
            {
                // No live OS thread; skip the row entirely in -live mode.
                CurThread = Thread.nextThread;
                continue;
            }
            else
            {
                table.WriteColumn(0, "XXXX ");
            }
        }

        table.WriteColumn(1, Decimal(Thread.corThreadId));
        table.WriteColumn(2, ThreadID(bSwitchedOutFiber ? 0 : Thread.osThreadId));
        table.WriteColumn(3, Pointer(CurThread));
        table.WriteColumn(4, ThreadState(Thread.state));
        table.WriteColumn(5, Thread.preemptiveGCDisabled == 1 ? "Cooperative" : "Preemptive");
        table.WriteColumnFormat(6, "%p:%p", Thread.allocContextPtr, Thread.allocContextLimit);

        // Domain: fall back to resolving it from the thread's context.
        if (Thread.domain)
        {
            table.WriteColumn(7, AppDomainPtr(Thread.domain));
        }
        else
        {
            CLRDATA_ADDRESS domain = 0;
            if (FAILED(g_sos->GetDomainFromContext(Thread.context, &domain)))
                table.WriteColumn(7, "<error>");
            else
                table.WriteColumn(7, AppDomainPtr(domain));
        }

        table.WriteColumn(8, Decimal(Thread.lockCount));

        // Apartment state
#ifndef FEATURE_PAL
        // Read the COM apartment flags out of the thread's OLE TLS data
        // (TEB.ReservedForOle -> SOleTlsData.dwFlags); Windows targets only.
        DWORD_PTR OleTlsDataAddr;
        if (IsWindowsTarget() && !bSwitchedOutFiber
                && SafeReadMemory(Thread.teb + offsetof(TEB, ReservedForOle),
                            &OleTlsDataAddr,
                            sizeof(OleTlsDataAddr), NULL) && OleTlsDataAddr != 0)
        {
            DWORD AptState;
            if (SafeReadMemory(OleTlsDataAddr+offsetof(SOleTlsData,dwFlags),
                               &AptState,
                               sizeof(AptState), NULL))
            {
                if (AptState & OLETLS_APARTMENTTHREADED)
                    table.WriteColumn(9, "STA");
                else if (AptState & OLETLS_MULTITHREADED)
                    table.WriteColumn(9, "MTA");
                else if (AptState & OLETLS_INNEUTRALAPT)
                    table.WriteColumn(9, "NTA");
                else
                    table.WriteColumn(9, "Ukn");
            }
            else
            {
                table.WriteColumn(9, "Ukn");
            }
        }
        else
#endif // FEATURE_PAL
            table.WriteColumn(9, "Ukn");

        if (hosted)
            table.WriteColumn(10, Thread.fiberData);

        // Final column: special-role annotations plus the last thrown
        // exception (type name and object address) when one is present.
        WString lastCol;
        if (CurThread == ThreadStore.finalizerThread)
            lastCol += W("(Finalizer) ");
        if (CurThread == ThreadStore.gcThread)
            lastCol += W("(GC) ");

        const int TS_TPWorkerThread = 0x01000000;    // is this a threadpool worker thread?
        const int TS_CompletionPortThread = 0x08000000;    // is this is a completion port thread?

        if (Thread.state & TS_TPWorkerThread)
            lastCol += W("(Threadpool Worker) ");
        else if (Thread.state & TS_CompletionPortThread)
            lastCol += W("(Threadpool Completion Port) ");

        TADDR taLTOH;
        if (Thread.lastThrownObjectHandle && SafeReadMemory(TO_TADDR(Thread.lastThrownObjectHandle),
                                                            &taLTOH, sizeof(taLTOH), NULL) && taLTOH)
        {
            TADDR taMT;
            if (SafeReadMemory(taLTOH, &taMT, sizeof(taMT), NULL))
            {
                if (NameForMT_s(taMT, g_mdName, mdNameLen))
                    lastCol += WString(g_mdName) + W(" ") + ExceptionPtr(taLTOH);
                else
                    lastCol += WString(W("<Invalid Object> (")) + Pointer(taLTOH) + W(")");

                // Print something if there are nested exceptions on the thread
                if (Thread.firstNestedException)
                    lastCol += W(" (nested exceptions)");
            }
        }

        table.WriteColumn(lastCol);
        CurThread = Thread.nextThread;
    }

    return Status;
}
#ifndef FEATURE_PAL
// Walks every OS thread in the debuggee, reads the CLR's TLS data for each,
// and prints the threads that have a special role (GC, Finalizer, Timer,
// threadpool worker, ...). Windows-only: relies on the TEB TLS layout.
// Temporarily switches the debugger's current thread for each iteration and
// restores the original thread at the end.
HRESULT PrintSpecialThreads()
{
    Print("\n");

    // The CLR stores its per-thread data in a TLS slot; get that index first.
    DWORD dwCLRTLSDataIndex = 0;
    HRESULT Status = g_sos->GetTLSIndex(&dwCLRTLSDataIndex);

    if (!SUCCEEDED (Status))
    {
        Print("Failed to retrieve Tls Data index\n");
        return Status;
    }

    // Remember the current debugger thread so it can be restored later.
    ULONG ulOriginalThreadID = 0;
    Status = g_ExtSystem->GetCurrentThreadId (&ulOriginalThreadID);
    if (!SUCCEEDED (Status))
    {
        Print("Failed to require current Thread ID\n");
        return Status;
    }

    ULONG ulTotalThreads = 0;
    Status = g_ExtSystem->GetNumberThreads (&ulTotalThreads);
    if (!SUCCEEDED (Status))
    {
        Print("Failed to require total thread number\n");
        return Status;
    }

    TableOutput table(3, 4, AlignRight, 5);
    table.WriteRow("", "OSID", "Special thread type");

    for (ULONG ulThread = 0; ulThread < ulTotalThreads; ulThread++)
    {
        ULONG Id = 0;
        ULONG SysId = 0;
        HRESULT threadStatus = g_ExtSystem->GetThreadIdsByIndex(ulThread, 1, &Id, &SysId);
        if (!SUCCEEDED (threadStatus))
        {
            PrintLn("Failed to get thread ID for thread ", Decimal(ulThread));
            continue;
        }

        // Switch to the thread so we can query its TEB.
        threadStatus = g_ExtSystem->SetCurrentThreadId(Id);
        if (!SUCCEEDED (threadStatus))
        {
            PrintLn("Failed to switch to thread ", ThreadID(SysId));
            continue;
        }

        CLRDATA_ADDRESS cdaTeb = 0;
        threadStatus = g_ExtSystem->GetCurrentThreadTeb(&cdaTeb);
        if (!SUCCEEDED (threadStatus))
        {
            PrintLn("Failed to get Teb for Thread ", ThreadID(SysId));
            continue;
        }

        // TEB.ThreadLocalStoragePointer -> per-module TLS array.
        TADDR CLRTLSDataAddr = 0;

        TADDR tlsArrayAddr = NULL;
        if (!SafeReadMemory (TO_TADDR(cdaTeb) + WINNT_OFFSETOF__TEB__ThreadLocalStoragePointer , &tlsArrayAddr, sizeof (void**), NULL))
        {
            PrintLn("Failed to get Tls expansion slots for thread ", ThreadID(SysId));
            continue;
        }

        if (tlsArrayAddr == NULL)
        {
            continue;
        }

        // dwCLRTLSDataIndex appears to pack the TLS slot in the low 16 bits
        // and an offset into that module's TLS block in the upper bits —
        // matches the arithmetic below; confirm against the runtime.
        TADDR moduleTlsDataAddr = 0;
        if (!SafeReadMemory (tlsArrayAddr + sizeof (void*) * (dwCLRTLSDataIndex & 0xFFFF), &moduleTlsDataAddr, sizeof (void**), NULL))
        {
            PrintLn("Failed to get Tls expansion slots for thread ", ThreadID(SysId));
            continue;
        }

        CLRTLSDataAddr = moduleTlsDataAddr + ((dwCLRTLSDataIndex & 0x7FFF0000) >> 16) + OFFSETOF__TLS__tls_EETlsData;

        TADDR CLRTLSData = NULL;
        if (!SafeReadMemory (CLRTLSDataAddr, &CLRTLSData, sizeof (TADDR), NULL))
        {
            PrintLn("Failed to get CLR Tls data for thread ", ThreadID(SysId));
            continue;
        }

        if (CLRTLSData == NULL)
        {
            continue;
        }

        // The thread-type bitmask lives at slot TlsIdx_ThreadType.
        size_t ThreadType = 0;
        if (!SafeReadMemory (CLRTLSData + sizeof (TADDR) * TlsIdx_ThreadType, &ThreadType, sizeof (size_t), NULL))
        {
            PrintLn("Failed to get thread type info not found for thread ", ThreadID(SysId));
            continue;
        }

        // Threads with no special role are omitted from the table.
        if (ThreadType == 0)
        {
            continue;
        }

        table.WriteColumn(0, Decimal(Id));
        table.WriteColumn(1, ThreadID(SysId));

        // Decode each ThreadType_* bit into a readable label.
        String type;
        if (ThreadType & ThreadType_GC)
        {
            type += "GC ";
        }
        if (ThreadType & ThreadType_Timer)
        {
            type += "Timer ";
        }
        if (ThreadType & ThreadType_Gate)
        {
            type += "Gate ";
        }
        if (ThreadType & ThreadType_DbgHelper)
        {
            type += "DbgHelper ";
        }
        if (ThreadType & ThreadType_Shutdown)
        {
            type += "Shutdown ";
        }
        if (ThreadType & ThreadType_DynamicSuspendEE)
        {
            type += "SuspendEE ";
        }
        if (ThreadType & ThreadType_Finalizer)
        {
            type += "Finalizer ";
        }
        if (ThreadType & ThreadType_ADUnloadHelper)
        {
            type += "ADUnloadHelper ";
        }
        if (ThreadType & ThreadType_ShutdownHelper)
        {
            type += "ShutdownHelper ";
        }
        if (ThreadType & ThreadType_Threadpool_IOCompletion)
        {
            type += "IOCompletion ";
        }
        if (ThreadType & ThreadType_Threadpool_Worker)
        {
            type += "ThreadpoolWorker ";
        }
        if (ThreadType & ThreadType_Wait)
        {
            type += "Wait ";
        }
        if (ThreadType & ThreadType_ProfAPI_Attach)
        {
            type += "ProfilingAPIAttach ";
        }
        if (ThreadType & ThreadType_ProfAPI_Detach)
        {
            type += "ProfilingAPIDetach ";
        }

        table.WriteColumn(2, type);
    }

    // Restore the debugger's original current thread.
    Status = g_ExtSystem->SetCurrentThreadId (ulOriginalThreadID);
    if (!SUCCEEDED (Status))
    {
        ExtOut("Failed to switch to original thread\n");
        return Status;
    }

    return Status;
}
#endif //FEATURE_PAL
// Walks the managed thread list and switches the debugger's current thread to
// the first thread whose last-thrown-object handle points at a live object
// (i.e. the first thread with a pending managed exception).
HRESULT SwitchToExceptionThread()
{
    HRESULT Status;
    DacpThreadStoreData ThreadStore;
    if ((Status = ThreadStore.Request(g_sos)) != S_OK)
    {
        Print("Failed to request ThreadStore\n");
        return Status;
    }

    DacpThreadData Thread;
    for (CLRDATA_ADDRESS CurThread = ThreadStore.firstThread; CurThread; CurThread = Thread.nextThread)
    {
        if (IsInterrupt())
            break;

        if ((Status = Thread.Request(g_sos, CurThread)) != S_OK)
        {
            PrintLn("Failed to request Thread at ", Pointer(CurThread));
            return Status;
        }

        // Skip threads without an exception handle, or whose handle does not
        // dereference to a live object.
        if (Thread.lastThrownObjectHandle == NULL)
            continue;

        TADDR taLTOH;
        if (!SafeReadMemory(TO_TADDR(Thread.lastThrownObjectHandle), &taLTOH, sizeof(taLTOH), NULL) || taLTOH == NULL)
            continue;

        ULONG id;
        if (g_ExtSystem->GetThreadIdBySystemId(Thread.osThreadId, &id) == S_OK
            && g_ExtSystem->SetCurrentThreadId(id) == S_OK)
        {
            PrintLn("Found managed exception on thread ", ThreadID(Thread.osThreadId));
            break;
        }
    }

    return Status;
}
// One entry of the thread-state decoding table: a single state bit and its
// human-readable name (used by the !ThreadState command below).
struct ThreadStateTable
{
    unsigned int State;
    const char * Name;
};
// Bit-to-name table consumed by the !ThreadState command; one entry per
// thread state flag. NOTE(review): these values mirror the runtime's thread
// state enum — keep in sync with the runtime if it changes.
static const struct ThreadStateTable ThreadStates[] =
{
    {0x1, "Thread Abort Requested"},
    {0x2, "GC Suspend Pending"},
    {0x4, "User Suspend Pending"},
    {0x8, "Debug Suspend Pending"},
    {0x10, "GC On Transitions"},
    {0x20, "Legal to Join"},
    {0x40, "Yield Requested"},
    {0x80, "Hijacked by the GC"},
    {0x100, "Blocking GC for Stack Overflow"},
    {0x200, "Background"},
    {0x400, "Unstarted"},
    {0x800, "Dead"},
    {0x1000, "CLR Owns"},
    {0x2000, "CoInitialized"},
    {0x4000, "In Single Threaded Apartment"},
    {0x8000, "In Multi Threaded Apartment"},
    {0x10000, "Reported Dead"},
    {0x20000, "Fully initialized"},
    {0x40000, "Task Reset"},
    {0x80000, "Sync Suspended"},
    {0x100000, "Debug Will Sync"},
    {0x200000, "Stack Crawl Needed"},
    {0x400000, "Suspend Unstarted"},
    {0x800000, "Aborted"},
    {0x1000000, "Thread Pool Worker Thread"},
    {0x2000000, "Interruptible"},
    {0x4000000, "Interrupted"},
    {0x8000000, "Completion Port Thread"},
    {0x10000000, "Abort Initiated"},
    {0x20000000, "Finalized"},
    {0x40000000, "Failed to Start"},
    {0x80000000, "Detached"},
};
// !ThreadState: decodes a raw thread state value (see ThreadStates above) and
// prints the name of every bit that is set.
DECLARE_API(ThreadState)
{
    INIT_API_NODAC();

    size_t state = GetExpression(args);
    int matches = 0;

    if (state)
    {
        for (unsigned int i = 0; i < _countof(ThreadStates); ++i)
        {
            if ((state & ThreadStates[i].State) == 0)
                continue;
            ExtOut(" %s\n", ThreadStates[i].Name);
            matches++;
        }
    }

    // If we did not find any thread states, print out a message to let the user
    // know that the function is working correctly.
    if (matches == 0)
        ExtOut(" No thread states for '%s'\n", args);

    return Status;
}
// !Threads: prints the managed thread table. Switches:
//   -live              only threads with a live OS thread
//   -special           also list CLR special threads (Windows, full dumps only)
//   -managedexception  switch the debugger to the first thread with a
//                      pending managed exception instead of printing
//   /d                 DML output
DECLARE_API(Threads)
{
    INIT_API();

    BOOL bPrintSpecialThreads = FALSE;
    BOOL bPrintLiveThreadsOnly = FALSE;
    BOOL bSwitchToManagedExceptionThread = FALSE;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-special", &bPrintSpecialThreads, COBOOL, FALSE},
        {"-live", &bPrintLiveThreadsOnly, COBOOL, FALSE},
        {"-managedexception", &bSwitchToManagedExceptionThread, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }

    if (bSwitchToManagedExceptionThread)
    {
        return SwitchToExceptionThread();
    }

    // We need to support minidumps for this command.
    BOOL bMiniDump = IsMiniDumpFile();
    EnableDMLHolder dmlHolder(dml);

    try
    {
        Status = PrintThreadsFromThreadStore(bMiniDump, bPrintLiveThreadsOnly);
        if (bPrintSpecialThreads)
        {
#ifdef FEATURE_PAL
            Print("\n-special not supported.\n");
#else //FEATURE_PAL
            // Special-thread info needs TEB access: Windows targets, full dumps.
            BOOL bSupported = true;

            if (!IsWindowsTarget())
            {
                Print("Special thread information is only supported on Windows targets.\n");
                bSupported = false;
            }
            else if (bMiniDump)
            {
                Print("Special thread information is not available in mini dumps.\n");
                bSupported = false;
            }

            if (bSupported)
            {
                HRESULT Status2 = PrintSpecialThreads();
                if (!SUCCEEDED(Status2))
                    Status = Status2;
            }
#endif // FEATURE_PAL
        }
    }
    catch (sos::Exception &e)
    {
        ExtOut("%s\n", e.what());
    }

    return Status;
}
#ifndef FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the Watson Buckets. *
* *
\**********************************************************************/
// !WatsonBuckets: prints the nine Watson bucket parameters for the managed
// exception on the current thread. Windows targets only; not available in
// mini dumps.
DECLARE_API(WatsonBuckets)
{
    INIT_API();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    // Watson bucket data is not available in mini dumps; bail out here.
    // (Previously this only printed a warning and fell through, which then
    // produced a confusing secondary failure below.)
    if (IsMiniDumpFile())
    {
        ExtOut("Not supported on mini dumps.\n");
        return Status;
    }

    // Get the current managed thread.
    CLRDATA_ADDRESS threadAddr = GetCurrentManagedThread();
    DacpThreadData Thread;

    if ((threadAddr == NULL) || ((Status = Thread.Request(g_sos, threadAddr)) != S_OK))
    {
        ExtOut("The current thread is unmanaged\n");
        return Status;
    }

    // Get the definition of GenericModeBlock.
#include <msodw.h>
    GenericModeBlock gmb;

    if ((Status = g_sos->GetClrWatsonBuckets(threadAddr, &gmb)) != S_OK)
    {
        ExtOut("Can't get Watson Buckets\n");
        return Status;
    }

    ExtOut("Watson Bucket parameters:\n");
    ExtOut("b1: %S\n", gmb.wzP1);
    ExtOut("b2: %S\n", gmb.wzP2);
    ExtOut("b3: %S\n", gmb.wzP3);
    ExtOut("b4: %S\n", gmb.wzP4);
    ExtOut("b5: %S\n", gmb.wzP5);
    ExtOut("b6: %S\n", gmb.wzP6);
    ExtOut("b7: %S\n", gmb.wzP7);
    ExtOut("b8: %S\n", gmb.wzP8);
    ExtOut("b9: %S\n", gmb.wzP9);

    return Status;
} // WatsonBuckets()
#endif // FEATURE_PAL
struct PendingBreakpoint
{
WCHAR szModuleName[MAX_LONGPATH];
WCHAR szFunctionName[mdNameLen];
WCHAR szFilename[MAX_LONGPATH];
DWORD lineNumber;
TADDR pModule;
DWORD ilOffset;
mdMethodDef methodToken;
void SetModule(TADDR module)
{
pModule = module;
}
bool ModuleMatches(TADDR compare)
{
return (compare == pModule);
}
PendingBreakpoint *pNext;
PendingBreakpoint() : lineNumber(0), ilOffset(0), methodToken(0), pNext(NULL)
{
szModuleName[0] = L'\0';
szFunctionName[0] = L'\0';
szFilename[0] = L'\0';
}
};
// Issues a native debugger "set breakpoint" command for the given code
// address, annotated with the MethodDesc name. Addresses already set are
// remembered in a bounded cache so duplicates are skipped; once the cache
// overflows, every new address is treated as unique.
void IssueDebuggerBPCommand ( CLRDATA_ADDRESS addr )
{
    const int MaxBPsCached = 1024;
    static CLRDATA_ADDRESS cachedAddresses[MaxBPsCached];
    static int cachedCount = 0;

    // on ARM the debugger requires breakpoint addresses to be sanitized
    if (IsDbgTargetArm())
#ifndef FEATURE_PAL
        addr &= ~THUMB_CODE;
#else
        addr |= THUMB_CODE; // lldb expects thumb code bit set
#endif

    // Scan the cache for a duplicate; a full cache means we can no longer
    // tell, so everything is considered new.
    BOOL bDuplicate = FALSE;
    if (cachedCount < MaxBPsCached)
    {
        for (int i = 0; i < cachedCount; ++i)
        {
            if (cachedAddresses[i] == addr)
            {
                bDuplicate = TRUE;
                break;
            }
        }
    }

    if (bDuplicate)
        return;

    char buffer[64]; // sufficient for "bp <pointersize>"
    static WCHAR wszNameBuffer[1024]; // should be large enough

    // get the MethodDesc name
    CLRDATA_ADDRESS pMD;
    if (g_sos->GetMethodDescPtrFromIP(addr, &pMD) != S_OK
        || g_sos->GetMethodDescName(pMD, 1024, wszNameBuffer, NULL) != S_OK)
    {
        wcscpy_s(wszNameBuffer, _countof(wszNameBuffer), W("UNKNOWN"));
    }

#ifndef FEATURE_PAL
    sprintf_s(buffer, _countof(buffer), "bp %p", (void*) (size_t) addr);
#else
    sprintf_s(buffer, _countof(buffer), "breakpoint set --address 0x%p", (void*) (size_t) addr);
#endif
    ExtOut("Setting breakpoint: %s [%S]\n", buffer, wszNameBuffer);
    g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer, 0);

    if (cachedCount < MaxBPsCached)
    {
        cachedAddresses[cachedCount++] = addr;
    }
}
class Breakpoints
{
PendingBreakpoint* m_breakpoints;
public:
// Start with an empty pending-breakpoint list.
Breakpoints()
{
    m_breakpoints = NULL;
}
// Free every node in the pending-breakpoint list.
~Breakpoints()
{
    for (PendingBreakpoint *pNode = m_breakpoints; pNode != NULL; )
    {
        PendingBreakpoint *pNext = pNode->pNext;
        delete pNode;
        pNode = pNext;
    }
    m_breakpoints = NULL;
}
// Queue a name-based pending breakpoint (module!method+ilOffset), unless an
// identical entry is already queued.
void Add(__in_z LPWSTR szModule, __in_z LPWSTR szName, TADDR mod, DWORD ilOffset)
{
    if (IsIn(szModule, szName, mod))
        return;

    PendingBreakpoint *pBreakpoint = new PendingBreakpoint();
    wcscpy_s(pBreakpoint->szModuleName, MAX_LONGPATH, szModule);
    wcscpy_s(pBreakpoint->szFunctionName, mdNameLen, szName);
    pBreakpoint->SetModule(mod);
    pBreakpoint->ilOffset = ilOffset;

    // Push onto the head of the list.
    pBreakpoint->pNext = m_breakpoints;
    m_breakpoints = pBreakpoint;
}
// Queue a token-based pending breakpoint (methodToken+ilOffset in a module),
// keeping the human-readable module/method names for display. Skips exact
// duplicates.
void Add(__in_z LPWSTR szModule, __in_z LPWSTR szName, mdMethodDef methodToken, TADDR mod, DWORD ilOffset)
{
    if (IsIn(methodToken, mod, ilOffset))
        return;

    PendingBreakpoint *pBreakpoint = new PendingBreakpoint();
    wcscpy_s(pBreakpoint->szModuleName, MAX_LONGPATH, szModule);
    wcscpy_s(pBreakpoint->szFunctionName, mdNameLen, szName);
    pBreakpoint->methodToken = methodToken;
    pBreakpoint->SetModule(mod);
    pBreakpoint->ilOffset = ilOffset;

    // Push onto the head of the list.
    pBreakpoint->pNext = m_breakpoints;
    m_breakpoints = pBreakpoint;
}
// Queue a filename:line pending breakpoint bound to a module, skipping exact
// duplicates.
void Add(__in_z LPWSTR szFilename, DWORD lineNumber, TADDR mod)
{
    if (IsIn(szFilename, lineNumber, mod))
        return;

    PendingBreakpoint *pBreakpoint = new PendingBreakpoint();
    wcscpy_s(pBreakpoint->szFilename, MAX_LONGPATH, szFilename);
    pBreakpoint->lineNumber = lineNumber;
    pBreakpoint->SetModule(mod);

    // Push onto the head of the list.
    pBreakpoint->pNext = m_breakpoints;
    m_breakpoints = pBreakpoint;
}
// Queue a filename:line pending breakpoint that has already been resolved to
// a method token + IL offset. Skips exact token duplicates.
void Add(__in_z LPWSTR szFilename, DWORD lineNumber, mdMethodDef methodToken, TADDR mod, DWORD ilOffset)
{
    if (IsIn(methodToken, mod, ilOffset))
        return;

    PendingBreakpoint *pBreakpoint = new PendingBreakpoint();
    wcscpy_s(pBreakpoint->szFilename, MAX_LONGPATH, szFilename);
    pBreakpoint->lineNumber = lineNumber;
    pBreakpoint->methodToken = methodToken;
    pBreakpoint->SetModule(mod);
    pBreakpoint->ilOffset = ilOffset;

    // Push onto the head of the list.
    pBreakpoint->pNext = m_breakpoints;
    m_breakpoints = pBreakpoint;
}
//returns true if updates are still needed for this module, FALSE if all BPs are now bound
BOOL Update(TADDR mod, BOOL isNewModule)
{
    BOOL bNeedUpdates = FALSE;
    PendingBreakpoint *pCur = NULL;

    if(isNewModule)
    {
        // First sighting of this module: load its symbols and convert
        // name- and file/line-based requests into token-based entries.
        SymbolReader symbolReader;
        SymbolReader* pSymReader = &symbolReader;
        if(LoadSymbolsForModule(mod, &symbolReader) != S_OK)
            pSymReader = NULL;

        // Get tokens for any modules that match. If there was a change,
        // update notifications.
        pCur = m_breakpoints;
        while(pCur)
        {
            // Capture the successor first: resolving may push new entries
            // onto the head of the list.
            PendingBreakpoint *pNext = pCur->pNext;
            ResolvePendingNonModuleBoundBreakpoint(mod, pCur, pSymReader);
            pCur = pNext;
        }
    }

    // Now try to bind every pending breakpoint against this module.
    pCur = m_breakpoints;
    while(pCur)
    {
        PendingBreakpoint *pNext = pCur->pNext;
        if (ResolvePendingBreakpoint(mod, pCur))
        {
            bNeedUpdates = TRUE;
        }
        pCur = pNext;
    }
    return bNeedUpdates;
}
// If any pending breakpoint is bound to module 'mod', issue a debugger
// breakpoint at bpLocation. Returns TRUE when a breakpoint was placed.
BOOL UpdateKnownCodeAddress(TADDR mod, CLRDATA_ADDRESS bpLocation)
{
    for (PendingBreakpoint *pBP = m_breakpoints; pBP != NULL; pBP = pBP->pNext)
    {
        if (pBP->ModuleMatches(mod))
        {
            IssueDebuggerBPCommand(bpLocation);
            return TRUE;
        }
    }
    return FALSE;
}
// Drop every pending breakpoint bound to the given module (e.g. on module
// unload). Delete() is defined elsewhere — presumably it unlinks the node
// from m_breakpoints and frees it; confirm before relying on that.
void RemovePendingForModule(TADDR mod)
{
    PendingBreakpoint *pCur = m_breakpoints;
    while(pCur)
    {
        // Capture the successor before Delete() invalidates pCur.
        PendingBreakpoint *pNext = pCur->pNext;
        if (pCur->ModuleMatches(mod))
        {
            // Delete the current node, and keep going
            Delete(pCur);
        }

        pCur = pNext;
    }
}
// Print every pending breakpoint with its 1-based index (the index that
// ClearBreakpoint accepts), its location, module address and method token.
void ListBreakpoints()
{
    PendingBreakpoint *pCur = m_breakpoints;
    size_t iBreakpointIndex = 1;
    ExtOut(SOSPrefix "bpmd pending breakpoint list\n Breakpoint index - Location, ModuleID, Method Token\n");
    while(pCur)
    {
        //windbg likes to format %p as always being 64 bits
        ULONG64 modulePtr = (ULONG64)pCur->pModule;

        // Fix: cast the size_t index to int for the %d specifier — passing
        // a raw size_t through varargs is a type mismatch on 64-bit targets.
        if(pCur->szModuleName[0] != L'\0')
            ExtOut("%d - %ws!%ws+%d, 0x%p, 0x%08x\n", (int)iBreakpointIndex, pCur->szModuleName, pCur->szFunctionName, pCur->ilOffset, modulePtr, pCur->methodToken);
        else
            ExtOut("%d - %ws:%d, 0x%p, 0x%08x\n", (int)iBreakpointIndex, pCur->szFilename, pCur->lineNumber, modulePtr, pCur->methodToken);

        iBreakpointIndex++;
        pCur = pCur->pNext;
    }
}
#ifndef FEATURE_PAL
// Write each pending breakpoint back out as the !bpmd command line that
// would recreate it (used to persist breakpoints).
void SaveBreakpoints(FILE* pFile)
{
    for (PendingBreakpoint *pBP = m_breakpoints; pBP != NULL; pBP = pBP->pNext)
    {
        if (pBP->szModuleName[0] != L'\0')
            fprintf_s(pFile, "!bpmd %ws %ws %d\n", pBP->szModuleName, pBP->szFunctionName, pBP->ilOffset);
        else
            fprintf_s(pFile, "!bpmd %ws:%d\n", pBP->szFilename, pBP->lineNumber);
    }
}
#endif
// On the PAL (lldb) side the exception callback is only needed while pending
// breakpoints exist; release it once the list is empty. No-op elsewhere.
void CleanupNotifications()
{
#ifdef FEATURE_PAL
    if (m_breakpoints == NULL)
    {
        g_ExtServices->ClearExceptionCallback();
    }
#endif
}
// Remove the pending breakpoint with the given 1-based index (as printed by
// ListBreakpoints). Prints an error if the index is out of range.
void ClearBreakpoint(size_t breakPointToClear)
{
    PendingBreakpoint *pCur = m_breakpoints;
    size_t iBreakpointIndex = 1;
    while(pCur)
    {
        if (breakPointToClear == iBreakpointIndex)
        {
            // Fix: cast the size_t index for %d and wrap the TADDR in
            // SOS_PTR for %p (consistent with the rest of this file) —
            // passing them raw through varargs is wrong on 64-bit targets.
            ExtOut("%d - %ws, %ws, %p\n", (int)iBreakpointIndex, pCur->szModuleName, pCur->szFunctionName, SOS_PTR(pCur->pModule));
            ExtOut("Cleared\n");
            Delete(pCur);
            break;
        }
        iBreakpointIndex++;
        pCur = pCur->pNext;
    }

    if (pCur == NULL)
    {
        ExtOut("Invalid pending breakpoint index.\n");
    }
    CleanupNotifications();
}
// Remove every pending breakpoint, then release the exception notification
// callback if nothing remains. (Removed an index counter that was
// incremented but never used.)
void ClearAllBreakpoints()
{
    for (PendingBreakpoint *pCur = m_breakpoints; pCur != NULL; )
    {
        // Capture the successor before Delete() invalidates pCur.
        PendingBreakpoint* pNext = pCur->pNext;
        Delete(pCur);
        pCur = pNext;
    }
    CleanupNotifications();

    ExtOut("All pending breakpoints cleared.\n");
}
// Load debug symbols for the module at 'mod' into pSymbolReader.
// Returns S_OK on success or the failing HRESULT (via IfFailRet).
HRESULT LoadSymbolsForModule(TADDR mod, SymbolReader* pSymbolReader)
{
    HRESULT Status = S_OK;  // consumed by the IfFailRet macro
    ToRelease<IXCLRDataModule> pModule;
    IfFailRet(g_sos->GetModule(mod, &pModule));

    ToRelease<IMetaDataImport> pMDImport = NULL;
    // NOTE(review): the QueryInterface result is ignored, so pMDImport may
    // still be NULL here — presumably LoadSymbols tolerates that; confirm.
    pModule->QueryInterface(IID_IMetaDataImport, (LPVOID *) &pMDImport);

    IfFailRet(pSymbolReader->LoadSymbols(pMDImport, pModule));
    return S_OK;
}
// Try to map filename:line to a methodDef + IL offset using the module's
// symbols; on success queue a token-based pending breakpoint for it.
// Returns S_FALSE when the location does not bind in this module — that is
// the normal case, not an error.
HRESULT ResolvePendingNonModuleBoundBreakpoint(__in_z WCHAR* pFilename, DWORD lineNumber, TADDR mod, SymbolReader* pSymbolReader)
{
    HRESULT Status = S_OK;

    if(pSymbolReader == NULL)
        return S_FALSE; // no symbols, can't bind here

    mdMethodDef methodDef;
    ULONG32 ilOffset;
    if(FAILED(Status = pSymbolReader->ResolveSequencePoint(pFilename, lineNumber, &methodDef, &ilOffset)))
    {
        return S_FALSE; // not binding in a module is typical
    }

    Add(pFilename, lineNumber, methodDef, mod, ilOffset);
    return Status;
}
// When 'mod' is one of the modules matching pModuleName, enumerate every
// method definition named pMethodName in it and queue a token-based pending
// breakpoint for each.
HRESULT ResolvePendingNonModuleBoundBreakpoint(__in_z WCHAR* pModuleName, __in_z WCHAR* pMethodName, TADDR mod, DWORD ilOffset)
{
    HRESULT Status = S_OK;
    char szName[mdNameLen];
    int numModule;

    ToRelease<IXCLRDataModule> module;
    IfFailRet(g_sos->GetModule(mod, &module));

    WideCharToMultiByte(CP_ACP, 0, pModuleName, (int)(_wcslen(pModuleName) + 1), szName, mdNameLen, NULL, NULL);

    ArrayHolder<DWORD_PTR> moduleList = ModuleFromName(szName, &numModule);
    if (moduleList == NULL)
    {
        ExtOut("Failed to request module list.\n");
        return E_FAIL;
    }

    for (int i = 0; i < numModule; i++)
    {
        // If any one entry in moduleList matches, then the current PendingBreakpoint
        // is the right one.
        if(moduleList[i] != TO_TADDR(mod))
            continue;

        CLRDATA_ENUM h;
        if (module->StartEnumMethodDefinitionsByName(pMethodName, 0, &h) == S_OK)
        {
            IXCLRDataMethodDefinition *pMeth = NULL;
            while (module->EnumMethodDefinitionByName(&h, &pMeth) == S_OK)
            {
                mdMethodDef methodToken;
                ToRelease<IXCLRDataModule> pUnusedModule;
                // Fix: on failure release pMeth and close the enumeration
                // before returning — the previous code returned via
                // IfFailRet here and leaked both.
                Status = pMeth->GetTokenAndScope(&methodToken, &pUnusedModule);
                if (FAILED(Status))
                {
                    pMeth->Release();
                    module->EndEnumMethodDefinitionsByName(h);
                    return Status;
                }
                Add(pModuleName, pMethodName, methodToken, mod, ilOffset);
                pMeth->Release();
            }
            module->EndEnumMethodDefinitionsByName(h);
        }
    }

    return S_OK;
}
// Sets debugger breakpoints on every jitted instance of the given method
// definition at the requested IL offset, falling back to the representative
// entry address when asking for offset 0 with no mapped ranges.
// Return TRUE if there might be more instances that will be JITTED later
// (no code was found yet, or the method is generic) -- in that case a code
// notification has been requested so we are called back on future jits.
static BOOL ResolveMethodInstances(IXCLRDataMethodDefinition *pMeth, DWORD ilOffset)
{
    BOOL bFoundCode = FALSE;
    BOOL bNeedDefer = FALSE;
    CLRDATA_ENUM h1;

    if (pMeth->StartEnumInstances (NULL, &h1) == S_OK)
    {
        IXCLRDataMethodInstance *inst = NULL;
        // NOTE(review): 'inst' is not Released between iterations -- looks
        // like an interface leak; confirm ownership semantics before changing.
        while (pMeth->EnumInstance (&h1, &inst) == S_OK)
        {
            BOOL foundByIlOffset = FALSE;
            ULONG32 rangesNeeded = 0;

            // First probe (cbNeeded call) for how many native address ranges
            // map to this IL offset in this instance.
            if(inst->GetAddressRangesByILOffset(ilOffset, 0, &rangesNeeded, NULL) == S_OK)
            {
                ArrayHolder<CLRDATA_ADDRESS_RANGE> ranges = new NOTHROW CLRDATA_ADDRESS_RANGE[rangesNeeded];
                if (ranges != NULL)
                {
                    if (inst->GetAddressRangesByILOffset(ilOffset, rangesNeeded, NULL, ranges) == S_OK)
                    {
                        // Put a breakpoint at the start of every native range
                        // that maps to the requested IL offset.
                        for (DWORD i = 0; i < rangesNeeded; i++)
                        {
                            IssueDebuggerBPCommand(ranges[i].startAddress);
                            bFoundCode = TRUE;
                            foundByIlOffset = TRUE;
                        }
                    }
                }
            }

            // No range mapped and the caller wanted the method start: use the
            // representative entry address instead.
            if (!foundByIlOffset && ilOffset == 0)
            {
                CLRDATA_ADDRESS addr = 0;
                if (inst->GetRepresentativeEntryAddress(&addr) == S_OK)
                {
                    IssueDebuggerBPCommand(addr);
                    bFoundCode = TRUE;
                }
            }
        }
        pMeth->EndEnumInstances (h1);
    }

    // if this is a generic method we need to add a deferred bp
    BOOL bGeneric = FALSE;
    pMeth->HasClassOrMethodInstantiation(&bGeneric);

    bNeedDefer = !bFoundCode || bGeneric;
    // This is down here because we only need to call SetCodeNofiication once.
    if (bNeedDefer)
    {
        if (pMeth->SetCodeNotification (CLRDATA_METHNOTIFY_GENERATED) != S_OK)
        {
            bNeedDefer = FALSE;
            ExtOut("Failed to set code notification\n");
        }
    }
    return bNeedDefer;
}
private:
// Returns TRUE if a pending breakpoint for szModule!szName already exists
// against the given module address.
BOOL IsIn(__in_z LPWSTR szModule, __in_z LPWSTR szName, TADDR mod)
{
    for (PendingBreakpoint *pBP = m_breakpoints; pBP != NULL; pBP = pBP->pNext)
    {
        // Module name comparison is case-insensitive; function name is exact.
        if (pBP->ModuleMatches(mod) &&
            _wcsicmp(pBP->szModuleName, szModule) == 0 &&
            _wcscmp(pBP->szFunctionName, szName) == 0)
        {
            return TRUE;
        }
    }
    return FALSE;
}
// Returns TRUE if a pending breakpoint for szFilename:lineNumber already
// exists against the given module address.
BOOL IsIn(__in_z LPWSTR szFilename, DWORD lineNumber, TADDR mod)
{
    for (PendingBreakpoint *pBP = m_breakpoints; pBP != NULL; pBP = pBP->pNext)
    {
        // File-name comparison is case-insensitive.
        if (pBP->ModuleMatches(mod) &&
            _wcsicmp(pBP->szFilename, szFilename) == 0 &&
            pBP->lineNumber == lineNumber)
        {
            return TRUE;
        }
    }
    return FALSE;
}
// Returns TRUE if a pending breakpoint for the given method token and IL
// offset already exists against the given module address.
BOOL IsIn(mdMethodDef token, TADDR mod, DWORD ilOffset)
{
    for (PendingBreakpoint *pBP = m_breakpoints; pBP != NULL; pBP = pBP->pNext)
    {
        if (pBP->ModuleMatches(mod) &&
            pBP->methodToken == token &&
            pBP->ilOffset == ilOffset)
        {
            return TRUE;
        }
    }
    return FALSE;
}
// Unlinks 'pDelete' from the singly-linked pending-breakpoint list and frees
// it. No-op when the node is not present in the list.
void Delete(PendingBreakpoint *pDelete)
{
    PendingBreakpoint *pPrev = NULL;
    for (PendingBreakpoint *pNode = m_breakpoints; pNode != NULL; pPrev = pNode, pNode = pNode->pNext)
    {
        if (pNode != pDelete)
        {
            continue;
        }

        // Route the predecessor (or the list head) around the doomed node.
        if (pPrev == NULL)
        {
            m_breakpoints = pNode->pNext;
        }
        else
        {
            pPrev->pNext = pNode->pNext;
        }
        delete pNode;
        return;
    }
}
// Dispatches a not-yet-module-bound pending breakpoint to the appropriate
// overload. Entries already bound to a module are left untouched (S_OK).
HRESULT ResolvePendingNonModuleBoundBreakpoint(TADDR mod, PendingBreakpoint *pCur, SymbolReader* pSymbolReader)
{
    // This function only works with pending breakpoints that are not module bound.
    if (pCur->pModule != NULL)
    {
        return S_OK;
    }

    // A non-empty module name means the entry was given as module!function;
    // otherwise it came from a filename:line pair.
    if (pCur->szModuleName[0] != L'\0')
    {
        return ResolvePendingNonModuleBoundBreakpoint(pCur->szModuleName, pCur->szFunctionName, mod, pCur->ilOffset);
    }
    return ResolvePendingNonModuleBoundBreakpoint(pCur->szFilename, pCur->lineNumber, mod, pSymbolReader);
}
// Resolves one pending breakpoint against the module at 'addr'.
// Returns TRUE if further instances may be jitted, FALSE if all instances are
// now resolved (or the entry does not apply to this module).
// Fix: the result of GetMethodDefinitionByToken was previously ignored, so a
// failed lookup passed a NULL pMeth into ResolveMethodInstances, which
// dereferences it unconditionally.
BOOL ResolvePendingBreakpoint(TADDR addr, PendingBreakpoint *pCur)
{
    // Only go forward if the module matches the current PendingBreakpoint
    if (!pCur->ModuleMatches(addr))
    {
        return FALSE;
    }

    ToRelease<IXCLRDataModule> mod;
    if (FAILED(g_sos->GetModule(addr, &mod)))
    {
        return FALSE;
    }

    if (pCur->methodToken == 0)
    {
        return FALSE;
    }

    ToRelease<IXCLRDataMethodDefinition> pMeth = NULL;
    mod->GetMethodDefinitionByToken(pCur->methodToken, &pMeth);
    if (pMeth == NULL)
    {
        // Lookup failed; nothing to bind, and nothing more to defer here.
        return FALSE;
    }

    // We may not need the code notification. Maybe it was ngen'd and we
    // already have the method?
    // We can delete the current entry if ResolveMethodInstances() set all BPs
    return ResolveMethodInstances(pMeth, pCur->ilOffset);
}
};
// Global registry of managed (pending) breakpoints driven by the bpmd command.
Breakpoints g_bpoints;

// If true, call the HandleRuntimeLoadedNotification function to enable the assembly load and JIT exceptions
#ifndef FEATURE_PAL
bool g_breakOnRuntimeModuleLoad = false;
#endif

// Controls whether optimizations are disabled on module load and whether NGEN can be used
BOOL g_fAllowJitOptimization = TRUE;

// Controls whether a one-shot breakpoint should be inserted the next time
// execution is about to enter a catch clause
BOOL g_stopOnNextCatch = FALSE;
// According to the latest debuggers these callbacks will not get called
// unless the user (or an extension, like SOS :-)) had previously enabled
// clrn with "sxe clrn".
//
// Receives CLR data-access notifications (module load/unload, JIT, GC,
// exception-catch) and records the DEBUG_STATUS_* the dispatcher should apply
// afterwards via GetDebugStatus().
class CNotification : public IXCLRDataExceptionNotification5
{
    static int s_condemnedGen;      // generation condemned by the most recent GC_MARK_END event; -1 until one is seen

    int m_count;                    // COM reference count (clamped at zero; see Release below)
    int m_dbgStatus;                // DEBUG_STATUS_* requested by the last notification handled

public:
    CNotification()
        : m_count(0)
        , m_dbgStatus(DEBUG_STATUS_NO_CHANGE)
    {}

    // Execution status the caller should apply after dispatching the
    // notification (DEBUG_STATUS_NO_CHANGE until a callback runs).
    int GetDebugStatus()
    {
        return m_dbgStatus;
    }

    STDMETHODIMP QueryInterface (REFIID iid, void **ppvObject)
    {
        if (ppvObject == NULL)
            return E_INVALIDARG;

        if (IsEqualIID(iid, IID_IUnknown)
            || IsEqualIID(iid, IID_IXCLRDataExceptionNotification)
            || IsEqualIID(iid, IID_IXCLRDataExceptionNotification2)
            || IsEqualIID(iid, IID_IXCLRDataExceptionNotification3)
            || IsEqualIID(iid, IID_IXCLRDataExceptionNotification4)
            || IsEqualIID(iid, IID_IXCLRDataExceptionNotification5))
        {
            *ppvObject = static_cast<IXCLRDataExceptionNotification5*>(this);
            AddRef();
            return S_OK;
        }
        else
            return E_NOINTERFACE;
    }

    STDMETHODIMP_(ULONG) AddRef(void) { return ++m_count; }

    // NOTE(review): non-standard COM ref counting -- the count is clamped at
    // zero and the object never deletes itself; instances appear intended to
    // live on the stack (see HandleCLRNotificationEvent). Confirm before
    // heap-allocating one.
    STDMETHODIMP_(ULONG) Release(void)
    {
        m_count--;
        if (m_count < 0)
        {
            m_count = 0;
        }
        return m_count;
    }

    /*
     * New code was generated or discarded for a method.:
     */
    STDMETHODIMP OnCodeGenerated(IXCLRDataMethodInstance* method)
    {
#ifndef FEATURE_PAL
        _ASSERTE(g_pRuntime != nullptr);

        // This is only needed for desktop runtime because OnCodeGenerated2
        // isn't supported by the desktop DAC.
        if (g_pRuntime->GetRuntimeConfiguration() == IRuntime::WindowsDesktop)
        {
            // Some method has been generated, make a breakpoint and remove it.
            ULONG32 len = mdNameLen;
            LPWSTR szModuleName = (LPWSTR)alloca(mdNameLen * sizeof(WCHAR));
            if (method->GetName(0, mdNameLen, &len, g_mdName) == S_OK)
            {
                ToRelease<IXCLRDataModule> pMod;
                HRESULT hr = method->GetTokenAndScope(NULL, &pMod);
                if (SUCCEEDED(hr))
                {
                    len = mdNameLen;
                    if (pMod->GetName(mdNameLen, &len, szModuleName) == S_OK)
                    {
                        ExtOut("JITTED %S!%S\n", szModuleName, g_mdName);

                        // Add breakpoint, perhaps delete pending breakpoint
                        DacpGetModuleAddress dgma;
                        if (SUCCEEDED(dgma.Request(pMod)))
                        {
                            g_bpoints.Update(TO_TADDR(dgma.ModulePtr), FALSE);
                        }
                        else
                        {
                            ExtOut("Failed to request module address.\n");
                        }
                    }
                }
            }
        }
#endif
        m_dbgStatus = DEBUG_STATUS_GO_HANDLED;
        return S_OK;
    }

    // JIT notification variant that also supplies the native code address, so
    // the breakpoint can be bound directly via UpdateKnownCodeAddress.
    STDMETHODIMP OnCodeGenerated2(IXCLRDataMethodInstance* method, CLRDATA_ADDRESS nativeCodeLocation)
    {
        // Some method has been generated, make a breakpoint.
        ULONG32 len = mdNameLen;
        LPWSTR szModuleName = (LPWSTR)alloca(mdNameLen * sizeof(WCHAR));
        if (method->GetName(0, mdNameLen, &len, g_mdName) == S_OK)
        {
            ToRelease<IXCLRDataModule> pMod;
            HRESULT hr = method->GetTokenAndScope(NULL, &pMod);
            if (SUCCEEDED(hr))
            {
                len = mdNameLen;
                if (pMod->GetName(mdNameLen, &len, szModuleName) == S_OK)
                {
                    ExtOut("JITTED %S!%S\n", szModuleName, g_mdName);

                    DacpGetModuleAddress dgma;
                    if (SUCCEEDED(dgma.Request(pMod)))
                    {
                        g_bpoints.UpdateKnownCodeAddress(TO_TADDR(dgma.ModulePtr), nativeCodeLocation);
                    }
                    else
                    {
                        ExtOut("Failed to request module address.\n");
                    }
                }
            }
        }
        m_dbgStatus = DEBUG_STATUS_GO_HANDLED;
        return S_OK;
    }

    STDMETHODIMP OnCodeDiscarded(IXCLRDataMethodInstance* method)
    {
        return E_NOTIMPL;
    }

    /*
     * The process or task reached the desired execution state.
     */
    STDMETHODIMP OnProcessExecution(ULONG32 state) { return E_NOTIMPL; }
    STDMETHODIMP OnTaskExecution(IXCLRDataTask* task,
                                 ULONG32 state) { return E_NOTIMPL; }

    /*
     * The given module was loaded or unloaded.
     */
    STDMETHODIMP OnModuleLoaded(IXCLRDataModule* mod)
    {
        // Try to bind any pending breakpoints against the newly loaded module.
        DacpGetModuleAddress dgma;
        if (SUCCEEDED(dgma.Request(mod)))
        {
            g_bpoints.Update(TO_TADDR(dgma.ModulePtr), TRUE);
        }

        // Optionally keep the JIT from optimizing this module's code so that
        // breakpoints and locals behave better under the debugger.
        if (!g_fAllowJitOptimization)
        {
            HRESULT hr;
            ToRelease<IXCLRDataModule2> mod2;
            if (FAILED(mod->QueryInterface(__uuidof(IXCLRDataModule2), (void**) &mod2)))
            {
                ExtOut("SOS: warning, optimizations for this module could not be suppressed because this CLR version doesn't support the functionality\n");
            }
            else if(FAILED(hr = mod2->SetJITCompilerFlags(CORDEBUG_JIT_DISABLE_OPTIMIZATION)))
            {
                if(hr == CORDBG_E_CANT_CHANGE_JIT_SETTING_FOR_ZAP_MODULE)
                    ExtOut("SOS: warning, optimizations for this module could not be suppressed because an optimized prejitted image was loaded\n");
                else
                    ExtOut("SOS: warning, optimizations for this module could not be suppressed hr=0x%x\n", hr);
            }
        }
        m_dbgStatus = DEBUG_STATUS_GO_HANDLED;
        return S_OK;
    }

    STDMETHODIMP OnModuleUnloaded(IXCLRDataModule* mod)
    {
        // Drop pending breakpoints bound to the departing module.
        DacpGetModuleAddress dgma;
        if (SUCCEEDED(dgma.Request(mod)))
        {
            g_bpoints.RemovePendingForModule(TO_TADDR(dgma.ModulePtr));
        }
        m_dbgStatus = DEBUG_STATUS_GO_HANDLED;
        return S_OK;
    }

    /*
     * The given type was loaded or unloaded.
     */
    STDMETHODIMP OnTypeLoaded(IXCLRDataTypeInstance* typeInst)
    { return E_NOTIMPL; }
    STDMETHODIMP OnTypeUnloaded(IXCLRDataTypeInstance* typeInst)
    { return E_NOTIMPL; }

    STDMETHODIMP OnAppDomainLoaded(IXCLRDataAppDomain* domain)
    { return E_NOTIMPL; }
    STDMETHODIMP OnAppDomainUnloaded(IXCLRDataAppDomain* domain)
    { return E_NOTIMPL; }
    STDMETHODIMP OnException(IXCLRDataExceptionState* exception)
    { return E_NOTIMPL; }

    STDMETHODIMP OnGcEvent(GcEvtArgs gcEvtArgs)
    {
        // by default don't stop on these notifications...
        m_dbgStatus = DEBUG_STATUS_GO_HANDLED;

        // NOTE(review): 'idp2' is never Released in this method -- looks like
        // an interface leak per GC event; confirm before changing.
        IXCLRDataProcess2* idp2 = NULL;
        if (SUCCEEDED(g_clrData->QueryInterface(IID_IXCLRDataProcess2, (void**) &idp2)))
        {
            if (gcEvtArgs.typ == GC_MARK_END)
            {
                // erase notification request
                GcEvtArgs gea = { GC_MARK_END, { 0 } };
                idp2->SetGcNotification(gea);

                s_condemnedGen = bitidx(gcEvtArgs.condemnedGeneration);

                ExtOut("CLR notification: GC - Performing a gen %d collection. Determined surviving objects...\n", s_condemnedGen);
                // GC_MARK_END notification means: give the user a chance to examine the debuggee
                m_dbgStatus = DEBUG_STATUS_BREAK;
            }
        }
        return S_OK;
    }

    /*
     * Catch is about to be entered
     */
    STDMETHODIMP ExceptionCatcherEnter(IXCLRDataMethodInstance* method, DWORD catcherNativeOffset)
    {
        // When requested (CatchThrow feature), set a one-shot native
        // breakpoint at the catch handler's address and clear the request.
        if(g_stopOnNextCatch)
        {
            CLRDATA_ADDRESS startAddr;
            if(method->GetRepresentativeEntryAddress(&startAddr) == S_OK)
            {
                CHAR buffer[100];
#ifndef FEATURE_PAL
                sprintf_s(buffer, _countof(buffer), "bp /1 %p", (void*) (size_t) (startAddr+catcherNativeOffset));
#else
                sprintf_s(buffer, _countof(buffer), "breakpoint set --one-shot --address 0x%p", (void*) (size_t) (startAddr+catcherNativeOffset));
#endif
                g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer, 0);
            }
            g_stopOnNextCatch = FALSE;
        }

        m_dbgStatus = DEBUG_STATUS_GO_HANDLED;
        return S_OK;
    }

    // Generation condemned by the last observed GC; -1 when none seen yet.
    static int GetCondemnedGen()
    {
        return s_condemnedGen;
    }
};

int CNotification::s_condemnedGen = -1;
// Fills in *pdle with the pending CLR notification exception, if any.
// Prefers the ISOSDacInterface4::GetClrNotification path (works even when the
// debugger has no last-event record); falls back to the debugger's last event.
// Returns TRUE iff a first-chance CLRDATA_NOTIFY_EXCEPTION is available.
BOOL CheckCLRNotificationEvent(DEBUG_LAST_EVENT_INFO_EXCEPTION* pdle)
{
    ISOSDacInterface4 *psos4 = NULL;
    CLRDATA_ADDRESS arguments[3];
    HRESULT Status;

    if (SUCCEEDED(Status = g_sos->QueryInterface(__uuidof(ISOSDacInterface4), (void**) &psos4)))
    {
        int count = _countof(arguments);
        int countNeeded = 0;

        Status = psos4->GetClrNotification(arguments, count, &countNeeded);
        psos4->Release();

        if (SUCCEEDED(Status))
        {
            // Synthesize an exception record carrying the notification
            // arguments so it can be fed to
            // TranslateExceptionRecordToNotification.
            memset(&pdle->ExceptionRecord, 0, sizeof(pdle->ExceptionRecord));
            pdle->FirstChance = TRUE;
            pdle->ExceptionRecord.ExceptionCode = CLRDATA_NOTIFY_EXCEPTION;

            _ASSERTE(count <= EXCEPTION_MAXIMUM_PARAMETERS);
            for (int i = 0; i < count; i++)
            {
                pdle->ExceptionRecord.ExceptionInformation[i] = arguments[i];
            }
            // The rest of the ExceptionRecord isn't used by TranslateExceptionRecordToNotification
            return TRUE;
        }
        // No pending exception notification
        return FALSE;
    }

    // The new DAC based interface doesn't exists so ask the debugger for the last exception
    // information. NOTE: this function doesn't work on xplat version when the coreclr symbols
    // have been stripped.

    ULONG Type, ProcessId, ThreadId;
    ULONG ExtraInformationUsed;
    Status = g_ExtControl->GetLastEventInformation(
        &Type,
        &ProcessId,
        &ThreadId,
        pdle,
        sizeof(DEBUG_LAST_EVENT_INFO_EXCEPTION),
        &ExtraInformationUsed,
        NULL,
        0,
        NULL);

    if (Status != S_OK || Type != DEBUG_EVENT_EXCEPTION)
    {
        return FALSE;
    }

    // Only a first-chance CLR notification exception qualifies.
    if (!pdle->FirstChance || pdle->ExceptionRecord.ExceptionCode != CLRDATA_NOTIFY_EXCEPTION)
    {
        return FALSE;
    }
    return TRUE;
}
// Dispatches one pending CLR notification exception.
//
// A module-load notification lets us register pending breakpoints for jit
// notification; a jit notification lets us bind real breakpoints and drop the
// resolved pending entries. After dispatch, resume the target when the
// notification asked for any of the "go" statuses.
HRESULT HandleCLRNotificationEvent()
{
    DEBUG_LAST_EVENT_INFO_EXCEPTION dle;
    CNotification Notification;

    if (!CheckCLRNotificationEvent(&dle))
    {
#ifndef FEATURE_PAL
        ExtOut("Expecting first chance CLRN exception\n");
        return E_FAIL;
#else
        g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "process continue", 0);
        return S_OK;
#endif
    }

    // Notification only needs to live for the lifetime of the call below, so
    // it's a non-static local.
    HRESULT Status = g_clrData->TranslateExceptionRecordToNotification(&dle.ExceptionRecord, &Notification);
    if (Status != S_OK)
    {
        ExtErr("Error processing exception notification\n");
        return Status;
    }

    const int dbgStatus = Notification.GetDebugStatus();
    if (dbgStatus == DEBUG_STATUS_GO ||
        dbgStatus == DEBUG_STATUS_GO_HANDLED ||
        dbgStatus == DEBUG_STATUS_GO_NOT_HANDLED)
    {
#ifndef FEATURE_PAL
        g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "g", 0);
#else
        g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "process continue", 0);
#endif
    }

    return S_OK;
}
void EnableModuleLoadUnloadCallbacks()
{
_ASSERTE(g_clrData != nullptr);
ULONG32 flags = 0;
g_clrData->GetOtherNotificationFlags(&flags);
flags |= (CLRDATA_NOTIFY_ON_MODULE_LOAD | CLRDATA_NOTIFY_ON_MODULE_UNLOAD);
g_clrData->SetOtherNotificationFlags(flags);
}
#ifndef FEATURE_PAL

// Debugger-visible command installed as the "sxe clrn" handler: processes one
// CLR notification exception (module load / JIT) and updates pending
// breakpoints accordingly.
DECLARE_API(SOSHandleCLRN)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    return HandleCLRNotificationEvent();
}

// Invoked once the runtime module has loaded: enables module load/unload
// notifications and installs SOSHandleCLRN as the clrn exception handler so
// pending breakpoints resolve as modules load and methods are jitted.
HRESULT HandleRuntimeLoadedNotification(IDebugClient* client)
{
    INIT_API();
    EnableModuleLoadUnloadCallbacks();
    return g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "sxe -c \"!SOSHandleCLRN\" clrn", 0);
}

#else // FEATURE_PAL

// LLDB-side equivalent of SOSHandleCLRN: invoked through the exception
// callback rather than an "sxe" event filter.
HRESULT HandleExceptionNotification(ILLDBServices *client)
{
    INIT_API();
    return HandleCLRNotificationEvent();
}

// LLDB-side equivalent of the Windows HandleRuntimeLoadedNotification above.
HRESULT HandleRuntimeLoadedNotification(ILLDBServices *client)
{
    INIT_API();
    EnableModuleLoadUnloadCallbacks();
    return g_ExtServices->SetExceptionCallback(HandleExceptionNotification);
}

#endif // FEATURE_PAL
/**********************************************************************\
* Routine Description:                                                 *
*                                                                      *
*    The bpmd command: manages managed (pending) breakpoints. It can   *
*    list or clear pending breakpoints, or add one by module!method,   *
*    by file:line, or by an explicit MethodDesc. Breakpoints are bound *
*    immediately when jitted code already exists; otherwise a pending  *
*    entry is recorded and resolved later via CLR notifications.       *
*                                                                      *
\**********************************************************************/
DECLARE_API(bpmd)
{
    INIT_API_NOEE();
    MINIDUMP_NOT_SUPPORTED();
    char buffer[1024];

    if (IsDumpFile())
    {
        ExtOut(SOSPrefix "bpmd is not supported on a dump file.\n");
        return Status;
    }

    // We keep a list of managed breakpoints the user wants to set, and display pending bps
    // bpmd. If you call bpmd <module name> <method> we will set or update an existing bp.
    // bpmd acts as a feeder of breakpoints to bp when the time is right.
    //

    StringHolder DllName,TypeName;
    int lineNumber = 0;
    size_t Offset = 0;

    DWORD_PTR pMD = NULL;
    BOOL fNoFutureModule = FALSE;
    BOOL fList = FALSE;
    size_t clearItem = 0;
    BOOL fClearAll = FALSE;
    CMDOption option[] =
    { // name, vptr, type, hasValue
        {"-md", &pMD, COHEX, TRUE},
        {"-nofuturemodule", &fNoFutureModule, COBOOL, FALSE},
        {"-list", &fList, COBOOL, FALSE},
        {"-clear", &clearItem, COSIZE_T, TRUE},
        {"-clearall", &fClearAll, COBOOL, FALSE},
    };
    CMDValue arg[] =
    { // vptr, type
        {&DllName.data, COSTRING},
        {&TypeName.data, COSTRING},
        {&Offset, COSIZE_T},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    // Exactly one command mode (-md / -list / -clear / -clearall / positional
    // args) must have been chosen; commandsParsed counts how many were seen.
    bool fBadParam = false;
    bool fIsFilename = false;
    int commandsParsed = 0;

    if (pMD != NULL)
    {
        if (nArg != 0)
        {
            fBadParam = true;
        }
        commandsParsed++;
    }
    if (fList)
    {
        commandsParsed++;
        if (nArg != 0)
        {
            fBadParam = true;
        }
    }
    if (fClearAll)
    {
        commandsParsed++;
        if (nArg != 0)
        {
            fBadParam = true;
        }
    }
    if (clearItem != 0)
    {
        commandsParsed++;
        if (nArg != 0)
        {
            fBadParam = true;
        }
    }
    if (1 <= nArg && nArg <= 3)
    {
        commandsParsed++;
        // did we get dll and type name or file:line#? Search for a colon in the first arg
        // to see if it is in fact a file:line#
        CHAR* pColon = strchr(DllName.data, ':');
        if(NULL != pColon)
        {
            fIsFilename = true;
            // Split "file:line" in place at the colon.
            *pColon = '\0';
            pColon++;
            if(1 != sscanf_s(pColon, "%d", &lineNumber))
            {
                ExtOut("Unable to parse line number\n");
                fBadParam = true;
            }
            else if(lineNumber < 0)
            {
                ExtOut("Line number must be positive\n");
                fBadParam = true;
            }
            // file:line mode takes no extra positional arguments.
            if(nArg != 1) fBadParam = 1;
        }
    }

    if (fBadParam || (commandsParsed != 1))
    {
        ExtOut("Usage: " SOSPrefix "bpmd -md <MethodDesc pointer>\n");
        ExtOut("Usage: " SOSPrefix "bpmd [-nofuturemodule] <module name> <managed function name> [<il offset>]\n");
        ExtOut("Usage: " SOSPrefix "bpmd <filename>:<line number>\n");
        ExtOut("Usage: " SOSPrefix "bpmd -list\n");
        ExtOut("Usage: " SOSPrefix "bpmd -clear <pending breakpoint number>\n");
        ExtOut("Usage: " SOSPrefix "bpmd -clearall\n");
#ifdef FEATURE_PAL
        ExtOut("See \"soshelp bpmd\" for more details.\n");
#else
        ExtOut("See \"!help bpmd\" for more details.\n");
#endif
        return Status;
    }

    // Simple list/clear modes are handled and returned from immediately.
    if (fList)
    {
        g_bpoints.ListBreakpoints();
        return Status;
    }
    if (clearItem != 0)
    {
        g_bpoints.ClearBreakpoint(clearItem);
        return Status;
    }
    if (fClearAll)
    {
        g_bpoints.ClearAllBreakpoints();
        return Status;
    }
    // Add a breakpoint
    // Do we already have this breakpoint?
    // Or, before setting it, is the module perhaps already loaded and code
    // is available? If so, don't add to our pending list, just go ahead and
    // set the real breakpoint.

    LPWSTR ModuleName = (LPWSTR)alloca(mdNameLen * sizeof(WCHAR));
    LPWSTR FunctionName = (LPWSTR)alloca(mdNameLen * sizeof(WCHAR));
    LPWSTR Filename = (LPWSTR)alloca(MAX_LONGPATH * sizeof(WCHAR));

    BOOL bNeedNotificationExceptions = FALSE;

    if (pMD == NULL)
    {
        // Module!method or file:line breakpoint.
        int numModule = 0;
        int numMethods = 0;

        ArrayHolder<DWORD_PTR> moduleList = NULL;

        if(!fIsFilename)
        {
            MultiByteToWideChar(CP_ACP, 0, DllName.data, -1, ModuleName, mdNameLen);
            MultiByteToWideChar(CP_ACP, 0, TypeName.data, -1, FunctionName, mdNameLen);
        }
        else
        {
            MultiByteToWideChar(CP_ACP, 0, DllName.data, -1, Filename, MAX_LONGPATH);
        }

        // Get modules that may need a breakpoint bound
        if ((Status = CheckEEDll()) == S_OK)
        {
            if ((Status = LoadClrDebugDll()) != S_OK)
            {
                // if the EE is loaded but DAC isn't we should stop.
                DACMessage(Status);
                return Status;
            }
            g_bDacBroken = FALSE; \
// Get the module list
            moduleList = ModuleFromName(fIsFilename ? NULL : DllName.data, &numModule);

            // Its OK if moduleList is NULL
            // There is a very normal case when checking for modules after clr is loaded
            // but before any AppDomains or assemblies are created
            // for example:
            // >sxe ld:clr
            // >g
            // ...
            // ModLoad: clr.dll
            // >!bpmd Foo.dll Foo.Bar
        }
        // If LoadClrDebugDll() succeeded make sure we release g_clrData
        ToRelease<IXCLRDataProcess> spIDP(g_clrData);
        ToRelease<ISOSDacInterface> spISD(g_sos);
        if (g_sos != nullptr)
        {
            ResetGlobals();
        }
        // we can get here with EE not loaded => 0 modules
        //                      EE is loaded => 0 or more modules
        ArrayHolder<DWORD_PTR> pMDs = NULL;
        for (int iModule = 0; iModule < numModule; iModule++)
        {
            ToRelease<IXCLRDataModule> ModDef;
            if (g_sos->GetModule(moduleList[iModule], &ModDef) != S_OK)
            {
                continue;
            }

            HRESULT symbolsLoaded = S_FALSE;
            if(!fIsFilename)
            {
                g_bpoints.ResolvePendingNonModuleBoundBreakpoint(ModuleName, FunctionName, moduleList[iModule], (DWORD)Offset);
            }
            else
            {
                // file:line mode needs symbols to map the line to a method
                // token and IL offset.
                SymbolReader symbolReader;
                symbolsLoaded = g_bpoints.LoadSymbolsForModule(moduleList[iModule], &symbolReader);
                if(symbolsLoaded == S_OK &&
                   g_bpoints.ResolvePendingNonModuleBoundBreakpoint(Filename, lineNumber, moduleList[iModule], &symbolReader) == S_OK)
                {
                    // if we have symbols then get the function name so we can lookup the MethodDescs
                    mdMethodDef methodDefToken;
                    ULONG32 ilOffset;
                    if(SUCCEEDED(symbolReader.ResolveSequencePoint(Filename, lineNumber, &methodDefToken, &ilOffset)))
                    {
                        ToRelease<IXCLRDataMethodDefinition> pMethodDef = NULL;
                        if (SUCCEEDED(ModDef->GetMethodDefinitionByToken(methodDefToken, &pMethodDef)))
                        {
                            ULONG32 nameLen = 0;
                            pMethodDef->GetName(0, mdNameLen, &nameLen, FunctionName);

                            // get the size of the required buffer
                            int buffSize = WideCharToMultiByte(CP_ACP, 0, FunctionName, -1, TypeName.data, 0, NULL, NULL);

                            TypeName.data = new NOTHROW char[buffSize];
                            if (TypeName.data != NULL)
                            {
                                int bytesWritten = WideCharToMultiByte(CP_ACP, 0, FunctionName, -1, TypeName.data, buffSize, NULL, NULL);
                                _ASSERTE(bytesWritten == buffSize);
                            }
                        }
                    }
                }
            }

            HRESULT gotMethodDescs = GetMethodDescsFromName(moduleList[iModule], ModDef, TypeName.data, &pMDs, &numMethods);
            if (FAILED(gotMethodDescs) && (!fIsFilename))
            {
                // BPs via file name will enumerate through modules so there will be legitimate failures.
                // for module/type name we already found a match so this shouldn't fail (this is the original behavior).
                ExtOut("Error getting MethodDescs for module %p\n", moduleList[iModule]);
                return Status;
            }

            // for filename+line number only print extra info if symbols for this module are loaded (it can get quite noisy otherwise).
            if ((!fIsFilename) || (fIsFilename && symbolsLoaded == S_OK))
            {
                for (int i = 0; i < numMethods; i++)
                {
                    if (pMDs[i] == MD_NOT_YET_LOADED)
                    {
                        continue;
                    }
                    ExtOut("MethodDesc = %p\n", SOS_PTR(pMDs[i]));
                }
            }

            // Bind whatever can be bound now; TRUE means some entries remain
            // pending and need JIT notifications to resolve.
            if (g_bpoints.Update(moduleList[iModule], FALSE))
            {
                bNeedNotificationExceptions = TRUE;
            }
        }

        if (!fNoFutureModule)
        {
            // add a pending breakpoint that will find future loaded modules, and
            // wait for the module load notification.
            if (!fIsFilename)
            {
                g_bpoints.Add(ModuleName, FunctionName, NULL, (DWORD)Offset);
            }
            else
            {
                g_bpoints.Add(Filename, lineNumber, NULL);
            }
            if (g_clrData != nullptr)
            {
                bNeedNotificationExceptions = TRUE;
                EnableModuleLoadUnloadCallbacks();
            }
            else
            {
                // Runtime not loaded yet: defer everything until it is.
#ifdef FEATURE_PAL
                Status = g_ExtServices2->SetRuntimeLoadedCallback(HandleRuntimeLoadedNotification);
#else
                g_breakOnRuntimeModuleLoad = true;
#endif
            }
        }
    }
    else /* We were given a MethodDesc already */
    {
        // if we've got an explicit MD, then we better have runtime and dac loaded
        INIT_API_EE()
        INIT_API_DAC();

        DacpMethodDescData MethodDescData;
        ExtOut("MethodDesc = %p\n", SOS_PTR(pMD));
        if (MethodDescData.Request(g_sos, TO_CDADDR(pMD)) != S_OK)
        {
            ExtOut("%p is not a valid MethodDesc\n", SOS_PTR(pMD));
            return Status;
        }

        if (MethodDescData.bHasNativeCode)
        {
            // Code already exists: set the real breakpoint now.
            IssueDebuggerBPCommand((size_t) MethodDescData.NativeCodeAddr);
        }
        else if (MethodDescData.bIsDynamic)
        {
#ifndef FEATURE_PAL
            // Dynamic methods don't have JIT notifications. This is something we must
            // fix in the next release. Until then, you have a cumbersome user experience.
            ExtOut("This DynamicMethodDesc is not yet JITTED. Placing memory breakpoint at %p\n",
                MethodDescData.AddressOfNativeCodeSlot);

            sprintf_s(buffer, _countof(buffer),
#ifdef _TARGET_WIN64_
                "ba w8"
#else
                "ba w4"
#endif // _TARGET_WIN64_

                " /1 %p \"bp poi(%p); g\"",
                (void*) (size_t) MethodDescData.AddressOfNativeCodeSlot,
                (void*) (size_t) MethodDescData.AddressOfNativeCodeSlot);

            Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer, 0);
            if (FAILED(Status))
            {
                ExtOut("Unable to set breakpoint with IDebugControl::Execute: %x\n",Status);
                ExtOut("Attempted to run: %s\n", buffer);
            }
#else
            ExtErr("This DynamicMethodDesc is not yet JITTED %p\n", MethodDescData.AddressOfNativeCodeSlot);
#endif // FEATURE_PAL
        }
        else
        {
            // Must issue a pending breakpoint.
            if (g_sos->GetMethodDescName(pMD, mdNameLen, FunctionName, NULL) != S_OK)
            {
                ExtOut("Unable to get method name for MethodDesc %p\n", SOS_PTR(pMD));
                return Status;
            }

            FileNameForModule ((DWORD_PTR) MethodDescData.ModulePtr, ModuleName);

            // We didn't find code, add a breakpoint.
            g_bpoints.ResolvePendingNonModuleBoundBreakpoint(ModuleName, FunctionName, TO_TADDR(MethodDescData.ModulePtr), 0);
            g_bpoints.Update(TO_TADDR(MethodDescData.ModulePtr), FALSE);
            bNeedNotificationExceptions = TRUE;
        }
    }

    if (bNeedNotificationExceptions)
    {
        // Hook CLR notification exceptions so pending entries resolve as
        // modules load and methods are jitted.
        ExtOut("Adding pending breakpoints...\n");
#ifndef FEATURE_PAL
        Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "sxe -c \"!SOSHandleCLRN\" clrn", 0);
#else
        Status = g_ExtServices->SetExceptionCallback(HandleExceptionNotification);
#endif // FEATURE_PAL
    }

    return Status;
}
#ifndef FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the managed threadpool *
* *
\**********************************************************************/
DECLARE_API(ThreadPool)
{
INIT_API();
MINIDUMP_NOT_SUPPORTED();
ONLY_SUPPORTED_ON_WINDOWS_TARGET();
DacpThreadpoolData threadpool;
if ((Status = threadpool.Request(g_sos)) == S_OK)
{
BOOL doHCDump = FALSE, doWorkItemDump = FALSE, dml = FALSE;
CMDOption option[] =
{ // name, vptr, type, hasValue
{"-ti", &doHCDump, COBOOL, FALSE},
{"-wi", &doWorkItemDump, COBOOL, FALSE},
#ifndef FEATURE_PAL
{"/d", &dml, COBOOL, FALSE},
#endif
};
if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
{
return Status;
}
EnableDMLHolder dmlHolder(dml);
ExtOut ("CPU utilization: %d%%\n", threadpool.cpuUtilization);
ExtOut ("Worker Thread:");
ExtOut (" Total: %d", threadpool.NumWorkingWorkerThreads + threadpool.NumIdleWorkerThreads + threadpool.NumRetiredWorkerThreads);
ExtOut (" Running: %d", threadpool.NumWorkingWorkerThreads);
ExtOut (" Idle: %d", threadpool.NumIdleWorkerThreads);
ExtOut (" MaxLimit: %d", threadpool.MaxLimitTotalWorkerThreads);
ExtOut (" MinLimit: %d", threadpool.MinLimitTotalWorkerThreads);
ExtOut ("\n");
int numWorkRequests = 0;
CLRDATA_ADDRESS workRequestPtr = threadpool.FirstUnmanagedWorkRequest;
DacpWorkRequestData workRequestData;
while (workRequestPtr)
{
if ((Status = workRequestData.Request(g_sos,workRequestPtr))!=S_OK)
{
ExtOut(" Failed to examine a WorkRequest\n");
return Status;
}
numWorkRequests++;
workRequestPtr = workRequestData.NextWorkRequest;
}
ExtOut ("Work Request in Queue: %d\n", numWorkRequests);
workRequestPtr = threadpool.FirstUnmanagedWorkRequest;
while (workRequestPtr)
{
if ((Status = workRequestData.Request(g_sos,workRequestPtr))!=S_OK)
{
ExtOut(" Failed to examine a WorkRequest\n");
return Status;
}
if (workRequestData.Function == threadpool.AsyncTimerCallbackCompletionFPtr)
ExtOut (" AsyncTimerCallbackCompletion TimerInfo@%p\n", SOS_PTR(workRequestData.Context));
else
ExtOut (" Unknown Function: %p Context: %p\n", SOS_PTR(workRequestData.Function),
SOS_PTR(workRequestData.Context));
workRequestPtr = workRequestData.NextWorkRequest;
}
if (doWorkItemDump && g_snapshot.Build())
{
// Display a message if the heap isn't verified.
sos::GCHeap gcheap;
if (!gcheap.AreGCStructuresValid())
{
DisplayInvalidStructuresMessage();
}
int numModule;
ArrayHolder<DWORD_PTR> moduleList = ModuleFromName(const_cast<LPSTR>("System.Private.CoreLib.dll"), &numModule);
if (moduleList == NULL || numModule != 1)
{
ExtOut(" Failed to find System.Private.CoreLib.dll\n");
return Status;
}
DWORD_PTR corelibModule = moduleList[0];
mdTypeDef threadPoolWorkQueueMd, threadPoolWorkStealingQueueMd;
GetInfoFromName(corelibModule, "System.Threading.ThreadPoolWorkQueue", &threadPoolWorkQueueMd);
GetInfoFromName(corelibModule, "System.Threading.ThreadPoolWorkQueue+WorkStealingQueue", &threadPoolWorkStealingQueueMd);
// Walk every heap item looking for the global queue and local queues.
ExtOut("\nQueued work items:\n%" POINTERSIZE "s %" POINTERSIZE "s %s\n", "Queue", "Address", "Work Item");
HeapStat stats;
for (sos::ObjectIterator itr = gcheap.WalkHeap(); !IsInterrupt() && itr != NULL; ++itr)
{
DacpMethodTableData mtdata;
if (mtdata.Request(g_sos, TO_TADDR(itr->GetMT())) != S_OK ||
mtdata.Module != corelibModule)
{
continue;
}
if (mtdata.cl == threadPoolWorkQueueMd)
{
// We found a global queue (there should be only one, given one AppDomain).
// Get its workItems ConcurrentQueue<IThreadPoolWorkItem>.
int offset = GetObjFieldOffset(itr->GetAddress(), itr->GetMT(), W("workItems"));
if (offset > 0)
{
DWORD_PTR workItemsConcurrentQueuePtr;
MOVE(workItemsConcurrentQueuePtr, itr->GetAddress() + offset);
if (sos::IsObject(workItemsConcurrentQueuePtr, false))
{
// We got the ConcurrentQueue. Get its head segment.
sos::Object workItemsConcurrentQueue = TO_TADDR(workItemsConcurrentQueuePtr);
offset = GetObjFieldOffset(workItemsConcurrentQueue.GetAddress(), workItemsConcurrentQueue.GetMT(), W("_head"));
if (offset > 0)
{
// Now, walk from segment to segment, each of which contains an array of work items.
DWORD_PTR segmentPtr;
MOVE(segmentPtr, workItemsConcurrentQueue.GetAddress() + offset);
while (sos::IsObject(segmentPtr, false))
{
sos::Object segment = TO_TADDR(segmentPtr);
// Get the work items array. It's an array of Slot structs, which starts with the T.
offset = GetObjFieldOffset(segment.GetAddress(), segment.GetMT(), W("_slots"));
if (offset <= 0)
{
break;
}
DWORD_PTR slotsPtr;
MOVE(slotsPtr, segment.GetAddress() + offset);
if (!sos::IsObject(slotsPtr, false))
{
break;
}
// Walk every element in the array, outputting details on non-null work items.
DacpObjectData slotsArray;
if (slotsArray.Request(g_sos, TO_CDADDR(slotsPtr)) == S_OK && slotsArray.ObjectType == OBJ_ARRAY)
{
for (int i = 0; i < slotsArray.dwNumComponents; i++)
{
CLRDATA_ADDRESS workItemPtr;
MOVE(workItemPtr, TO_CDADDR(slotsArray.ArrayDataPtr + (i * slotsArray.dwComponentSize))); // the item object reference is at the beginning of the Slot
if (workItemPtr != NULL && sos::IsObject(workItemPtr, false))
{
sos::Object workItem = TO_TADDR(workItemPtr);
stats.Add((DWORD_PTR)workItem.GetMT(), (DWORD)workItem.GetSize());
DMLOut("%" POINTERSIZE "s %s %S", "[Global]", DMLObject(workItem.GetAddress()), workItem.GetTypeName());
if ((offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("_callback"))) > 0 ||
(offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("m_action"))) > 0)
{
CLRDATA_ADDRESS delegatePtr;
MOVE(delegatePtr, workItem.GetAddress() + offset);
CLRDATA_ADDRESS md;
if (TryGetMethodDescriptorForDelegate(delegatePtr, &md))
{
NameForMD_s((DWORD_PTR)md, g_mdName, mdNameLen);
ExtOut(" => %S", g_mdName);
}
}
ExtOut("\n");
}
}
}
// Move to the next segment.
DacpFieldDescData segmentField;
offset = GetObjFieldOffset(segment.GetAddress(), segment.GetMT(), W("_nextSegment"), TRUE, &segmentField);
if (offset <= 0)
{
break;
}
MOVE(segmentPtr, segment.GetAddress() + offset);
if (segmentPtr == NULL)
{
break;
}
}
}
}
}
}
else if (mtdata.cl == threadPoolWorkStealingQueueMd)
{
// We found a local queue. Get its work items array.
int offset = GetObjFieldOffset(itr->GetAddress(), itr->GetMT(), W("m_array"));
if (offset > 0)
{
// Walk every element in the array, outputting details on non-null work items.
DWORD_PTR workItemArrayPtr;
MOVE(workItemArrayPtr, itr->GetAddress() + offset);
DacpObjectData workItemArray;
if (workItemArray.Request(g_sos, TO_CDADDR(workItemArrayPtr)) == S_OK && workItemArray.ObjectType == OBJ_ARRAY)
{
for (int i = 0; i < workItemArray.dwNumComponents; i++)
{
CLRDATA_ADDRESS workItemPtr;
MOVE(workItemPtr, TO_CDADDR(workItemArray.ArrayDataPtr + (i * workItemArray.dwComponentSize)));
if (workItemPtr != NULL && sos::IsObject(workItemPtr, false))
{
sos::Object workItem = TO_TADDR(workItemPtr);
stats.Add((DWORD_PTR)workItem.GetMT(), (DWORD)workItem.GetSize());
DMLOut("%s %s %S", DMLObject(itr->GetAddress()), DMLObject(workItem.GetAddress()), workItem.GetTypeName());
if ((offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("_callback"))) > 0 ||
(offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("m_action"))) > 0)
{
CLRDATA_ADDRESS delegatePtr;
MOVE(delegatePtr, workItem.GetAddress() + offset);
CLRDATA_ADDRESS md;
if (TryGetMethodDescriptorForDelegate(delegatePtr, &md))
{
NameForMD_s((DWORD_PTR)md, g_mdName, mdNameLen);
ExtOut(" => %S", g_mdName);
}
}
ExtOut("\n");
}
}
}
}
}
}
// Output a summary.
stats.Sort();
stats.Print();
ExtOut("\n");
}
if (doHCDump)
{
ExtOut ("--------------------------------------\n");
ExtOut ("\nThread Injection History\n");
if (threadpool.HillClimbingLogSize > 0)
{
static char const * const TransitionNames[] =
{
"Warmup",
"Initializing",
"RandomMove",
"ClimbingMove",
"ChangePoint",
"Stabilizing",
"Starvation",
"ThreadTimedOut",
"Undefined"
};
ExtOut("\n Time Transition New #Threads #Samples Throughput\n");
DacpHillClimbingLogEntry entry;
// get the most recent entry first, so we can calculate time offsets
int index = (threadpool.HillClimbingLogFirstIndex + threadpool.HillClimbingLogSize-1) % HillClimbingLogCapacity;
CLRDATA_ADDRESS entryPtr = threadpool.HillClimbingLog + (index * sizeof(HillClimbingLogEntry));
if ((Status = entry.Request(g_sos,entryPtr))!=S_OK)
{
ExtOut(" Failed to examine a HillClimbing log entry\n");
return Status;
}
DWORD endTime = entry.TickCount;
for (int i = 0; i < threadpool.HillClimbingLogSize; i++)
{
index = (i + threadpool.HillClimbingLogFirstIndex) % HillClimbingLogCapacity;
entryPtr = threadpool.HillClimbingLog + (index * sizeof(HillClimbingLogEntry));
if ((Status = entry.Request(g_sos,entryPtr))!=S_OK)
{
ExtOut(" Failed to examine a HillClimbing log entry\n");
return Status;
}
ExtOut("%8.2lf %-14s %12d %12d %11.2lf\n",
(double)(int)(entry.TickCount - endTime) / 1000.0,
TransitionNames[entry.Transition],
entry.NewControlSetting,
entry.LastHistoryCount,
entry.LastHistoryMean);
}
}
}
ExtOut ("--------------------------------------\n");
ExtOut ("Number of Timers: %d\n", threadpool.NumTimers);
ExtOut ("--------------------------------------\n");
ExtOut ("Completion Port Thread:");
ExtOut ("Total: %d", threadpool.NumCPThreads);
ExtOut (" Free: %d", threadpool.NumFreeCPThreads);
ExtOut (" MaxFree: %d", threadpool.MaxFreeCPThreads);
ExtOut (" CurrentLimit: %d", threadpool.CurrentLimitTotalCPThreads);
ExtOut (" MaxLimit: %d", threadpool.MaxLimitTotalCPThreads);
ExtOut (" MinLimit: %d", threadpool.MinLimitTotalCPThreads);
ExtOut ("\n");
}
else
{
ExtOut("Failed to request ThreadpoolMgr information\n");
}
return Status;
}
#endif // FEATURE_PAL
/**********************************************************************\
* Given a managed object address, report which AppDomain the object's *
* type belongs to (shared domain, system domain, or a named domain).  *
\**********************************************************************/
DECLARE_API(FindAppDomain)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    DWORD_PTR objAddr = NULL;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&objAddr, COHEX},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);

    // Bail out early unless we were handed a real managed object address.
    if (objAddr == 0 || !sos::IsObject(objAddr))
    {
        ExtOut("%p is not a valid object\n", SOS_PTR(objAddr));
        return Status;
    }

    DacpAppDomainStoreData adstore;
    if (adstore.Request(g_sos) != S_OK)
    {
        ExtOut("Error getting AppDomain information\n");
        return Status;
    }

    CLRDATA_ADDRESS appDomain = GetAppDomain(TO_CDADDR(objAddr));
    if (appDomain == NULL)
    {
        // No AppDomain could be resolved; point the user at gcroot instead.
        ExtOut("The type is declared in the shared domain and other\n");
        ExtOut("methods of finding the AppDomain failed. Try running\n");
        if (IsDMLEnabled())
            DMLOut("<exec cmd=\"!gcroot /d %p\">!gcroot %p</exec>, and if you find a root on a\n", objAddr, objAddr);
        else
            ExtOut(SOSPrefix "gcroot %p, and if you find a root on a\n", objAddr);
        ExtOut("stack, check the AppDomain of that stack with " SOSThreads ".\n");
        ExtOut("Note that the Thread could have transitioned between\n");
        ExtOut("multiple AppDomains.\n");
        return Status;
    }

    DMLOut("AppDomain: %s\n", DMLDomain(appDomain));
    if (appDomain == adstore.sharedDomain)
    {
        ExtOut("Name: Shared Domain\n");
        ExtOut("ID: (shared domain)\n");
    }
    else if (appDomain == adstore.systemDomain)
    {
        ExtOut("Name: System Domain\n");
        ExtOut("ID: (system domain)\n");
    }
    else
    {
        // A regular domain: fetch its data and human-readable name.
        DacpAppDomainData domain;
        if ((domain.Request(g_sos, appDomain) != S_OK) ||
            (g_sos->GetAppDomainName(appDomain, mdNameLen, g_mdName, NULL) != S_OK))
        {
            ExtOut("Error getting AppDomain %p.\n", SOS_PTR(appDomain));
            return Status;
        }
        ExtOut("Name: %S\n", (g_mdName[0] != L'\0') ? g_mdName : W("None"));
        ExtOut("ID: %d\n", domain.dwId);
    }
    return Status;
}
#ifndef FEATURE_PAL
/**********************************************************************\
* Routine Description:                                                 *
*                                                                      *
*    This function is called to get the COM state (e.g. apartment,     *
*    context, activity) of each debuggee thread.                       *
*                                                                      *
\**********************************************************************/
#ifdef FEATURE_COMINTEROP
// Dumps per-thread COM state (apartment kind, apartment id, caller TID, and
// current context) by walking every debuggee thread and reading its OLE TLS
// data out of the TEB. Restores the debugger's current thread when done.
DECLARE_API(COMState)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    ULONG numThread;
    ULONG maxId;
    g_ExtSystem->GetTotalNumberThreads(&numThread,&maxId);

    // Remember the debugger's current thread so it can be restored at the end.
    ULONG curId;
    g_ExtSystem->GetCurrentThreadId(&curId);

    // Guard the per-thread-id array allocation against integer overflow.
    SIZE_T AllocSize;
    if (!ClrSafeInt<SIZE_T>::multiply(sizeof(ULONG), numThread, AllocSize))
    {
        ExtOut(" Error! integer overflow on numThread 0x%08x\n", numThread);
        return Status;
    }
    ULONG *ids = (ULONG*)alloca(AllocSize);
    ULONG *sysIds = (ULONG*)alloca(AllocSize);
    g_ExtSystem->GetThreadIdsByIndex(0,numThread,ids,sysIds);
#if defined(_TARGET_WIN64_)
    ExtOut(" ID TEB APT APTId CallerTID Context\n");
#else
    ExtOut(" ID TEB APT APTId CallerTID Context\n");
#endif
    for (ULONG i = 0; i < numThread; i ++) {
        // Switch debugger context to thread i so its TEB can be queried.
        g_ExtSystem->SetCurrentThreadId(ids[i]);

        CLRDATA_ADDRESS cdaTeb;
        g_ExtSystem->GetCurrentThreadTeb(&cdaTeb);
        ExtOut("%3d %4x %p", ids[i], sysIds[i], SOS_PTR(CDA_TO_UL64(cdaTeb)));
        // Apartment state lives in the per-thread OLE TLS block, reachable via
        // TEB.ReservedForOle; a null pointer means OLE was never initialized
        // on this thread.
        TADDR OleTlsDataAddr;
        if (SafeReadMemory(TO_TADDR(cdaTeb) + offsetof(TEB,ReservedForOle),
                            &OleTlsDataAddr,
                            sizeof(OleTlsDataAddr), NULL) && OleTlsDataAddr != 0) {
            DWORD AptState;
            if (SafeReadMemory(OleTlsDataAddr+offsetof(SOleTlsData,dwFlags),
                               &AptState,
                               sizeof(AptState), NULL)) {
                // Decode the apartment kind from the OLE TLS flag bits.
                if (AptState & OLETLS_APARTMENTTHREADED) {
                    ExtOut(" STA");
                }
                else if (AptState & OLETLS_MULTITHREADED) {
                    ExtOut(" MTA");
                }
                else if (AptState & OLETLS_INNEUTRALAPT) {
                    ExtOut(" NTA");
                }
                else {
                    ExtOut(" Ukn");
                }

                // Read these fields only if we were able to read anything of the SOleTlsData structure
                DWORD dwApartmentID;
                if (SafeReadMemory(OleTlsDataAddr+offsetof(SOleTlsData,dwApartmentID),
                                   &dwApartmentID,
                                   sizeof(dwApartmentID), NULL)) {
                    ExtOut(" %8x", dwApartmentID);
                }
                else
                    ExtOut(" %8x", 0);

                DWORD dwTIDCaller;
                if (SafeReadMemory(OleTlsDataAddr+offsetof(SOleTlsData,dwTIDCaller),
                                   &dwTIDCaller,
                                   sizeof(dwTIDCaller), NULL)) {
                    ExtOut(" %8x", dwTIDCaller);
                }
                else
                    ExtOut(" %8x", 0);

                size_t Context;
                if (SafeReadMemory(OleTlsDataAddr+offsetof(SOleTlsData,pCurrentCtx),
                                   &Context,
                                   sizeof(Context), NULL)) {
                    ExtOut(" %p", SOS_PTR(Context));
                }
                else
                    ExtOut(" %p", SOS_PTR(0));
            }
            else
                ExtOut(" Ukn");
        }
        else
            ExtOut(" Ukn");
        ExtOut("\n");
    }

    // Restore the debugger's original current-thread selection.
    g_ExtSystem->SetCurrentThreadId(curId);
    return Status;
}
#endif // FEATURE_COMINTEROP
#endif // FEATURE_PAL
// TraverseEHInfo callback: prints one EH clause (try/handler/filter ranges)
// both as absolute addresses and as method-relative offsets. 'token' carries
// the method's native code start address (see the EHInfo command).
// Returns FALSE to abort the traversal when the user interrupts.
BOOL traverseEh(UINT clauseIndex,UINT totalClauses,DACEHInfo *pEHInfo,LPVOID token)
{
    // The caller smuggles the method start address through the token pointer.
    size_t methodStart = (size_t) token;
    if (IsInterrupt())
    {
        return FALSE;
    }

    ExtOut("EHHandler %d: %s ", clauseIndex, EHTypeName(pEHInfo->clauseType));

    // For typed catch clauses, show the caught exception type.
    LPCWSTR typeName = EHTypedClauseTypeName(pEHInfo);
    if (typeName != NULL)
    {
        ExtOut("catch(%S) ", typeName);
    }

    if (IsClonedFinally(pEHInfo))
        ExtOut("(cloned finally)");
    else if (pEHInfo->isDuplicateClause)
        ExtOut("(duplicate)");

    ExtOut("\n");
    ExtOut("Clause: ");

    // Try-region: absolute [start, end] followed by method-relative offsets.
    ULONG64 addrStart = pEHInfo->tryStartOffset + methodStart;
    ULONG64 addrEnd = pEHInfo->tryEndOffset + methodStart;

#ifdef _WIN64
    ExtOut("[%08x`%08x, %08x`%08x]",
            (ULONG)(addrStart >> 32), (ULONG)addrStart,
            (ULONG)(addrEnd >> 32), (ULONG)addrEnd);
#else
    ExtOut("[%08x, %08x]", (ULONG)addrStart, (ULONG)addrEnd);
#endif

    ExtOut(" [%x, %x]\n",
        (UINT32) pEHInfo->tryStartOffset,
        (UINT32) pEHInfo->tryEndOffset);

    ExtOut("Handler: ");

    // Handler region, same dual formatting as the try region.
    addrStart = pEHInfo->handlerStartOffset + methodStart;
    addrEnd = pEHInfo->handlerEndOffset + methodStart;

#ifdef _WIN64
    ExtOut("[%08x`%08x, %08x`%08x]",
            (ULONG)(addrStart >> 32), (ULONG)addrStart,
            (ULONG)(addrEnd >> 32), (ULONG)addrEnd);
#else
    ExtOut("[%08x, %08x]", (ULONG)addrStart, (ULONG)addrEnd);
#endif

    ExtOut(" [%x, %x]\n",
        (UINT32) pEHInfo->handlerStartOffset,
        (UINT32) pEHInfo->handlerEndOffset);

    // Filter clauses additionally carry a filter entry point.
    if (pEHInfo->clauseType == EHFilter)
    {
        ExtOut("Filter: ");
        addrStart = pEHInfo->filterOffset + methodStart;

#ifdef _WIN64
        ExtOut("[%08x`%08x]", (ULONG)(addrStart >> 32), (ULONG)addrStart);
#else
        ExtOut("[%08x]", (ULONG)addrStart);
#endif

        ExtOut(" [%x]\n",
            (UINT32) pEHInfo->filterOffset);
    }

    ExtOut("\n");
    return TRUE;
}
/**********************************************************************\
* Dumps the exception-handling clauses of a jitted managed method.     *
* The argument may be a MethodDesc address or a native code address.   *
\**********************************************************************/
DECLARE_API(EHInfo)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    DWORD_PTR dwStartAddr = NULL;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&dwStartAddr, COHEX},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg) || nArg == 0)
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);

    // Resolve a raw code address back to its MethodDesc when necessary.
    DWORD_PTR mdAddr = dwStartAddr;
    if (!IsMethodDesc(dwStartAddr))
    {
        JITTypes jitType;
        DWORD_PTR methodDesc;
        DWORD_PTR gcinfoAddr;
        IP2MethodDesc(dwStartAddr, methodDesc, jitType, gcinfoAddr);
        mdAddr = methodDesc;
    }

    DacpMethodDescData MD;
    if (mdAddr == 0 || MD.Request(g_sos, TO_CDADDR(mdAddr)) != S_OK)
    {
        ExtOut("%p is not a MethodDesc\n", SOS_PTR(mdAddr));
        return Status;
    }

    // A not-yet-jitted method has no EH tables to show.
    if (nArg == 1 && !MD.bHasNativeCode)
    {
        ExtOut("No EH info available\n");
        return Status;
    }

    DacpCodeHeaderData codeHeaderData;
    if (codeHeaderData.Request(g_sos, TO_CDADDR(MD.NativeCodeAddr)) != S_OK)
    {
        ExtOut("Unable to get codeHeader information\n");
        return Status;
    }

    DMLOut("MethodDesc: %s\n", DMLMethodDesc(MD.MethodDescPtr));
    DumpMDInfo(TO_TADDR(MD.MethodDescPtr));
    ExtOut("\n");

    // Walk the clauses; traverseEh prints each one and receives the native
    // code start address through the callback token.
    Status = g_sos->TraverseEHInfo(TO_CDADDR(MD.NativeCodeAddr), traverseEh, (LPVOID)MD.NativeCodeAddr);
    if (Status == E_ABORT)
    {
        ExtOut("<user aborted>\n");
    }
    else if (Status != S_OK)
    {
        ExtOut("Failed to perform EHInfo traverse\n");
    }
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the GC encoding of a managed *
* function. *
* *
\**********************************************************************/
// Dumps the GC encoding (gc info) of a jitted managed method. The argument
// may be a MethodDesc address or a native code address; a code address is
// resolved back to its MethodDesc first.
DECLARE_API(GCInfo)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    TADDR taStartAddr = NULL;
    TADDR taGCInfoAddr;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&taStartAddr, COHEX},
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg) || (0 == nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);

    // Resolve a code address to its MethodDesc when needed.
    TADDR tmpAddr = taStartAddr;
    if (!IsMethodDesc(taStartAddr))
    {
        JITTypes jitType;
        TADDR methodDesc;
        TADDR gcinfoAddr;
        IP2MethodDesc(taStartAddr, methodDesc, jitType, gcinfoAddr);
        tmpAddr = methodDesc;
    }

    DacpMethodDescData MD;
    if ((tmpAddr == 0) || (MD.Request(g_sos, TO_CDADDR(tmpAddr)) != S_OK))
    {
        ExtOut("%p is not a valid MethodDesc\n", SOS_PTR(taStartAddr));
        return Status;
    }

    // A not-yet-jitted method has no GC info.
    if (1 == nArg && !MD.bHasNativeCode)
    {
        ExtOut("No GC info available\n");
        return Status;
    }

    DacpCodeHeaderData codeHeaderData;
    if (
        // Try to get code header data from taStartAddr. This will get the code
        // header corresponding to the IP address, even if the function was rejitted
        (codeHeaderData.Request(g_sos, TO_CDADDR(taStartAddr)) != S_OK) &&

        // If that didn't work, just try to use the code address that the MD
        // points to. If the function was rejitted, this will only give you the
        // original JITted code, but that's better than nothing
        (codeHeaderData.Request(g_sos, TO_CDADDR(MD.NativeCodeAddr)) != S_OK)
        )
    {
        // We always used to emit this (before rejit support), even if we couldn't get
        // the code header, so keep on doing so.
        ExtOut("entry point %p\n", SOS_PTR(MD.NativeCodeAddr));

        // And now the error....
        ExtOut("Unable to get codeHeader information\n");
        return Status;
    }

    // We have the code header, so use it to determine the method start

    ExtOut("entry point %p\n", SOS_PTR(codeHeaderData.MethodStart));

    if (codeHeaderData.JITType == TYPE_UNKNOWN)
    {
        ExtOut("unknown Jit\n");
        return Status;
    }
    else if (codeHeaderData.JITType == TYPE_JIT)
    {
        ExtOut("Normal JIT generated code\n");
    }
    else if (codeHeaderData.JITType == TYPE_PJIT)
    {
        ExtOut("preJIT generated code\n");
    }

    taGCInfoAddr = TO_TADDR(codeHeaderData.GCInfo);

    ExtOut("GC info %p\n", SOS_PTR(taGCInfoAddr));

    // assume that GC encoding table is never more than
    // 40 + methodSize * 2
    int tableSize = 0;
    if (!ClrSafeInt<int>::multiply(codeHeaderData.MethodSize, 2, tableSize) ||
        !ClrSafeInt<int>::addition(tableSize, 40, tableSize))
    {
        ExtOut("<integer overflow>\n");
        return E_FAIL;
    }

    // ArrayHolder frees the buffer when this function returns.
    ArrayHolder<BYTE> table = new NOTHROW BYTE[tableSize];
    if (table == NULL)
    {
        ExtOut("Could not allocate memory to read the gc info.\n");
        return E_OUTOFMEMORY;
    }

    memset(table, 0, tableSize);
    // We avoid using move here, because we do not want to return
    if (!SafeReadMemory(taGCInfoAddr, table, tableSize, NULL))
    {
        ExtOut("Could not read memory %p\n", SOS_PTR(taGCInfoAddr));
        return Status;
    }

    // Mutable table pointer since we need to pass the appropriate
    // offset into the table to DumpGCTable.
    GCInfoToken gcInfoToken = { table, GCINFO_VERSION };
    unsigned int methodSize = (unsigned int)codeHeaderData.MethodSize;

    // Decode and print the GC table, including encoding bytes and header.
    g_targetMachine->DumpGCInfo(gcInfoToken, methodSize, ExtOut, true /*encBytes*/, true /*bPrintHeader*/);

    return Status;
}
GCEncodingInfo g_gcEncodingInfo; // The constructor should run to create the initial buffer allocation.
// Sink for DumpGCInfo: formats one GC-table entry and appends the text to the
// global g_gcEncodingInfo buffer, growing the buffer and retrying when the
// formatted text does not fit.
//
// BUGFIX: the original started 'va' once and passed it directly to
// _vsnprintf_s inside the retry loop. A va_list is indeterminate after being
// consumed by a v*printf call, so reusing it on the retry after a buffer
// reallocation is undefined behavior (C standard, 7.16.1). We now va_copy a
// fresh list for every formatting attempt. The early-return path also ends
// the va_list, matching every va_start with a va_end.
void DecodeGCTableEntry (const char *fmt, ...)
{
    va_list va;

    //
    // Append the new data to the buffer. If it doesn't fit, allocate a new buffer that is bigger and try again.
    //

    va_start(va, fmt);

    // Make sure there's at least a minimum amount of free space in the buffer. We need to minimally
    // ensure that 'maxCchToWrite' is >0. 20 is an arbitrary smallish number.
    if (!g_gcEncodingInfo.EnsureAdequateBufferSpace(20))
    {
        ExtOut("Could not allocate memory for GC info\n");
        va_end(va); // match va_start even on the failure path
        return;
    }

    while (true)
    {
        char* buffer = &g_gcEncodingInfo.buf[g_gcEncodingInfo.cchBuf];
        size_t sizeOfBuffer = g_gcEncodingInfo.cchBufAllocation - g_gcEncodingInfo.cchBuf;
        size_t maxCchToWrite = sizeOfBuffer - 1; // -1 to leave space for the null terminator

        // Format against a fresh copy of the argument list so a retry after
        // buffer growth starts from an untouched va_list.
        va_list vaAttempt;
        va_copy(vaAttempt, va);
        int cch = _vsnprintf_s(buffer, sizeOfBuffer, maxCchToWrite, fmt, vaAttempt);
        va_end(vaAttempt);

        // cch == -1 should be the only negative result, but checking < 0 is defensive in case some runtime returns something else.
        // We should also check "errno == ERANGE", but it seems that some runtimes don't set that properly.
        if (cch < 0)
        {
            if (sizeOfBuffer > 1000)
            {
                // There must be some unexpected problem if we can't write the GC info into such a large buffer, so bail.
                ExtOut("Error generating GC info\n");
                break;
            }
            else if (!g_gcEncodingInfo.ReallocBuf())
            {
                // We couldn't reallocate the buffer; skip the rest of the text.
                ExtOut("Could not allocate memory for GC info\n");
                break;
            }

            // If we get here, we successfully reallocated the buffer larger, so we'll try again to write this entry
            // into the larger buffer.
        }
        else
        {
            // We successfully added this entry to the GC info we're accumulating.
            // cch is the number of characters written, not including the terminating null.
            g_gcEncodingInfo.cchBuf += cch;
            break;
        }
    }

    va_end(va);
}
// TraverseEHInfo callback that collects EH clauses into a SOSEHInfo object
// (passed through 'token') instead of printing them; used by the !u command
// so clause markers can be interleaved with the disassembly.
BOOL gatherEh(UINT clauseIndex, UINT totalClauses, DACEHInfo *pEHInfo, LPVOID token)
{
    SOSEHInfo *collector = (SOSEHInfo *)token;
    if (collector == NULL)
        return FALSE;

    // On the first callback, allocate storage for every clause up front.
    if (collector->m_pInfos == NULL)
    {
        collector->EHCount = totalClauses;
        collector->m_pInfos = new NOTHROW DACEHInfo[totalClauses];
        if (collector->m_pInfos == NULL)
        {
            ReportOOM();
            return FALSE;
        }
    }

    collector->m_pInfos[clauseIndex] = *pEHInfo;
    return TRUE;
}
HRESULT
GetClrMethodInstance(
___in ULONG64 NativeOffset,
___out IXCLRDataMethodInstance** Method);
typedef std::tuple<DacpMethodDescData, DacpCodeHeaderData, HRESULT> ExtractionCodeHeaderResult;
ExtractionCodeHeaderResult extractCodeHeaderData(DWORD_PTR methodDesc, DWORD_PTR dwStartAddr);
HRESULT displayGcInfo(BOOL fWithGCInfo, const DacpCodeHeaderData& codeHeaderData);
HRESULT GetIntermediateLangMap(BOOL bIL, const DacpCodeHeaderData& codeHeaderData,
std::unique_ptr<CLRDATA_IL_ADDRESS_MAP[]>& map,
ULONG32& mapCount,
BOOL dumpMap);
// Resolves the IL address for a method, preferring profiler-modified IL when
// an ISOSDacInterface7 reports one, otherwise looking the IL up through the
// method's module metadata (token RVA -> GetILForModule).
//
// Returns a tuple of (IL address, IMetaDataImport*); the caller takes
// ownership of the detached import pointer. On any failure, returns
// (NULL, nullptr) after printing an error.
GetILAddressResult GetILAddress(const DacpMethodDescData& MethodDescData)
{
    GetILAddressResult error = std::make_tuple(NULL, nullptr);
    TADDR ilAddr = NULL;
    struct DacpProfilerILData ilData;

    // A profiler may have rewritten the method's IL; honor that copy first.
    ReleaseHolder<ISOSDacInterface7> sos7;
    if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface7), &sos7)) &&
        SUCCEEDED(sos7->GetProfilerModifiedILInformation(MethodDescData.MethodDescPtr, &ilData)))
    {
        if (ilData.type == DacpProfilerILData::ILModified)
        {
            ExtOut("Found profiler modified IL\n");
            ilAddr = TO_TADDR(ilData.il);
        }
    }

    // Factor this so that it returns a map from IL offset to the textual representation of the decoding
    // to be consumed by !u -il
    // The disassemble function can give a MethodDescData as well as the set of keys IL offsets

    // This is not a dynamic method, print the IL for it.
    // Get the module
    DacpModuleData dmd;
    if (dmd.Request(g_sos, MethodDescData.ModulePtr) != S_OK)
    {
        ExtOut("Unable to get module\n");
        return error;
    }

    ToRelease<IMetaDataImport> pImport(MDImportForModule(&dmd));
    if (pImport == NULL)
    {
        ExtOut("bad import\n");
        return error;
    }

    // No profiler IL: find the method body through its metadata token RVA.
    if (ilAddr == NULL)
    {
        ULONG pRva;
        DWORD dwFlags;
        if (pImport->GetRVA(MethodDescData.MDToken, &pRva, &dwFlags) != S_OK)
        {
            ExtOut("error in import\n");
            return error;
        }

        CLRDATA_ADDRESS ilAddrClr;
        if (g_sos->GetILForModule(MethodDescData.ModulePtr, pRva, &ilAddrClr) != S_OK)
        {
            ExtOut("FindIL failed\n");
            return error;
        }

        ilAddr = TO_TADDR(ilAddrClr);
    }

    if (ilAddr == NULL)
    {
        ExtOut("Unknown error in reading function IL\n");
        return error;
    }

    // Detach the import so ownership transfers to the caller.
    GetILAddressResult result = std::make_tuple(ilAddr, pImport.Detach());
    return result;
}
/**********************************************************************\
* Routine Description:                                                 *
*                                                                      *
*    This function is called to disassemble a managed function.        *
*    It tries to print symbolic info for function calls, constants...  *
*                                                                      *
\**********************************************************************/
// The !u command: disassembles a jitted managed method, optionally
// interleaving GC info (-gcinfo), EH clause markers (-ehinfo), and decoded IL
// (-il, with -map to dump the IL-to-native address map). The argument may be
// a MethodDesc address or a code address.
//
// BUGFIX: in the IL-map scan below, when no map entry matched the current IL
// offset the inner while loop exited with mapIndex == mapCount and the code
// then read map[mapIndex] — one element past the end of the array. The
// dereference is now guarded by a mapIndex < mapCount check.
DECLARE_API(u)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    DWORD_PTR dwStartAddr = NULL;
    BOOL fWithGCInfo = FALSE;
    BOOL fWithEHInfo = FALSE;
    BOOL bSuppressLines = FALSE;
    BOOL bDisplayOffsets = FALSE;
    BOOL bDisplayILMap = FALSE;
    BOOL bIL = FALSE;
    BOOL dml = FALSE;
    size_t nArg;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-gcinfo", &fWithGCInfo, COBOOL, FALSE},
        {"-ehinfo", &fWithEHInfo, COBOOL, FALSE},
        {"-n", &bSuppressLines, COBOOL, FALSE},
        {"-o", &bDisplayOffsets, COBOOL, FALSE},
        {"-il", &bIL, COBOOL, FALSE},
        {"-map", &bDisplayILMap, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&dwStartAddr, COHEX},
    };
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg) || (nArg < 1))
    {
        return Status;
    }
    // symlines will be non-zero only if SYMOPT_LOAD_LINES was set in the symbol options
    ULONG symlines = 0;
    if (!bSuppressLines && SUCCEEDED(g_ExtSymbols->GetSymbolOptions(&symlines)))
    {
        symlines &= SYMOPT_LOAD_LINES;
    }
    bSuppressLines = bSuppressLines || (symlines == 0);

    EnableDMLHolder dmlHolder(dml);
    // dwStartAddr is either some IP address or a MethodDesc. Start off assuming it's a
    // MethodDesc.
    DWORD_PTR methodDesc = dwStartAddr;
    if (!IsMethodDesc(methodDesc))
    {
        // Not a methodDesc, so gotta find it ourselves
        DWORD_PTR tmpAddr = dwStartAddr;
        JITTypes jt;
        DWORD_PTR gcinfoAddr;
        IP2MethodDesc (tmpAddr, methodDesc, jt,
                       gcinfoAddr);
        if (!methodDesc || jt == TYPE_UNKNOWN)
        {
            // It is not managed code.
            ExtOut("Unmanaged code\n");
            UnassemblyUnmanaged(dwStartAddr, bSuppressLines);
            return Status;
        }
    }

    ExtractionCodeHeaderResult p = extractCodeHeaderData(methodDesc, dwStartAddr);
    Status = std::get<2>(p);
    if (Status != S_OK)
    {
        return Status;
    }
    NameForMD_s(methodDesc, g_mdName, mdNameLen);
    ExtOut("%S\n", g_mdName);

    DacpMethodDescData& MethodDescData = std::get<0>(p);
    DacpCodeHeaderData& codeHeaderData = std::get<1>(p);

    std::unique_ptr<CLRDATA_IL_ADDRESS_MAP[]> map(nullptr);
    ULONG32 mapCount = 0;
    Status = GetIntermediateLangMap(bIL, codeHeaderData, map /*out*/, mapCount /* out */, bDisplayILMap);
    if (Status != S_OK)
    {
        return Status;
    }

    // ///////////////////////////////////////////////////////////////////////////
    // This can be reused with sildasm but kept as-is largely since it just
    // works so it can be fixed later.
    // ///////////////////////////////////////////////////////////////////////////
    if (MethodDescData.bIsDynamic && MethodDescData.managedDynamicMethodObject)
    {
        ExtOut("Can only work with dynamic not implemented\n");
        return Status;
    }

    GetILAddressResult result = GetILAddress(MethodDescData);
    if (std::get<0>(result) == NULL)
    {
        ExtOut("ilAddr is %p\n", SOS_PTR(std::get<0>(result)));
        return E_FAIL;
    }
    ExtOut("ilAddr is %p pImport is %p\n", SOS_PTR(std::get<0>(result)), SOS_PTR(std::get<1>(result)));
    TADDR ilAddr = std::get<0>(result);
    ToRelease<IMetaDataImport> pImport(std::get<1>(result));

    /// Taken from DecodeILFromAddress(IMetaDataImport *pImport, TADDR ilAddr)
    ULONG Size = GetILSize(ilAddr);
    if (Size == 0)
    {
        ExtOut("error decoding IL\n");
        return Status;
    }

    // Read the memory into a local buffer
    ArrayHolder<BYTE> pArray = new BYTE[Size];
    Status = g_ExtData->ReadVirtual(TO_CDADDR(ilAddr), pArray, Size, NULL);
    if (Status != S_OK)
    {
        ExtOut("Failed to read memory\n");
        return Status;
    }

    /// Taken from DecodeIL(pImport, pArray, Size);
    // First decode the header
    BYTE *buffer = pArray;
    ULONG bufSize = Size;
    COR_ILMETHOD *pHeader = (COR_ILMETHOD *) buffer;
    COR_ILMETHOD_DECODER header(pHeader);
    ULONG position = 0;
    BYTE* pBuffer = const_cast<BYTE*>(header.Code);
    UINT indentCount = 0;
    ULONG endCodePosition = header.GetCodeSize();

    // Ranges associating a span of IL positions with the native address range
    // it was compiled to; consumed by the disassembly callback below.
    struct ILLocationRange {
        ULONG mStartPosition;
        ULONG mEndPosition;
        BYTE* mStartAddress;
        BYTE* mEndAddress;
    };
    std::deque<ILLocationRange> ilCodePositions;

    if (mapCount > 0)
    {
        while (position < endCodePosition)
        {
            // Find a map entry for this IL offset that covers a non-empty
            // native range.
            ULONG mapIndex = 0;
            do
            {
                while ((mapIndex < mapCount) && (position != map[mapIndex].ilOffset))
                {
                    ++mapIndex;
                }
                // BUGFIX: guard the dereference — the scan above can leave
                // mapIndex == mapCount when no entry matches this offset.
                if ((mapIndex < mapCount) &&
                    (map[mapIndex].endAddress > map[mapIndex].startAddress))
                {
                    break;
                }
                ++mapIndex;
            } while (mapIndex < mapCount);

            std::tuple<ULONG, UINT> r = DecodeILAtPosition(
                pImport, pBuffer, bufSize,
                position, indentCount, header);
            ExtOut("\n");
            if (mapIndex < mapCount)
            {
                ILLocationRange entry = {
                    position,
                    std::get<0>(r) - 1,
                    (BYTE*)map[mapIndex].startAddress,
                    (BYTE*)map[mapIndex].endAddress
                };
                ilCodePositions.push_back(std::move(entry));
            }
            else
            {
                // No native range for this IL offset; extend the previous one.
                if (!ilCodePositions.empty())
                {
                    auto& entry = ilCodePositions.back();
                    entry.mEndPosition = position;
                }
            }
            position = std::get<0>(r);
            indentCount = std::get<1>(r);
        }
    }

    position = 0;
    indentCount = 0;

    // Callback invoked per native instruction: when the instruction falls
    // inside a recorded native range, print the corresponding IL span once
    // and drop the range so it is not printed again.
    std::function<void(ULONG*, UINT*, BYTE*)> displayILFun =
        [&pImport, &pBuffer, bufSize, &header, &ilCodePositions](ULONG *pPosition, UINT *pIndentCount,
                                                                 BYTE *pIp) -> void {
            for (auto iter = ilCodePositions.begin(); iter != ilCodePositions.end(); ++iter)
            {
                if ((pIp >= iter->mStartAddress) && (pIp < iter->mEndAddress))
                {
                    ULONG position = iter->mStartPosition;
                    ULONG endPosition = iter->mEndPosition;
                    while (position <= endPosition)
                    {
                        std::tuple<ULONG, UINT> r = DecodeILAtPosition(
                            pImport, pBuffer, bufSize,
                            position, *pIndentCount, header);
                        ExtOut("\n");
                        position = std::get<0>(r);
                        *pIndentCount = std::get<1>(r);
                    }
                    ilCodePositions.erase(iter);
                    break;
                }
            }
        };

    if (codeHeaderData.ColdRegionStart != NULL)
    {
        ExtOut("Begin %p, size %x. Cold region begin %p, size %x\n",
               SOS_PTR(codeHeaderData.MethodStart), codeHeaderData.HotRegionSize,
               SOS_PTR(codeHeaderData.ColdRegionStart), codeHeaderData.ColdRegionSize);
    }
    else
    {
        ExtOut("Begin %p, size %x\n", SOS_PTR(codeHeaderData.MethodStart), codeHeaderData.MethodSize);
    }

    Status = displayGcInfo(fWithGCInfo, codeHeaderData);
    if (Status != S_OK)
    {
        return Status;
    }

    // Gather EH clauses (via gatherEh) so they can be marked in the listing.
    SOSEHInfo *pInfo = NULL;
    if (fWithEHInfo)
    {
        pInfo = new NOTHROW SOSEHInfo;
        if (pInfo == NULL)
        {
            ReportOOM();
        }
        else if (g_sos->TraverseEHInfo(codeHeaderData.MethodStart, gatherEh, (LPVOID)pInfo) != S_OK)
        {
            ExtOut("Failed to gather EHInfo data\n");
            delete pInfo;
            pInfo = NULL;
        }
    }

    if (codeHeaderData.ColdRegionStart == NULL)
    {
        g_targetMachine->Unassembly (
                (DWORD_PTR) codeHeaderData.MethodStart,
                ((DWORD_PTR)codeHeaderData.MethodStart) + codeHeaderData.MethodSize,
                dwStartAddr,
                (DWORD_PTR) MethodDescData.GCStressCodeCopy,
                fWithGCInfo ? &g_gcEncodingInfo : NULL,
                pInfo,
                bSuppressLines,
                bDisplayOffsets,
                displayILFun);
    }
    else
    {
        ExtOut("Hot region:\n");
        g_targetMachine->Unassembly (
                (DWORD_PTR) codeHeaderData.MethodStart,
                ((DWORD_PTR)codeHeaderData.MethodStart) + codeHeaderData.HotRegionSize,
                dwStartAddr,
                (DWORD_PTR) MethodDescData.GCStressCodeCopy,
                fWithGCInfo ? &g_gcEncodingInfo : NULL,
                pInfo,
                bSuppressLines,
                bDisplayOffsets,
                displayILFun);

        ExtOut("Cold region:\n");

        // Displaying gcinfo for a cold region requires knowing the size of
        // the hot region preceeding.
        g_gcEncodingInfo.hotSizeToAdd = codeHeaderData.HotRegionSize;
        g_targetMachine->Unassembly (
                (DWORD_PTR) codeHeaderData.ColdRegionStart,
                ((DWORD_PTR)codeHeaderData.ColdRegionStart) + codeHeaderData.ColdRegionSize,
                dwStartAddr,
                ((DWORD_PTR) MethodDescData.GCStressCodeCopy) + codeHeaderData.HotRegionSize,
                fWithGCInfo ? &g_gcEncodingInfo : NULL,
                pInfo,
                bSuppressLines,
                bDisplayOffsets,
                displayILFun);
    }

    if (pInfo)
    {
        delete pInfo;
        pInfo = NULL;
    }

    if (fWithGCInfo)
    {
        g_gcEncodingInfo.Deinitialize();
    }

    return Status;
}
// Fetches the MethodDesc data and matching code header for a method.
// 'dwStartAddr' may equal 'methodDesc' (MD was passed directly) or be an IP
// inside the jitted code; in the latter case the IP selects which rejit
// version's code header is returned. The HRESULT in the result tuple is S_OK
// only when both structures were successfully populated.
inline ExtractionCodeHeaderResult extractCodeHeaderData(DWORD_PTR methodDesc, DWORD_PTR dwStartAddr)
{
    DacpMethodDescData mdData;
    HRESULT hr =
        g_sos->GetMethodDescData(
            TO_CDADDR(methodDesc),
            dwStartAddr == methodDesc ? NULL : dwStartAddr,
            &mdData,
            0,      // cRevertedRejitVersions
            NULL,   // rgRevertedRejitData
            NULL);  // pcNeededRevertedRejitData
    if (hr != S_OK)
    {
        ExtOut("Failed to get method desc for %p.\n", SOS_PTR(dwStartAddr));
        return ExtractionCodeHeaderResult(std::move(mdData), DacpCodeHeaderData(), hr);
    }

    if (!mdData.bHasNativeCode)
    {
        ExtOut("Not jitted yet\n");
        return ExtractionCodeHeaderResult(std::move(mdData), DacpCodeHeaderData(), S_FALSE);
    }

    // Get the appropriate code header. If we were passed an MD, then use
    // MethodDescData.NativeCodeAddr to find the code header; if we were passed an IP, use
    // that IP to find the code header. This ensures that, for rejitted functions, we
    // disassemble the rejit version that the user explicitly specified with their IP.
    CLRDATA_ADDRESS codeAddr =
        TO_CDADDR((dwStartAddr == methodDesc) ? mdData.NativeCodeAddr : dwStartAddr);

    DacpCodeHeaderData header;
    if (header.Request(g_sos, codeAddr) != S_OK)
    {
        ExtOut("Unable to get codeHeader information\n");
        return ExtractionCodeHeaderResult(std::move(mdData), DacpCodeHeaderData(), S_FALSE);
    }
    if (header.MethodStart == 0)
    {
        ExtOut("not a valid MethodDesc\n");
        return ExtractionCodeHeaderResult(std::move(mdData), DacpCodeHeaderData(), S_FALSE);
    }

    // Report the JIT flavor; an unknown JIT is treated as a failure.
    switch (header.JITType)
    {
        case TYPE_UNKNOWN:
            ExtOut("unknown Jit\n");
            return ExtractionCodeHeaderResult(std::move(mdData), DacpCodeHeaderData(), S_FALSE);
        case TYPE_JIT:
            ExtOut("Normal JIT generated code\n");
            break;
        case TYPE_PJIT:
            ExtOut("preJIT generated code\n");
            break;
        default:
            break;
    }

    return ExtractionCodeHeaderResult(std::move(mdData), std::move(header), S_OK);
}
// When fWithGCInfo is set, reads the method's GC encoding table from the
// debuggee and decodes it (via DecodeGCTableEntry) into the global
// g_gcEncodingInfo text buffer so !u can interleave it with the disassembly.
// Returns S_OK (also when fWithGCInfo is FALSE and there is nothing to do).
HRESULT displayGcInfo(BOOL fWithGCInfo, const DacpCodeHeaderData& codeHeaderData)
{
    //
    // Set up to mix gc info with the code if requested. To do this, we first generate all the textual
    // gc info up front. This text is the same as the "!gcinfo" command, and looks like:
    //
    // Prolog size: 0
    // Security object: <none>
    // GS cookie: <none>
    // PSPSym: <none>
    // Generics inst context: <none>
    // PSP slot: <none>
    // GenericInst slot: <none>
    // Varargs: 0
    // Frame pointer: rbp
    // Wants Report Only Leaf: 0
    // Size of parameter area: 20
    // Return Kind: Scalar
    // Code size: 1ec
    // Untracked: +rbp-10 +rbp-30 +rbp-48 +rbp-50 +rbp-58 +rbp-60 +rbp-68 +rbp-70
    // 0000001e interruptible
    // 0000003c +rax
    // 0000004d +rdx
    // 00000051 +rcx
    // 00000056 -rdx -rcx -rax
    // 0000005a +rcx
    // 00000067 -rcx
    // 00000080 +rcx
    // 00000085 -rcx
    // 0000009e +rcx
    // 000000a3 -rcx
    // 000000bc +rcx
    // 000000c1 -rcx
    // 000000d7 +rcx
    // 000000e5 -rcx
    // 000000ef +rax
    // 0000010a +r8
    // 00000119 +rcx
    // 00000120 -r8 -rcx -rax
    // 0000012f +rax
    // 00000137 +r8
    // 00000146 +rcx
    // 00000150 -r8 -rcx -rax
    // 0000015f +rax
    // 00000167 +r8
    // 00000176 +rcx
    // 00000180 -r8 -rcx -rax
    // 0000018f +rax
    // 00000197 +r8
    // 000001a6 +rcx
    // 000001b0 -r8 -rcx -rax
    // 000001b4 +rcx
    // 000001b8 +rdx
    // 000001bd -rdx -rcx
    // 000001c8 +rcx
    // 000001cd -rcx
    // 000001d2 +rcx
    // 000001d7 -rcx
    // 000001e5 not interruptible
    //
    // For the entries without offset prefixes, we output them before the first offset of code.
    // (Previously, we only displayed the "Untracked:" element, but displaying all this additional
    // GC info is useful, and then the user doesn't need to also do a "!gcinfo" to see it.)
    // For the entries with offset prefixes, we parse the offset, and display all relevant information
    // before the current instruction offset being disassembled, that is, all the lines of GC info
    // with an offset greater than the previous instruction and with an offset less than or equal
    // to the offset of the current instruction.

    // The actual GC Encoding Table, this is updated during the course of the function.
    // Use a holder to make sure we clean up the memory for the table.
    ArrayHolder<BYTE> table = NULL;

    if (fWithGCInfo)
    {
        // assume that GC encoding table is never more than 40 + methodSize * 2
        int tableSize = 0;
        if (!ClrSafeInt<int>::multiply(codeHeaderData.MethodSize, 2, tableSize) ||
            !ClrSafeInt<int>::addition(tableSize, 40, tableSize))
        {
            ExtOut("<integer overflow>\n");
            return E_FAIL;
        }

        // Assign the new array to the mutable gcEncodingInfo table and to the
        // table ArrayHolder to clean this up when the function exits.
        table = new NOTHROW BYTE[tableSize];
        if (table == NULL)
        {
            ExtOut("Could not allocate memory to read the gc info.\n");
            return E_OUTOFMEMORY;
        }

        memset(table, 0, tableSize);
        // We avoid using move here, because we do not want to return
        if (!SafeReadMemory(TO_TADDR(codeHeaderData.GCInfo), table, tableSize, NULL))
        {
            ExtOut("Could not read memory %p\n", SOS_PTR(codeHeaderData.GCInfo));
            return ERROR_INVALID_DATA;
        }

        //
        // Skip the info header
        //
        unsigned int methodSize = (unsigned int)codeHeaderData.MethodSize;

        // Prepare the global text buffer that DecodeGCTableEntry appends into.
        if (!g_gcEncodingInfo.Initialize())
        {
            return E_OUTOFMEMORY;
        }

        GCInfoToken gcInfoToken = { table, GCINFO_VERSION };
        // Decode into text (no encoding bytes, no header) via DecodeGCTableEntry.
        g_targetMachine->DumpGCInfo(gcInfoToken, methodSize, DecodeGCTableEntry, false /*encBytes*/, false /*bPrintHeader*/);
    }
    return S_OK;
}
// When bIL is set, retrieves the IL-offset-to-native-address map for the
// method whose code starts at codeHeaderData.MethodStart. Uses the standard
// two-call pattern: first call with an empty buffer to learn the entry count,
// then allocate and call again to fill 'map'. When dumpMap is set, also
// prints each map entry. Returns S_OK when bIL is FALSE (nothing to do).
HRESULT GetIntermediateLangMap(BOOL bIL, const DacpCodeHeaderData& codeHeaderData,
                               std::unique_ptr<CLRDATA_IL_ADDRESS_MAP[]>& map,
                               ULONG32& mapCount,
                               BOOL dumpMap)
{
    HRESULT Status = S_OK;
    if (bIL)
    {
        ToRelease<IXCLRDataMethodInstance> pMethodInst(NULL);

        if ((Status = GetClrMethodInstance(codeHeaderData.MethodStart, &pMethodInst)) != S_OK)
        {
            return Status;
        }

        // Size query: mapCount is 0 and map is null here, so this call only
        // reports the number of entries needed.
        if ((Status = pMethodInst->GetILAddressMap(mapCount, &mapCount, map.get())) != S_OK)
        {
            return Status;
        }

        map.reset(new NOTHROW CLRDATA_IL_ADDRESS_MAP[mapCount]);
        if (map.get() == NULL)
        {
            ReportOOM();
            return E_OUTOFMEMORY;
        }

        // Second call actually fills the allocated map.
        if ((Status = pMethodInst->GetILAddressMap(mapCount, &mapCount, map.get())) != S_OK)
        {
            return Status;
        }

        if (dumpMap)
        {
            for (ULONG32 i = 0; i < mapCount; i++)
            {
                // TODO: These information should be interleaved with the disassembly
                // Decoded IL can be obtained through refactoring DumpIL code.
                ExtOut("%08x %p %p\n", map[i].ilOffset, map[i].startAddress, map[i].endAddress);
            }
        }
    }
    return S_OK;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the in-memory stress log *
* !DumpLog [filename] *
* will dump the stress log corresponding to the clr.dll *
* loaded in the debuggee's VAS *
* !DumpLog -addr <addr_of_StressLog::theLog> [filename] *
* will dump the stress log associated with any DLL linked *
* against utilcode.lib, most commonly mscordbi.dll *
* (e.g. !DumpLog -addr mscordbi!StressLog::theLog) *
* *
\**********************************************************************/
/**********************************************************************\
* Dumps the in-memory stress log to a file (default "StressLog.txt").  *
* The log address is taken from -addr when given, otherwise located    *
* via the DAC (or via symbols when the DAC is broken on Windows).      *
\**********************************************************************/
DECLARE_API(DumpLog)
{
    INIT_API_NO_RET_ON_FAILURE();
    MINIDUMP_NOT_SUPPORTED();
    _ASSERTE(g_pRuntime != nullptr);

    // Not supported on desktop runtime
    if (g_pRuntime->GetRuntimeConfiguration() == IRuntime::WindowsDesktop)
    {
        ExtErr("DumpLog not supported on desktop runtime\n");
        return E_FAIL;
    }

    const char* fileName = "StressLog.txt";
    CLRDATA_ADDRESS logAddr = NULL;
    StringHolder fileArg, addrArg;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-addr", &addrArg.data, COSTRING, TRUE}
    };
    CMDValue arg[] =
    {   // vptr, type
        {&fileArg.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
        return Status;

    if (nArg > 0 && fileArg.data != NULL)
        fileName = fileArg.data;

    // allow users to specify -addr mscordbdi!StressLog::theLog, for example.
    if (addrArg.data != NULL)
        logAddr = GetExpression(addrArg.data);

    if (logAddr == NULL)
    {
        if (g_bDacBroken)
        {
#ifndef FEATURE_PAL
            if (IsWindowsTarget())
            {
                // Try to find stress log symbols
                DWORD_PTR dwAddr = GetValueFromExpression("StressLog::theLog");
                logAddr = dwAddr;
            }
            else
#endif
            {
                ExtOut("No stress log address. DAC is broken; can't get it\n");
                return E_FAIL;
            }
        }
        else if (g_sos->GetStressLogAddress(&logAddr) != S_OK)
        {
            ExtOut("Unable to find stress log via DAC\n");
            return E_FAIL;
        }
    }

    if (logAddr == NULL)
    {
        ExtOut("Please provide the -addr argument for the address of the stress log, since no recognized runtime is loaded.\n");
        return E_FAIL;
    }

    ExtOut("Attempting to dump Stress log to file '%s'\n", fileName);

    Status = StressLog::Dump(logAddr, fileName, g_ExtData);
    if (Status == S_OK)
        ExtOut("SUCCESS: Stress log dumped\n");
    else if (Status == S_FALSE)
        ExtOut("No Stress log in the image, no file written\n");
    else
        ExtOut("FAILURE: Stress log not dumped\n");
    return Status;
}
#ifdef TRACE_GC
// Dumps the in-memory GC trace log (TRACE_GC builds only) to a file.
// Usage: !DumpGCLog [filename]   (default file: GCLog.txt)
// Tries the server GC's buffer first, then the workstation GC's.
DECLARE_API (DumpGCLog)
{
    INIT_API_NODAC();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    const char* fileName = "GCLog.txt";

    // Any non-blank argument is treated as the output filename.
    while (isspace (*args))
        args ++;

    if (*args != 0)
        fileName = args;

    // The log buffer pointer lives in either the server (SVR) or the
    // workstation (WKS) GC namespace.
    DWORD_PTR dwAddr = GetValueFromExpression("SVR::gc_log_buffer");
    moveN (dwAddr, dwAddr);

    if (dwAddr == 0)
    {
        dwAddr = GetValueFromExpression("WKS::gc_log_buffer");
        moveN (dwAddr, dwAddr);
        if (dwAddr == 0)
        {
            ExtOut("Can't get either WKS or SVR GC's log file");
            return E_FAIL;
        }
    }

    ExtOut("Dumping GC log at %08x\n", dwAddr);

    g_bDacBroken = FALSE;

    ExtOut("Attempting to dump GC log to file '%s'\n", fileName);

    Status = E_FAIL;

    HANDLE hGCLog = CreateFileA(
        fileName,
        GENERIC_WRITE,
        FILE_SHARE_READ,
        NULL,
        CREATE_ALWAYS,
        FILE_ATTRIBUTE_NORMAL,
        NULL);

    if (hGCLog == INVALID_HANDLE_VALUE)
    {
        ExtOut("failed to create file: %d\n", GetLastError());
        goto exit;
    }

    // Scope the buffer so 'goto exit' does not jump over its initialization,
    // and use ArrayHolder so it is released on every path (the previous
    // version leaked the raw new[] allocation). This mirrors DumpGCConfigLog.
    {
        int iLogSize = 1024*1024;
        ArrayHolder<BYTE> bGCLog = new NOTHROW BYTE[iLogSize];
        if (bGCLog == NULL)
        {
            ReportOOM();
            goto exit;
        }

        memset (bGCLog, 0, iLogSize);
        if (!SafeReadMemory(dwAddr, bGCLog, iLogSize, NULL))
        {
            ExtOut("failed to read memory from %08x\n", dwAddr);
        }

        // Trim the trailing run of '*' bytes padding the unused tail.
        int iRealLogSize = iLogSize - 1;
        while (iRealLogSize >= 0)
        {
            if (bGCLog[iRealLogSize] != '*')
            {
                break;
            }

            iRealLogSize--;
        }

        DWORD dwWritten = 0;
        WriteFile (hGCLog, bGCLog, iRealLogSize + 1, &dwWritten, NULL);
        Status = S_OK;
    }

exit:

    if (hGCLog != INVALID_HANDLE_VALUE)
    {
        CloseHandle (hGCLog);
    }

    // These messages previously said "Stress log" — a copy-paste bug.
    if (Status == S_OK)
        ExtOut("SUCCESS: GC log dumped\n");
    else if (Status == S_FALSE)
        ExtOut("No GC log in the image, no file written\n");
    else
        ExtOut("FAILURE: GC log not dumped\n");

    return Status;
}
#endif //TRACE_GC
#ifndef FEATURE_PAL
// Dumps the GC configuration log (GC_CONFIG_DRIVEN builds) to a file,
// appending to an existing file. Usage: !DumpGCConfigLog [filename]
// (default file: GCConfigLog.txt).
DECLARE_API (DumpGCConfigLog)
{
    INIT_API();
#ifdef GC_CONFIG_DRIVEN
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    const char* fileName = "GCConfigLog.txt";

    // Any non-blank argument is treated as the output filename.
    while (isspace (*args))
        args ++;

    if (*args != 0)
        fileName = args;

    if (!InitializeHeapData ())
    {
        ExtOut("GC Heap not initialized yet.\n");
        return S_OK;
    }

    // The config log buffer and its current length live in either the
    // server (SVR) or the workstation (WKS) GC namespace.
    BOOL fIsServerGC = IsServerBuild();

    DWORD_PTR dwAddr = 0;
    DWORD_PTR dwAddrOffset = 0;

    if (fIsServerGC)
    {
        dwAddr = GetValueFromExpression("SVR::gc_config_log_buffer");
        dwAddrOffset = GetValueFromExpression("SVR::gc_config_log_buffer_offset");
    }
    else
    {
        dwAddr = GetValueFromExpression("WKS::gc_config_log_buffer");
        dwAddrOffset = GetValueFromExpression("WKS::gc_config_log_buffer_offset");
    }

    moveN (dwAddr, dwAddr);
    moveN (dwAddrOffset, dwAddrOffset);

    if (dwAddr == 0)
    {
        ExtOut("Can't get either WKS or SVR GC's config log buffer");
        return E_FAIL;
    }

    ExtOut("Dumping GC config log at %08x\n", dwAddr);

    g_bDacBroken = FALSE;

    ExtOut("Attempting to dump GC config log to file '%s'\n", fileName);

    Status = E_FAIL;

    HANDLE hGCLog = CreateFileA(
        fileName,
        GENERIC_WRITE,
        FILE_SHARE_READ,
        NULL,
        OPEN_ALWAYS,            // append to an existing log rather than truncating
        FILE_ATTRIBUTE_NORMAL,
        NULL);

    if (hGCLog == INVALID_HANDLE_VALUE)
    {
        ExtOut("failed to create file: %d\n", GetLastError());
        goto exit;
    }

    {
        // Only the first gc_config_log_buffer_offset bytes are valid data.
        int iLogSize = (int)dwAddrOffset;

        ArrayHolder<BYTE> bGCLog = new NOTHROW BYTE[iLogSize];
        if (bGCLog == NULL)
        {
            ReportOOM();
            goto exit;
        }

        memset (bGCLog, 0, iLogSize);
        if (!SafeReadMemory(dwAddr, bGCLog, iLogSize, NULL))
        {
            ExtOut("failed to read memory from %08x\n", dwAddr);
        }

        SetFilePointer (hGCLog, 0, 0, FILE_END);
        DWORD dwWritten;
        WriteFile (hGCLog, bGCLog, iLogSize, &dwWritten, NULL);
    }

    Status = S_OK;

exit:

    if (hGCLog != INVALID_HANDLE_VALUE)
    {
        CloseHandle (hGCLog);
    }

    // These messages previously said "Stress log" — a copy-paste bug.
    if (Status == S_OK)
        ExtOut("SUCCESS: GC config log dumped\n");
    else if (Status == S_FALSE)
        ExtOut("No GC config log in the image, no file written\n");
    else
        ExtOut("FAILURE: GC config log not dumped\n");

    return Status;
#else
    ExtOut("Not implemented\n");
    return S_OK;
#endif //GC_CONFIG_DRIVEN
}
#endif // FEATURE_PAL
#ifdef GC_CONFIG_DRIVEN
// Display-name tables for the GC_CONFIG_DRIVEN statistics printed by
// PrintInterestingGCInfo/DumpGCData. Entry order must match the index
// ranges the DAC reports (DAC_NUM_GC_DATA_POINTS etc.).
static const char * const str_interesting_data_points[] =
{
    "pre short", // 0
    "post short", // 1
    "merged pins", // 2
    "converted pins", // 3
    "pre pin", // 4
    "post pin", // 5
    "pre and post pin", // 6
    "pre short padded", // 7
    "post short padded", // 8
};

static const char * const str_heap_compact_reasons[] =
{
    "low on ephemeral space",
    "high fragmentation",
    "couldn't allocate gaps",
    "user specfied compact LOH",
    "last GC before OOM",
    "induced compacting GC",
    "fragmented gen0 (ephemeral GC)",
    "high memory load (ephemeral GC)",
    "high memory load and frag",
    "very high memory load and frag",
    "no gc mode"
};

// Parallel to str_heap_compact_reasons: TRUE entries are printed with an
// "[M]" marker, FALSE with "[W]" (see PrintInterestingGCInfo). Per the
// name, TRUE presumably means the reason mandates compaction.
static BOOL gc_heap_compact_reason_mandatory_p[] =
{
    TRUE, //compact_low_ephemeral = 0,
    FALSE, //compact_high_frag = 1,
    TRUE, //compact_no_gaps = 2,
    TRUE, //compact_loh_forced = 3,
    TRUE, //compact_last_gc = 4
    TRUE, //compact_induced_compacting = 5,
    FALSE, //compact_fragmented_gen0 = 6,
    FALSE, //compact_high_mem_load = 7,
    TRUE, //compact_high_mem_frag = 8,
    TRUE, //compact_vhigh_mem_frag = 9,
    TRUE //compact_no_gc_mode = 10
};

static const char * const str_heap_expand_mechanisms[] =
{
    "reused seg with normal fit",
    "reused seg with best fit",
    "expand promoting eph",
    "expand with a new seg",
    "no memory for a new seg",
    "expand in next full GC"
};

static const char * const str_bit_mechanisms[] =
{
    "using mark list",
    "demotion"
};

static const char * const str_gc_global_mechanisms[] =
{
    "concurrent GCs",
    "compacting GCs",
    "promoting GCs",
    "GCs that did demotion",
    "card bundles",
    "elevation logic"
};
// Prints one heap's "interesting" GC counters gathered by the DAC:
// data points, compaction reasons, expansion mechanisms and bit mechanisms.
// The str_* tables above supply the row labels.
void PrintInterestingGCInfo(DacpGCInterestingInfoData* dataPerHeap)
{
    ExtOut("Interesting data points\n");
    size_t* data = dataPerHeap->interestingDataPoints;
    for (int i = 0; i < DAC_NUM_GC_DATA_POINTS; i++)
    {
        // NOTE(review): data[i] is size_t printed with %d — presumably fine
        // for these counts, but verify truncation behavior on 64-bit targets.
        ExtOut("%20s: %d\n", str_interesting_data_points[i], data[i]);
    }

    ExtOut("\nCompacting reasons\n");
    data = dataPerHeap->compactReasons;
    for (int i = 0; i < DAC_MAX_COMPACT_REASONS_COUNT; i++)
    {
        // [M] when gc_heap_compact_reason_mandatory_p[i] is TRUE, else [W].
        ExtOut("[%s]%35s: %d\n", (gc_heap_compact_reason_mandatory_p[i] ? "M" : "W"), str_heap_compact_reasons[i], data[i]);
    }

    ExtOut("\nExpansion mechanisms\n");
    data = dataPerHeap->expandMechanisms;
    for (int i = 0; i < DAC_MAX_EXPAND_MECHANISMS_COUNT; i++)
    {
        ExtOut("%30s: %d\n", str_heap_expand_mechanisms[i], data[i]);
    }

    ExtOut("\nOther mechanisms enabled\n");
    data = dataPerHeap->bitMechanisms;
    for (int i = 0; i < DAC_MAX_GC_MECHANISM_BITS_COUNT; i++)
    {
        ExtOut("%20s: %d\n", str_bit_mechanisms[i], data[i]);
    }
}
#endif //GC_CONFIG_DRIVEN
// Prints the GC's "interesting info": global mechanism counters plus the
// per-heap statistics (one heap for workstation GC, all heaps for server
// GC). Only available in GC_CONFIG_DRIVEN builds.
DECLARE_API(DumpGCData)
{
    INIT_API();

#ifdef GC_CONFIG_DRIVEN
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    if (!InitializeHeapData ())
    {
        ExtOut("GC Heap not initialized yet.\n");
        return S_OK;
    }

    DacpGCInterestingInfoData interestingInfo;
    // NOTE(review): the HRESULT of RequestGlobal is ignored; on failure the
    // globalMechanisms printed below may be uninitialized — confirm upstream.
    interestingInfo.RequestGlobal(g_sos);
    for (int i = 0; i < DAC_MAX_GLOBAL_GC_MECHANISMS_COUNT; i++)
    {
        ExtOut("%-30s: %d\n", str_gc_global_mechanisms[i], interestingInfo.globalMechanisms[i]);
    }

    ExtOut("\n[info per heap]\n");

    if (!IsServerBuild())
    {
        // Workstation GC: a single implicit heap.
        if (interestingInfo.Request(g_sos) != S_OK)
        {
            ExtOut("Error requesting interesting GC info\n");
            return E_FAIL;
        }

        PrintInterestingGCInfo(&interestingInfo);
    }
    else
    {
        // Server GC: enumerate every heap and print each one's statistics.
        DWORD dwNHeaps = GetGcHeapCount();
        DWORD dwAllocSize;
        // Overflow-checked allocation size for the heap address array.
        if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
        {
            ExtOut("Failed to get GCHeaps: integer overflow\n");
            return Status;
        }

        CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
        if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
        {
            ExtOut("Failed to get GCHeaps\n");
            return Status;
        }

        for (DWORD n = 0; n < dwNHeaps; n ++)
        {
            if (interestingInfo.Request(g_sos, heapAddrs[n]) != S_OK)
            {
                ExtOut("Heap %d: Error requesting interesting GC info\n", n);
                return E_FAIL;
            }
            ExtOut("--------info for heap %d--------\n", n);
            PrintInterestingGCInfo(&interestingInfo);
            ExtOut("\n");
        }
    }

    return S_OK;
#else
    ExtOut("Not implemented\n");
    return S_OK;
#endif //GC_CONFIG_DRIVEN
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the build number and type of the *
* runtime and SOS. *
* *
\**********************************************************************/
// Prints the runtime's file version and build flavor, the GC mode
// (workstation vs. server), and — on Windows — the SOS file version.
DECLARE_API(EEVersion)
{
    INIT_API();

    static const int fileVersionBufferSize = 1024;
    ArrayHolder<char> fileVersionBuffer = new char[fileVersionBufferSize];
    VS_FIXEDFILEINFO version;

    BOOL ret = GetEEVersion(&version, fileVersionBuffer.GetPtr(), fileVersionBufferSize);
    if (ret)
    {
        // (DWORD)-1 marks an unavailable version.
        if (version.dwFileVersionMS != (DWORD)-1)
        {
            ExtOut("%u.%u.%u.%u",
                HIWORD(version.dwFileVersionMS),
                LOWORD(version.dwFileVersionMS),
                HIWORD(version.dwFileVersionLS),
                LOWORD(version.dwFileVersionLS));

            if (IsRuntimeVersion(version, 3)) {
                ExtOut(" (3.x runtime)");
            }

#ifndef FEATURE_PAL
            if (IsWindowsTarget())
            {
                if (version.dwFileFlags & VS_FF_DEBUG) {
                    ExtOut(" checked or debug build");
                }
                else
                {
                    // Distinguish retail from free by probing the module image.
                    BOOL fRet = IsRetailBuild((size_t)g_pRuntime->GetModuleAddress());
                    if (fRet)
                        ExtOut(" retail");
                    else
                        ExtOut(" free");
                }
            }
#endif
            ExtOut("\n");

            // Additional version string (when GetEEVersion filled it in).
            if (fileVersionBuffer[0] != '\0') {
                ExtOut("%s\n", fileVersionBuffer.GetPtr());
            }
        }
    }

    if (!InitializeHeapData())
        ExtOut("GC Heap not initialized, so GC mode is not determined yet.\n");
    else if (IsServerBuild())
        ExtOut("Server mode with %d gc heaps\n", GetGcHeapCount());
    else
        ExtOut("Workstation mode\n");

    if (!GetGcStructuresValid()) {
        ExtOut("In plan phase of garbage collection\n");
    }

#ifndef FEATURE_PAL
    // Print SOS version
    VS_FIXEDFILEINFO sosVersion;
    if (IsWindowsTarget() && GetSOSVersion(&sosVersion))
    {
        if (sosVersion.dwFileVersionMS != (DWORD)-1)
        {
            ExtOut("SOS Version: %u.%u.%u.%u",
                HIWORD(sosVersion.dwFileVersionMS),
                LOWORD(sosVersion.dwFileVersionMS),
                HIWORD(sosVersion.dwFileVersionLS),
                LOWORD(sosVersion.dwFileVersionLS));
            if (sosVersion.dwFileFlags & VS_FF_DEBUG) {
                ExtOut(" debug build");
            }
            else {
                ExtOut(" retail build");
            }
            ExtOut("\n");
        }
    }
#endif // FEATURE_PAL
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
*    This function displays the global SOS status                      *
* *
\**********************************************************************/
// Displays (or resets) SOS's global state. On Windows, -desktop/-netcore
// switch which runtime SOS inspects; -reset discards all cached state.
DECLARE_API(SOSStatus)
{
    INIT_API_EXT();

    BOOL desktopRuntime = FALSE;
    BOOL netcoreRuntime = FALSE;
    BOOL resetState = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"-desktop", &desktopRuntime, COBOOL, FALSE},
        {"-netcore", &netcoreRuntime, COBOOL, FALSE},
#endif
        {"-reset", &resetState, COBOOL, FALSE},
    };
    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }

#ifndef FEATURE_PAL
    if (desktopRuntime || netcoreRuntime)
    {
        // Runtime switching is a Windows-only feature.
        if (!IsWindowsTarget())
        {
            ExtErr("The '-desktop' and '-netcore' options are only supported on Windows targets\n");
            return E_FAIL;
        }
        PCSTR name = desktopRuntime ? "desktop CLR" : ".NET Core";
        if (!Runtime::SwitchRuntime(desktopRuntime))
        {
            ExtErr("The %s runtime is not loaded\n", name);
            return E_FAIL;
        }
        ExtOut("Switched to %s runtime successfully\n", name);
        return S_OK;
    }
#endif
    if (resetState)
    {
        Runtime::CleanupRuntimes();
        CleanupTempDirectory();
        ExtOut("SOS state reset\n");
        return S_OK;
    }

    // No mode switch requested: report the current state, piece by piece,
    // skipping anything that has not been initialized yet.
    if (g_targetMachine != nullptr)
    {
        ExtOut("Target platform: %04x Context size %04x\n", g_targetMachine->GetPlatform(), g_targetMachine->GetContextSize());
    }
    if (g_runtimeModulePath != nullptr)
    {
        ExtOut("Runtime module path: %s\n", g_runtimeModulePath);
    }
    if (g_pRuntime != nullptr)
    {
        g_pRuntime->DisplayStatus();
    }
    if (g_tmpPath != nullptr)
    {
        ExtOut("Temp path: %s\n", g_tmpPath);
    }
    if (g_hostRuntimeDirectory != nullptr)
    {
        ExtOut("Host runtime path: %s\n", g_hostRuntimeDirectory);
    }
    DisplaySymbolStore();

    return Status;
}
#ifndef FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* This function is called to print the environment setting for *
* the current process. *
* *
\**********************************************************************/
// Prints environment variables, CPU times, and memory statistics of the
// live target process. Options: -env, -time, -mem (default: everything).
// Requires a live process (reads the target PEB and process handles).
DECLARE_API (ProcInfo)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    if (IsDumpFile())
    {
        ExtOut("!ProcInfo is not supported on a dump file.\n");
        return Status;
    }

#define INFO_ENV 0x00000001
#define INFO_TIME 0x00000002
#define INFO_MEM 0x00000004
#define INFO_ALL 0xFFFFFFFF

    DWORD fProcInfo = INFO_ALL;

    if (_stricmp (args, "-env") == 0) {
        fProcInfo = INFO_ENV;
    }

    if (_stricmp (args, "-time") == 0) {
        fProcInfo = INFO_TIME;
    }

    if (_stricmp (args, "-mem") == 0) {
        fProcInfo = INFO_MEM;
    }

    if (fProcInfo & INFO_ENV) {
        // Walk PEB -> ProcessParameters -> Environment in the target and
        // print each NUL-terminated UTF-16 "NAME=value" string.
        ULONG64 pPeb;
        if (FAILED(g_ExtSystem->GetCurrentProcessPeb(&pPeb)))
        {
            return Status;
        }

        ExtOut("---------------------------------------\n");
        ExtOut("Environment\n");

        // Field offsets resolved once from ntdll's PDB when available.
        static ULONG Offset_ProcessParam = -1;
        static ULONG Offset_Environment = -1;
        if (Offset_ProcessParam == -1)
        {
            ULONG TypeId;
            ULONG64 NtDllBase;
            if (SUCCEEDED(g_ExtSymbols->GetModuleByModuleName ("ntdll",0,NULL,
                &NtDllBase)))
            {
                if (SUCCEEDED(g_ExtSymbols->GetTypeId (NtDllBase, "PEB", &TypeId)))
                {
                    if (FAILED (g_ExtSymbols->GetFieldOffset(NtDllBase, TypeId,
                        "ProcessParameters", &Offset_ProcessParam)))
                        Offset_ProcessParam = -1;
                }
                if (SUCCEEDED(g_ExtSymbols->GetTypeId (NtDllBase, "_RTL_USER_PROCESS_PARAMETERS", &TypeId)))
                {
                    if (FAILED (g_ExtSymbols->GetFieldOffset(NtDllBase, TypeId,
                        "Environment", &Offset_Environment)))
                        Offset_Environment = -1;
                }
            }
        }
        // We can not get it from PDB. Use the fixed one.
        if (Offset_ProcessParam == -1)
            Offset_ProcessParam = offsetof (DT_PEB, ProcessParameters);

        if (Offset_Environment == -1)
            Offset_Environment = offsetof (DT_RTL_USER_PROCESS_PARAMETERS, Environment);

        // Two indirections: PEB.ProcessParameters, then .Environment.
        ULONG64 addr = pPeb + Offset_ProcessParam;
        DWORD_PTR value;
        g_ExtData->ReadVirtual(UL64_TO_CDA(addr), &value, sizeof(PVOID), NULL);
        addr = value + Offset_Environment;
        g_ExtData->ReadVirtual(UL64_TO_CDA(addr), &value, sizeof(PVOID), NULL);

        // Read the environment block one page at a time, bounded by the
        // target's actual page size when the debugger reports it.
        static WCHAR buffer[DT_OS_PAGE_SIZE/2];
        ULONG readBytes = DT_OS_PAGE_SIZE;
        ULONG64 Page;
        if ((g_ExtData->ReadDebuggerData( DEBUG_DATA_MmPageSize, &Page, sizeof(Page), NULL)) == S_OK
            && Page > 0)
        {
            ULONG uPageSize = (ULONG)(ULONG_PTR)Page;
            if (readBytes > uPageSize) {
                readBytes = uPageSize;
            }
        }
        addr = value;
        while (1) {
            if (IsInterrupt())
                return Status;

            if (FAILED(g_ExtData->ReadVirtual(UL64_TO_CDA(addr), &buffer, readBytes, NULL)))
                break;
            addr += readBytes;
            WCHAR *pt = buffer;
            WCHAR *end = pt;
            while (pt < &buffer[DT_OS_PAGE_SIZE/2]) {
                end = _wcschr (pt, L'\0');
                if (end == NULL) {
                    // The string runs past this page: print the remainder
                    // with a counted format and continue on the next page.
                    // NOTE(review): "%dS" yields e.g. "123S", not "%123S";
                    // this looks like it was meant to be "%%%dS" — confirm.
                    char format[20];
                    sprintf_s (format,_countof (format), "%dS", &buffer[DT_OS_PAGE_SIZE/2] - pt);
                    ExtOut(format, pt);
                    break;
                }
                else if (end == pt) {
                    // An empty string terminates the environment block.
                    break;
                }
                ExtOut("%S\n", pt);
                pt = end + 1;
            }
            if (end == pt) {
                break;
            }
        }
    }

    HANDLE hProcess = INVALID_HANDLE_VALUE;
    if (fProcInfo & (INFO_TIME | INFO_MEM)) {
        ULONG64 handle;
        if (FAILED(g_ExtSystem->GetCurrentProcessHandle(&handle)))
        {
            return Status;
        }
        hProcess = (HANDLE)handle;
    }

    if (!IsDumpFile() && fProcInfo & INFO_TIME) {
        FILETIME CreationTime;
        FILETIME ExitTime;
        FILETIME KernelTime;
        FILETIME UserTime;

        // GetProcessTimes is bound lazily, exactly once.
        typedef BOOL (WINAPI *FntGetProcessTimes)(HANDLE, LPFILETIME, LPFILETIME, LPFILETIME, LPFILETIME);
        static FntGetProcessTimes pFntGetProcessTimes = (FntGetProcessTimes)-1;
        if (pFntGetProcessTimes == (FntGetProcessTimes)-1) {
            HINSTANCE hstat = LoadLibrary ("Kernel32.dll");
            if (hstat != 0)
            {
                pFntGetProcessTimes = (FntGetProcessTimes)GetProcAddress (hstat, "GetProcessTimes");
                FreeLibrary (hstat);
            }
            else
                pFntGetProcessTimes = NULL;
        }

        if (pFntGetProcessTimes && pFntGetProcessTimes (hProcess,&CreationTime,&ExitTime,&KernelTime,&UserTime)) {
            ExtOut("---------------------------------------\n");
            ExtOut("Process Times\n");
            static const char *Month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
                "Oct", "Nov", "Dec"};
            SYSTEMTIME SystemTime;
            FILETIME LocalFileTime;
            if (FileTimeToLocalFileTime (&CreationTime,&LocalFileTime)
                && FileTimeToSystemTime (&LocalFileTime,&SystemTime)) {
                ExtOut("Process Started at: %4d %s %2d %d:%d:%d.%02d\n",
                    SystemTime.wYear, Month[SystemTime.wMonth-1], SystemTime.wDay,
                    SystemTime.wHour, SystemTime.wMinute,
                    SystemTime.wSecond, SystemTime.wMilliseconds/10);
            }

            // FILETIME is in 100ns units; break kernel time into
            // days/hours/minutes/seconds/hundredths for display.
            DWORD nDay = 0;
            DWORD nHour = 0;
            DWORD nMin = 0;
            DWORD nSec = 0;
            DWORD nHundred = 0;

            ULONG64 totalTime;

            totalTime = KernelTime.dwLowDateTime + (((ULONG64)KernelTime.dwHighDateTime) << 32);
            nDay = (DWORD)(totalTime/(24*3600*10000000ui64));
            totalTime %= 24*3600*10000000ui64;
            nHour = (DWORD)(totalTime/(3600*10000000ui64));
            totalTime %= 3600*10000000ui64;
            nMin = (DWORD)(totalTime/(60*10000000));
            totalTime %= 60*10000000;
            nSec = (DWORD)(totalTime/10000000);
            totalTime %= 10000000;
            nHundred = (DWORD)(totalTime/100000);
            ExtOut("Kernel CPU time : %d days %02d:%02d:%02d.%02d\n",
                nDay, nHour, nMin, nSec, nHundred);

            // Accumulate kernel time, then add user time for the total.
            DWORD sDay = nDay;
            DWORD sHour = nHour;
            DWORD sMin = nMin;
            DWORD sSec = nSec;
            DWORD sHundred = nHundred;

            totalTime = UserTime.dwLowDateTime + (((ULONG64)UserTime.dwHighDateTime) << 32);
            nDay = (DWORD)(totalTime/(24*3600*10000000ui64));
            totalTime %= 24*3600*10000000ui64;
            nHour = (DWORD)(totalTime/(3600*10000000ui64));
            totalTime %= 3600*10000000ui64;
            nMin = (DWORD)(totalTime/(60*10000000));
            totalTime %= 60*10000000;
            nSec = (DWORD)(totalTime/10000000);
            totalTime %= 10000000;
            nHundred = (DWORD)(totalTime/100000);
            ExtOut("User CPU time : %d days %02d:%02d:%02d.%02d\n",
                nDay, nHour, nMin, nSec, nHundred);

            // Normalize the summed fields with carries (hundredths -> days).
            sDay += nDay;
            sHour += nHour;
            sMin += nMin;
            sSec += nSec;
            sHundred += nHundred;
            if (sHundred >= 100) {
                sSec += sHundred/100;
                sHundred %= 100;
            }
            if (sSec >= 60) {
                sMin += sSec/60;
                sSec %= 60;
            }
            if (sMin >= 60) {
                sHour += sMin/60;
                sMin %= 60;
            }
            if (sHour >= 24) {
                sDay += sHour/24;
                sHour %= 24;
            }
            ExtOut("Total CPU time : %d days %02d:%02d:%02d.%02d\n",
                sDay, sHour, sMin, sSec, sHundred);
        }
    }

    if (!IsDumpFile() && fProcInfo & INFO_MEM) {
        // Per-process VM counters come from NtQueryInformationProcess,
        // also bound lazily, exactly once.
        typedef
        NTSTATUS
        (NTAPI
        *FntNtQueryInformationProcess)(HANDLE, PROCESSINFOCLASS, PVOID, ULONG, PULONG);
        static FntNtQueryInformationProcess pFntNtQueryInformationProcess = (FntNtQueryInformationProcess)-1;
        if (pFntNtQueryInformationProcess == (FntNtQueryInformationProcess)-1) {
            HINSTANCE hstat = LoadLibrary ("ntdll.dll");
            if (hstat != 0)
            {
                pFntNtQueryInformationProcess = (FntNtQueryInformationProcess)GetProcAddress (hstat, "NtQueryInformationProcess");
                FreeLibrary (hstat);
            }
            else
                pFntNtQueryInformationProcess = NULL;
        }
        VM_COUNTERS memory;
        if (pFntNtQueryInformationProcess &&
            NT_SUCCESS (pFntNtQueryInformationProcess (hProcess,ProcessVmCounters,&memory,sizeof(memory),NULL))) {
            ExtOut("---------------------------------------\n");
            ExtOut("Process Memory\n");
            ExtOut("WorkingSetSize: %8d KB PeakWorkingSetSize: %8d KB\n",
                memory.WorkingSetSize/1024, memory.PeakWorkingSetSize/1024);
            ExtOut("VirtualSize: %8d KB PeakVirtualSize: %8d KB\n",
                memory.VirtualSize/1024, memory.PeakVirtualSize/1024);
            ExtOut("PagefileUsage: %8d KB PeakPagefileUsage: %8d KB\n",
                memory.PagefileUsage/1024, memory.PeakPagefileUsage/1024);
        }

        // System-wide memory availability.
        MEMORYSTATUS memstat;
        GlobalMemoryStatus (&memstat);
        ExtOut("---------------------------------------\n");
        ExtOut("%ld percent of memory is in use.\n\n",
            memstat.dwMemoryLoad);
        ExtOut("Memory Availability (Numbers in MB)\n\n");
        ExtOut(" %8s %8s\n", "Total", "Avail");
        ExtOut("Physical Memory %8d %8d\n", memstat.dwTotalPhys/1024/1024, memstat.dwAvailPhys/1024/1024);
        ExtOut("Page File %8d %8d\n", memstat.dwTotalPageFile/1024/1024, memstat.dwAvailPageFile/1024/1024);
        ExtOut("Virtual Memory %8d %8d\n", memstat.dwTotalVirtual/1024/1024, memstat.dwAvailVirtual/1024/1024);
    }

    return Status;
}
#endif // FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* This function is called to find the address of EE data for a *
* metadata token. *
* *
\**********************************************************************/
// Maps a metadata token to its EE data, searching one module by name or
// every loaded module when '*' is given.
DECLARE_API(Token2EE)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    StringHolder DllName;
    ULONG64 token = 0;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&DllName.data, COSTRING},
        {&token, COHEX}
    };
    size_t nArg;
    if (!GetCMDOption(args,option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    // Both the module name and the token are required.
    if (nArg != 2)
    {
        ExtOut("Usage: !Token2EE module_name mdToken\n");
        ExtOut(" You can pass * for module_name to search all modules.\n");
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);

    // '*' means search every loaded module.
    int numModule;
    ArrayHolder<DWORD_PTR> moduleList = NULL;
    bool searchAll = (strcmp(DllName.data, "*") == 0);
    moduleList = ModuleFromName(searchAll ? NULL : DllName.data, &numModule);

    if (moduleList == NULL)
    {
        ExtOut("Failed to request module list.\n");
        return Status;
    }

    for (int i = 0; i < numModule; i++)
    {
        if (IsInterrupt())
            break;

        if (i > 0)
        {
            ExtOut("--------------------------------------\n");
        }

        DWORD_PTR moduleAddr = moduleList[i];
        WCHAR moduleFile[MAX_LONGPATH];
        FileNameForModule(moduleAddr, moduleFile);

        // Show just the file-name portion of the module path.
        LPWSTR shortName = _wcsrchr(moduleFile, DIRECTORY_SEPARATOR_CHAR_W);
        shortName = (shortName == NULL) ? moduleFile : shortName + 1; // skip past the last "\" character

        DMLOut("Module: %s\n", DMLModule(moduleAddr));
        ExtOut("Assembly: %S\n", shortName);
        GetInfoFromModule(moduleAddr, (ULONG)token);
    }

    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
*    This function is called to find the address of EE data for a     *
*    module/type/method name.                                         *
* *
\**********************************************************************/
// Resolves "module_name item_name" (or the combined "module_name!item_name"
// form) to EE data, printing the result for each matching module.
// '*' as the module name searches all loaded modules.
DECLARE_API(Name2EE)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    StringHolder DllName, TypeName;
    BOOL dml = FALSE;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&DllName.data, COSTRING},
        {&TypeName.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    if (nArg == 1)
    {
        // The input may be in the form <modulename>!<type>
        // If so, do some surgery on the input params.

        // There should be only 1 ! character
        LPSTR pszSeperator = strchr (DllName.data, '!');
        if (pszSeperator != NULL)
        {
            if (strchr (pszSeperator + 1, '!') == NULL)
            {
                size_t capacity_TypeName_data = strlen(pszSeperator + 1) + 1;
                TypeName.data = new NOTHROW char[capacity_TypeName_data];
                if (TypeName.data)
                {
                    // get the type name,
                    strcpy_s (TypeName.data, capacity_TypeName_data, pszSeperator + 1);
                    // and truncate DllName
                    *pszSeperator = '\0';

                    // Do some extra validation
                    if (strlen (DllName.data) >= 1 &&
                        strlen (TypeName.data) > 1)
                    {
                        nArg = 2;
                    }
                }
            }
        }
    }

    if (nArg != 2)
    {
        ExtOut("Usage: " SOSPrefix "name2ee module_name item_name\n");
        ExtOut(" or " SOSPrefix "name2ee module_name!item_name\n");
        ExtOut(" use * for module_name to search all loaded modules\n");
        ExtOut("Examples: " SOSPrefix "name2ee mscorlib.dll System.String.ToString\n");
        ExtOut(" " SOSPrefix "name2ee *!System.String\n");
        return Status;
    }

    // '*' means search every loaded module.
    int numModule;
    ArrayHolder<DWORD_PTR> moduleList = NULL;
    if (strcmp(DllName.data, "*") == 0)
    {
        moduleList = ModuleFromName(NULL, &numModule);
    }
    else
    {
        moduleList = ModuleFromName(DllName.data, &numModule);
    }

    if (moduleList == NULL)
    {
        // BUGFIX: the format string had no conversion specifier, so the
        // extra DllName.data argument was never consumed; it has been removed.
        ExtOut("Failed to request module list.\n");
    }
    else
    {
        for (int i = 0; i < numModule; i ++)
        {
            if (IsInterrupt())
                break;

            if (i > 0)
            {
                ExtOut("--------------------------------------\n");
            }

            DWORD_PTR dwAddr = moduleList[i];
            WCHAR FileName[MAX_LONGPATH];
            FileNameForModule (dwAddr, FileName);

            // We'd like a short form for this output
            LPWSTR pszFilename = _wcsrchr (FileName, DIRECTORY_SEPARATOR_CHAR_W);
            if (pszFilename == NULL)
            {
                pszFilename = FileName;
            }
            else
            {
                pszFilename++; // skip past the last "\" character
            }

            DMLOut("Module: %s\n", DMLModule(dwAddr));
            ExtOut("Assembly: %S\n", pszFilename);

            GetInfoFromName(dwAddr, TypeName.data);
        }
    }

    return Status;
}
// Finds and prints a GC reference path from 'root' to 'target', if one exists.
DECLARE_API(PathTo)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    DWORD_PTR root = NULL;
    DWORD_PTR target = NULL;
    BOOL dml = FALSE;
    size_t nArg;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&root, COHEX},
        {&target, COHEX},
    };
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    // Both object addresses are required.
    if (root == 0 || target == 0)
    {
        ExtOut("Invalid argument %s\n", args);
        return Status;
    }

    GCRootImpl gcroot;
    if (!gcroot.PrintPathToObject(root, target))
    {
        ExtOut("Did not find a path from %p to %p.\n", SOS_PTR(root), SOS_PTR(target));
    }

    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function finds all roots (on stack or in handles) for a *
* given object. *
* *
\**********************************************************************/
// Finds all roots (stack, handles, etc.) keeping the given object alive.
// Options: -nostacks skips stack roots, -all prints every root rather than
// only unique ones, /d enables DML output (Windows only).
DECLARE_API(GCRoot)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    BOOL bNoStacks = FALSE;
    DWORD_PTR obj = NULL;
    BOOL dml = FALSE;
    BOOL all = FALSE;
    size_t nArg;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-nostacks", &bNoStacks, COBOOL, FALSE},
        {"-all", &all, COBOOL, FALSE},
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    {   // vptr, type
        {&obj, COHEX}
    };
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    // An object address is required.
    if (obj == 0)
    {
        ExtOut("Invalid argument %s\n", args);
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    GCRootImpl gcroot;
    int i = gcroot.PrintRootsForObject(obj, all == TRUE, bNoStacks == TRUE);

    // Partial results were already printed if the user interrupted the walk.
    if (IsInterrupt())
        ExtOut("Interrupted, data may be incomplete.\n");

    if (all)
        ExtOut("Found %d roots.\n", i);
    else
        ExtOut("Found %d unique roots (run '" SOSPrefix "gcroot -all' to see all roots).\n", i);

    return Status;
}
// Locates an address in the GC heap and prints its heap, segment,
// generation, and size.
DECLARE_API(GCWhere)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    BOOL dml = FALSE;
    // BUGFIX: these flags were uninitialized; the option parser only writes
    // them when the switch appears on the command line. (They are parsed but
    // currently unused by the command.)
    BOOL bGetBrick = FALSE;
    BOOL bGetCard = FALSE;
    TADDR taddrObj = 0;
    size_t nArg;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-brick", &bGetBrick, COBOOL, FALSE},
        {"-card", &bGetCard, COBOOL, FALSE},
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&taddrObj, COHEX}
    };
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    // Obtain allocation context for each managed thread.
    AllocInfo allocInfo;
    allocInfo.Init();

    TADDR_SEGINFO trngSeg  = { 0, 0, 0 };
    TADDR_RANGE   allocCtx = { 0, 0 };
    int   gen = -1;
    BOOL  bLarge = FALSE;
    BOOL  bFound = FALSE;

    // When the address is a valid object, report its size as well.
    size_t size = 0;
    if (sos::IsObject(taddrObj))
    {
        TADDR taddrMT;
        BOOL bContainsPointers;
        if(FAILED(GetMTOfObject(taddrObj, &taddrMT)) ||
           !GetSizeEfficient(taddrObj, taddrMT, FALSE, size, bContainsPointers))
        {
            ExtWarn("Couldn't get size for object %#p: possible heap corruption.\n",
                SOS_PTR(taddrObj));
        }
    }

    if (!IsServerBuild())
    {
        // Workstation GC: a single heap, printed as heap 0.
        DacpGcHeapDetails heapDetails;
        if (heapDetails.Request(g_sos) != S_OK)
        {
            ExtOut("Error requesting gc heap details\n");
            return Status;
        }

        if (GCObjInHeap(taddrObj, heapDetails, trngSeg, gen, allocCtx, bLarge))
        {
            ExtOut("Address " WIN64_8SPACES " Gen Heap segment " WIN64_8SPACES " begin " WIN64_8SPACES " allocated " WIN64_8SPACES " size\n");
            ExtOut("%p %d %2d %p %p %p 0x%x(%d)\n",
                SOS_PTR(taddrObj), gen, 0, SOS_PTR(trngSeg.segAddr), SOS_PTR(trngSeg.start), SOS_PTR(trngSeg.end), size, size);
            bFound = TRUE;
        }
    }
    else
    {
        // Server GC: search every heap until the address is found.
        DacpGcHeapData gcheap;
        if (gcheap.Request(g_sos) != S_OK)
        {
            ExtOut("Error requesting GC Heap data\n");
            return Status;
        }

        DWORD dwAllocSize;
        DWORD dwNHeaps = gcheap.HeapCount;
        // Overflow-checked allocation size for the heap address array.
        if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
        {
            ExtOut("Failed to get GCHeaps: integer overflow\n");
            return Status;
        }
        CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
        if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
        {
            ExtOut("Failed to get GCHeaps\n");
            return Status;
        }

        for (DWORD n = 0; n < dwNHeaps; n ++)
        {
            DacpGcHeapDetails heapDetails;
            if (heapDetails.Request(g_sos, heapAddrs[n]) != S_OK)
            {
                ExtOut("Error requesting details\n");
                return Status;
            }

            if (GCObjInHeap(taddrObj, heapDetails, trngSeg, gen, allocCtx, bLarge))
            {
                ExtOut("Address " WIN64_8SPACES " Gen Heap segment " WIN64_8SPACES " begin " WIN64_8SPACES " allocated" WIN64_8SPACES " size\n");
                ExtOut("%p %d %2d %p %p %p 0x%x(%d)\n",
                    SOS_PTR(taddrObj), gen, n, SOS_PTR(trngSeg.segAddr), SOS_PTR(trngSeg.start), SOS_PTR(trngSeg.end), size, size);
                bFound = TRUE;
                break;
            }
        }
    }

    if (!bFound)
    {
        ExtOut("Address %#p not found in the managed heap.\n", SOS_PTR(taddrObj));
    }

    return Status;
}
#ifndef FEATURE_PAL
// Two-phase command (Windows live-debugging only):
//   !FindRoots -gen <N|any>  arms a GC_MARK_END notification so the debugger
//                            breaks during the next GC of generation N;
//   !FindRoots <obj>         then, at that break, finds the roots of <obj>
//                            for the condemned generation.
DECLARE_API(FindRoots)
{
#ifndef FEATURE_PAL
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    if (IsDumpFile())
    {
        ExtOut("!FindRoots is not supported on a dump file.\n");
        return Status;
    }

    LONG_PTR gen = -100; // initialized outside the legal range: [-1, 2]
    StringHolder sgen;
    TADDR taObj = NULL;
    BOOL dml = FALSE;
    size_t nArg;

    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-gen", &sgen.data, COSTRING, TRUE},
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    {   // vptr, type
        {&taObj, COHEX}
    };
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }

    EnableDMLHolder dmlHolder(dml);
    if (sgen.data != NULL)
    {
        // "any" maps to the -1 sentinel (notify for every generation).
        if (_stricmp(sgen.data, "any") == 0)
        {
            gen = -1;
        }
        else
        {
            gen = GetExpression(sgen.data);
        }
    }
    if ((gen < -1 || gen > 2) && (taObj == 0))
    {
        ExtOut("Incorrect options. Usage:\n\t!FindRoots -gen <N>\n\t\twhere N is 0, 1, 2, or \"any\". OR\n\t!FindRoots <obj>\n");
        return Status;
    }

    if (gen >= -1 && gen <= 2)
    {
        // Phase 1: arm the notification.
        IXCLRDataProcess2* idp2 = NULL;
        if (FAILED(g_clrData->QueryInterface(IID_IXCLRDataProcess2, (void**) &idp2)))
        {
            ExtOut("Your version of the runtime/DAC do not support this command.\n");
            return Status;
        }

        // Request GC_MARK_END notifications from debuggee
        // (mask 7 = all three generations when gen == -1).
        GcEvtArgs gea = { GC_MARK_END, { ((gen == -1) ? 7 : (1 << gen)) } };
        idp2->SetGcNotification(gea);
        // ... and register the notification handler
        g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "sxe -c \"!SOSHandleCLRN\" clrn", 0);
        // the above notification is removed in CNotification::OnGcEvent()
    }
    else
    {
        // Phase 2: must be stopped at the CLRN notification armed above.
        // verify that the last event in the debugger was indeed a CLRN exception
        DEBUG_LAST_EVENT_INFO_EXCEPTION dle;
        CNotification Notification;

        if (!CheckCLRNotificationEvent(&dle))
        {
            ExtOut("The command !FindRoots can only be used after the debugger stopped on a CLRN GC notification.\n");
            ExtOut("At this time !GCRoot should be used instead.\n");
            return Status;
        }
        // validate argument
        if (!g_snapshot.Build())
        {
            ExtOut("Unable to build snapshot of the garbage collector state\n");
            return Status;
        }

        if (g_snapshot.GetHeap(taObj) == NULL)
        {
            ExtOut("Address %#p is not in the managed heap.\n", SOS_PTR(taObj));
            return Status;
        }

        // Objects in generations older than the condemned one cannot be
        // collected by this GC, so a root search would be meaningless.
        int ogen = g_snapshot.GetGeneration(taObj);
        if (ogen > CNotification::GetCondemnedGen())
        {
            DMLOut("Object %s will survive this collection:\n\tgen(%#p) = %d > %d = condemned generation.\n",
                DMLObject(taObj), SOS_PTR(taObj), ogen, CNotification::GetCondemnedGen());
            return Status;
        }

        GCRootImpl gcroot;
        int roots = gcroot.FindRoots(CNotification::GetCondemnedGen(), taObj);

        ExtOut("Found %d roots.\n", roots);
    }

    return Status;
#else
    return E_NOTIMPL;
#endif
}
// Collects GCHandle statistics, either aggregated into a single bucket
// ("single domain mode") or broken down per AppDomain.  In per-domain mode
// the bucket array holds the special domains first (shared domain when
// present, then the system domain) followed by the ordinary AppDomains.
class GCHandleStatsForDomains
{
public:
    GCHandleStatsForDomains()
        : m_singleDomainMode(FALSE), m_numDomains(0), m_pStatistics(NULL), m_pDomainPointers(NULL), m_sharedDomainIndex(-1), m_systemDomainIndex(-1)
    {
    }
    // Frees the owned buckets; scalar vs. array delete must match the
    // allocation form chosen in Init.
    ~GCHandleStatsForDomains()
    {
        if (m_pStatistics)
        {
            if (m_singleDomainMode)
                delete m_pStatistics;
            else
                delete [] m_pStatistics;
        }
        if (m_pDomainPointers)
            delete [] m_pDomainPointers;
    }
    // Allocates the statistics buckets (one aggregate bucket, or one per
    // domain).  Returns FALSE on allocation failure or when the AppDomain
    // list cannot be obtained from the DAC.
    BOOL Init(BOOL singleDomainMode)
    {
        m_singleDomainMode = singleDomainMode;
        if (m_singleDomainMode)
        {
            m_numDomains = 1;
            m_pStatistics = new NOTHROW GCHandleStatistics();
            if (m_pStatistics == NULL)
                return FALSE;
        }
        else
        {
            DacpAppDomainStoreData adsData;
            if (adsData.Request(g_sos) != S_OK)
                return FALSE;
            // The shared domain only exists on some runtimes; the system domain always does.
            LONG numSpecialDomains = (adsData.sharedDomain != NULL) ? 2 : 1;
            m_numDomains = adsData.DomainCount + numSpecialDomains;
            ArrayHolder<CLRDATA_ADDRESS> pArray = new NOTHROW CLRDATA_ADDRESS[m_numDomains];
            if (pArray == NULL)
                return FALSE;
            // Special domains go first; ordinary AppDomains are appended after them.
            int i = 0;
            if (adsData.sharedDomain != NULL)
            {
                pArray[i++] = adsData.sharedDomain;
            }
            pArray[i] = adsData.systemDomain;
            m_sharedDomainIndex = i - 1; // The m_sharedDomainIndex is set to -1 if there is no shared domain
            m_systemDomainIndex = i;
            if (g_sos->GetAppDomainList(adsData.DomainCount, pArray+numSpecialDomains, NULL) != S_OK)
                return FALSE;
            m_pDomainPointers = pArray.Detach();
            m_pStatistics = new NOTHROW GCHandleStatistics[m_numDomains];
            if (m_pStatistics == NULL)
                return FALSE;
        }
        return TRUE;
    }
    // Linear-scan lookup of a domain's bucket by its address; returns NULL
    // when the address is not in the table (callers must check).
    GCHandleStatistics *LookupStatistics(CLRDATA_ADDRESS appDomainPtr) const
    {
        if (m_singleDomainMode)
        {
            // You can pass NULL appDomainPtr if you are in singleDomainMode
            return m_pStatistics;
        }
        else
        {
            for (int i=0; i < m_numDomains; i++)
                if (m_pDomainPointers[i] == appDomainPtr)
                    return m_pStatistics + i;
        }
        return NULL;
    }
    // Bucket by index; in single-domain mode the index is ignored and the
    // aggregate bucket is returned.
    GCHandleStatistics *GetStatistics(int appDomainIndex) const
    {
        SOS_Assert(appDomainIndex >= 0);
        SOS_Assert(appDomainIndex < m_numDomains);
        return m_singleDomainMode ? m_pStatistics : m_pStatistics + appDomainIndex;
    }
    int GetNumDomains() const
    {
        return m_numDomains;
    }
    CLRDATA_ADDRESS GetDomain(int index) const
    {
        SOS_Assert(index >= 0);
        SOS_Assert(index < m_numDomains);
        return m_pDomainPointers[index];
    }
    int GetSharedDomainIndex()
    {
        return m_sharedDomainIndex;
    }
    int GetSystemDomainIndex()
    {
        return m_systemDomainIndex;
    }
private:
    BOOL m_singleDomainMode;            // TRUE => one aggregate bucket for all domains
    int m_numDomains;                   // entries in m_pStatistics / m_pDomainPointers
    GCHandleStatistics *m_pStatistics;  // owned; scalar or array depending on mode
    CLRDATA_ADDRESS *m_pDomainPointers; // owned; parallel to m_pStatistics (per-domain mode only)
    int m_sharedDomainIndex;            // -1 when no shared domain exists
    int m_systemDomainIndex;
};
class GCHandlesImpl
{
public:
GCHandlesImpl(PCSTR args)
: mPerDomain(FALSE), mStat(FALSE), mDML(FALSE), mType((int)~0)
{
ArrayHolder<char> type = NULL;
CMDOption option[] =
{
{"-perdomain", &mPerDomain, COBOOL, FALSE},
{"-stat", &mStat, COBOOL, FALSE},
{"-type", &type, COSTRING, TRUE},
{"/d", &mDML, COBOOL, FALSE},
};
if (!GetCMDOption(args,option,_countof(option),NULL,0,NULL))
sos::Throw<sos::Exception>("Failed to parse command line arguments.");
if (type != NULL)
if (_stricmp(type, "Pinned") == 0)
mType = HNDTYPE_PINNED;
else if (_stricmp(type, "RefCounted") == 0)
mType = HNDTYPE_REFCOUNTED;
else if (_stricmp(type, "WeakShort") == 0)
mType = HNDTYPE_WEAK_SHORT;
else if (_stricmp(type, "WeakLong") == 0)
mType = HNDTYPE_WEAK_LONG;
else if (_stricmp(type, "Strong") == 0)
mType = HNDTYPE_STRONG;
else if (_stricmp(type, "Variable") == 0)
mType = HNDTYPE_VARIABLE;
else if (_stricmp(type, "AsyncPinned") == 0)
mType = HNDTYPE_ASYNCPINNED;
else if (_stricmp(type, "SizedRef") == 0)
mType = HNDTYPE_SIZEDREF;
else if (_stricmp(type, "Dependent") == 0)
mType = HNDTYPE_DEPENDENT;
else if (_stricmp(type, "WeakWinRT") == 0)
mType = HNDTYPE_WEAK_WINRT;
else
sos::Throw<sos::Exception>("Unknown handle type '%s'.", type.GetPtr());
}
void Run()
{
EnableDMLHolder dmlHolder(mDML);
mOut.ReInit(6, POINTERSIZE_HEX, AlignRight);
mOut.SetWidths(5, POINTERSIZE_HEX, 11, POINTERSIZE_HEX, 8, POINTERSIZE_HEX);
mOut.SetColAlignment(1, AlignLeft);
if (mHandleStat.Init(!mPerDomain) == FALSE)
sos::Throw<sos::Exception>("Error getting per-appdomain handle information");
if (!mStat)
mOut.WriteRow("Handle", "Type", "Object", "Size", "Data", "Type");
WalkHandles();
for (int i=0; (i < mHandleStat.GetNumDomains()) && !IsInterrupt(); i++)
{
GCHandleStatistics *pStats = mHandleStat.GetStatistics(i);
if (mPerDomain)
{
Print( "------------------------------------------------------------------------------\n");
Print("GC Handle Statistics for AppDomain ", AppDomainPtr(mHandleStat.GetDomain(i)));
if (i == mHandleStat.GetSharedDomainIndex())
Print(" (Shared Domain)\n");
else if (i == mHandleStat.GetSystemDomainIndex())
Print(" (System Domain)\n");
else
Print("\n");
}
if (!mStat)
Print("\n");
PrintGCStat(&pStats->hs);
// Don't print handle stats if the user has filtered by type. All handles will be the same
// type, and the total count will be displayed by PrintGCStat.
if (mType == (unsigned int)~0)
{
Print("\n");
PrintGCHandleStats(pStats);
}
}
}
private:
void WalkHandles()
{
ToRelease<ISOSHandleEnum> handles;
if (FAILED(g_sos->GetHandleEnum(&handles)))
{
if (IsMiniDumpFile())
sos::Throw<sos::Exception>("Unable to display GC handles.\nA minidump without full memory may not have this information.");
else
sos::Throw<sos::Exception>("Failed to walk the handle table.");
}
// GCC can't handle stacks which are too large.
#ifndef FEATURE_PAL
SOSHandleData data[256];
#else
SOSHandleData data[4];
#endif
unsigned int fetched = 0;
HRESULT hr = S_OK;
do
{
if (FAILED(hr = handles->Next(_countof(data), data, &fetched)))
{
ExtOut("Error %x while walking the handle table.\n", hr);
break;
}
WalkHandles(data, fetched);
} while (_countof(data) == fetched);
}
void WalkHandles(SOSHandleData data[], unsigned int count)
{
for (unsigned int i = 0; i < count; ++i)
{
sos::CheckInterrupt();
if (mType != (unsigned int)~0 && mType != data[i].Type)
continue;
GCHandleStatistics *pStats = mHandleStat.LookupStatistics(data[i].AppDomain);
TADDR objAddr = 0;
TADDR mtAddr = 0;
size_t size = 0;
const WCHAR *mtName = 0;
const char *type = 0;
if (FAILED(MOVE(objAddr, data[i].Handle)))
{
objAddr = 0;
mtName = W("<error>");
}
else
{
sos::Object obj(TO_TADDR(objAddr));
mtAddr = obj.GetMT();
if (sos::MethodTable::IsFreeMT(mtAddr))
{
mtName = W("<free>");
}
else if (!sos::MethodTable::IsValid(mtAddr))
{
mtName = W("<error>");
}
else
{
size = obj.GetSize();
if (mType == (unsigned int)~0 || mType == data[i].Type)
pStats->hs.Add(obj.GetMT(), (DWORD)size);
}
}
switch(data[i].Type)
{
case HNDTYPE_PINNED:
type = "Pinned";
if (pStats) pStats->pinnedHandleCount++;
break;
case HNDTYPE_REFCOUNTED:
type = "RefCounted";
if (pStats) pStats->refCntHandleCount++;
break;
case HNDTYPE_STRONG:
type = "Strong";
if (pStats) pStats->strongHandleCount++;
break;
case HNDTYPE_WEAK_SHORT:
type = "WeakShort";
if (pStats) pStats->weakShortHandleCount++;
break;
case HNDTYPE_WEAK_LONG:
type = "WeakLong";
if (pStats) pStats->weakLongHandleCount++;
break;
case HNDTYPE_ASYNCPINNED:
type = "AsyncPinned";
if (pStats) pStats->asyncPinnedHandleCount++;
break;
case HNDTYPE_VARIABLE:
type = "Variable";
if (pStats) pStats->variableCount++;
break;
case HNDTYPE_SIZEDREF:
type = "SizedRef";
if (pStats) pStats->sizedRefCount++;
break;
case HNDTYPE_DEPENDENT:
type = "Dependent";
if (pStats) pStats->dependentCount++;
break;
case HNDTYPE_WEAK_WINRT:
type = "WeakWinRT";
if (pStats) pStats->weakWinRTHandleCount++;
break;
default:
DebugBreak();
type = "Unknown";
pStats->unknownHandleCount++;
break;
}
if (type && !mStat)
{
sos::MethodTable mt = mtAddr;
if (mtName == 0)
mtName = mt.GetName();
if (data[i].Type == HNDTYPE_REFCOUNTED)
mOut.WriteRow(data[i].Handle, type, ObjectPtr(objAddr), Decimal(size), Decimal(data[i].RefCount), mtName);
else if (data[i].Type == HNDTYPE_DEPENDENT)
mOut.WriteRow(data[i].Handle, type, ObjectPtr(objAddr), Decimal(size), ObjectPtr(data[i].Secondary), mtName);
else if (data[i].Type == HNDTYPE_WEAK_WINRT)
mOut.WriteRow(data[i].Handle, type, ObjectPtr(objAddr), Decimal(size), Pointer(data[i].Secondary), mtName);
else
mOut.WriteRow(data[i].Handle, type, ObjectPtr(objAddr), Decimal(size), "", mtName);
}
}
}
inline void PrintHandleRow(const char *text, int count)
{
if (count)
mOut.WriteRow(text, Decimal(count));
}
void PrintGCHandleStats(GCHandleStatistics *pStats)
{
Print("Handles:\n");
mOut.ReInit(2, 21, AlignLeft, 4);
PrintHandleRow("Strong Handles:", pStats->strongHandleCount);
PrintHandleRow("Pinned Handles:", pStats->pinnedHandleCount);
PrintHandleRow("Async Pinned Handles:", pStats->asyncPinnedHandleCount);
PrintHandleRow("Ref Count Handles:", pStats->refCntHandleCount);
PrintHandleRow("Weak Long Handles:", pStats->weakLongHandleCount);
PrintHandleRow("Weak Short Handles:", pStats->weakShortHandleCount);
PrintHandleRow("Weak WinRT Handles:", pStats->weakWinRTHandleCount);
PrintHandleRow("Variable Handles:", pStats->variableCount);
PrintHandleRow("SizedRef Handles:", pStats->sizedRefCount);
PrintHandleRow("Dependent Handles:", pStats->dependentCount);
PrintHandleRow("Other Handles:", pStats->unknownHandleCount);
}
private:
BOOL mPerDomain, mStat, mDML;
unsigned int mType;
TableOutput mOut;
GCHandleStatsForDomains mHandleStat;
};
/**********************************************************************\
* Routine Description: *
* *
* This function dumps GC Handle statistics *
* *
\**********************************************************************/
DECLARE_API(GCHandles)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    try
    {
        // Parse the arguments and walk/print the handle table in one shot;
        // the temporary lives for exactly the duration of Run().
        GCHandlesImpl(args).Run();
    }
    catch (const sos::Exception &e)
    {
        // Argument or enumeration failures surface here as sos exceptions.
        Print(e.what());
    }
    return Status;
}
// This is an experimental and undocumented SOS API that attempts to step through code
// stopping once jitted code is reached. It currently has some issues - it can take arbitrarily long
// to reach jitted code and canceling it is terrible. At best it doesn't cancel, at worst it
// kills the debugger. IsInterrupt() doesn't work nearly as nicely as one would hope :/
#ifndef FEATURE_PAL
// Experimental: single-steps the debuggee (via the "thr" command) until the
// current instruction pointer lands in jitted managed code (codeType == 1),
// then stops.  Can run for a long time and is hard to interrupt; see the
// warning comment above this function.
DECLARE_API(TraceToCode)
{
    INIT_API_NODAC();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    _ASSERTE(g_pRuntime != nullptr);
    while(true)
    {
        if (IsInterrupt())
        {
            ExtOut("Interrupted\n");
            return S_FALSE;
        }
        // Classify the code at the current instruction pointer.
        ULONG64 Offset;
        g_ExtRegisters->GetInstructionOffset(&Offset);
        DWORD codeType = 0;
        ULONG64 base = 0;
        CLRDATA_ADDRESS cdaStart = TO_CDADDR(Offset);
        DacpMethodDescData MethodDescData; // NOTE(review): declared but never used
        if (g_ExtSymbols->GetModuleByOffset(Offset, 0, NULL, &base) == S_OK)
        {
            // IP is inside a native module: either the CLR itself or some other DLL.
            ULONG64 clrBaseAddr = g_pRuntime->GetModuleAddress();
            if(clrBaseAddr == base)
            {
                ExtOut("Compiled code in CLR\n");
                codeType = 4;
            }
            else
            {
                ExtOut("Compiled code in module @ 0x%I64x\n", base);
                codeType = 8;
            }
        }
        else if (g_sos != NULL || LoadClrDebugDll()==S_OK)
        {
            // Not in any native module: ask the DAC whether this is managed code.
            CLRDATA_ADDRESS addr;
            if(g_sos->GetMethodDescPtrFromIP(cdaStart, &addr) == S_OK)
            {
                WCHAR wszNameBuffer[1024]; // should be large enough
                // get the MethodDesc name
                if ((g_sos->GetMethodDescName(addr, 1024, wszNameBuffer, NULL) == S_OK) &&
                    _wcsncmp(W("DomainBoundILStubClass"), wszNameBuffer, 22)==0)
                {
                    ExtOut("ILStub\n");
                    codeType = 2;
                }
                else
                {
                    ExtOut("Jitted code\n");
                    codeType = 1;
                }
            }
            else
            {
                ExtOut("Not compiled or jitted, assuming stub\n");
                codeType = 16;
            }
        }
        else
        {
            // not compiled but CLR isn't loaded... some other code generator?
            return E_FAIL;
        }
        if(codeType == 1)
        {
            // Reached jitted managed code -- done.
            return S_OK;
        }
        else
        {
            // Trace one hardware instruction and classify again.
            Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "thr; .echo wait" ,0);
            if (FAILED(Status))
            {
                ExtOut("Error tracing instruction\n");
                return Status;
            }
        }
    }
    return Status; // NOTE(review): unreachable -- the loop only exits via return
}
#endif // FEATURE_PAL
// This is an experimental and undocumented API that sets a debugger pseudo-register based
// on the type of code at the given IP. It can be used in scripts to keep stepping until certain
// kinds of code have been reached. Presumably it's slower than !TraceToCode but at least it
// cancels much better
#ifndef FEATURE_PAL
// Experimental: classifies the code at the given IP and stores a flag value
// in a debugger pseudo-register (default $t1):
//   1 = jitted managed code   2 = IL stub   4 = native code in the CLR
//   8 = native code in another module      16 = unknown (assumed stub)
// Usage: !GetCodeTypeFlags <ip> [<pseudo-register number 0..19>]
DECLARE_API(GetCodeTypeFlags)
{
    INIT_API();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    _ASSERTE(g_pRuntime != nullptr);
    char buffer[100+mdNameLen];
    size_t ip = 0; // FIX: initialize so an omitted <ip> argument cannot leave it indeterminate
    StringHolder PReg;
    CMDValue arg[] = {
        // vptr, type
        {&ip, COSIZE_T},
        {&PReg.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
    {
        return Status;
    }
    size_t preg = 1; // by default
    if (nArg == 2)
    {
        preg = GetExpression(PReg.data);
        if (preg > 19)
        {
            ExtOut("Pseudo-register number must be between 0 and 19\n");
            return Status;
        }
    }
    // Clear the result register first.  preg <= 19, so the (int) cast is safe
    // and it keeps the %d specifier correct on 64-bit (size_t != int). (FIX)
    sprintf_s(buffer,_countof (buffer),
        "r$t%d=0",
        (int)preg);
    Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer ,0);
    if (FAILED(Status))
    {
        ExtOut("Error initialized register $t%d to zero\n", (int)preg);
        return Status;
    }
    ULONG64 base = 0;
    CLRDATA_ADDRESS cdaStart = TO_CDADDR(ip);
    DWORD codeType = 0;
    CLRDATA_ADDRESS addr;
    if (g_sos->GetMethodDescPtrFromIP(cdaStart, &addr) == S_OK)
    {
        WCHAR wszNameBuffer[1024]; // should be large enough
        // get the MethodDesc name
        if (g_sos->GetMethodDescName(addr, 1024, wszNameBuffer, NULL) == S_OK &&
            _wcsncmp(W("DomainBoundILStubClass"), wszNameBuffer, 22)==0)
        {
            ExtOut("ILStub\n");
            codeType = 2;
        }
        else
        {
            ExtOut("Jitted code\n"); // FIX: trailing newline was missing (matches !TraceToCode)
            codeType = 1;
        }
    }
    else if(g_ExtSymbols->GetModuleByOffset (ip, 0, NULL, &base) == S_OK)
    {
        ULONG64 clrBaseAddr = g_pRuntime->GetModuleAddress();
        if (base == clrBaseAddr)
        {
            ExtOut("Compiled code in CLR\n"); // FIX: trailing newline was missing
            codeType = 4;
        }
        else
        {
            ExtOut("Compiled code in module @ 0x%I64x\n", base);
            codeType = 8;
        }
    }
    else
    {
        ExtOut("Not compiled or jitted, assuming stub\n");
        codeType = 16;
    }
    // Publish the classification in the pseudo-register.
    sprintf_s(buffer,_countof (buffer),
        "r$t%d=%x",
        (int)preg, codeType);
    Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer, 0);
    if (FAILED(Status))
    {
        ExtOut("Error setting register $t%d\n", (int)preg);
        return Status;
    }
    return Status;
}
#endif // FEATURE_PAL
// Sets a pseudo-register ($t<preg>, default $t1) to 1 when the last thrown
// managed exception on the current thread matches <type name> (optionally
// matching derived types with -derived).  With -create/-create2 it instead
// installs a 1st/2nd-chance CLR exception handler that re-runs !soe on each
// CLR exception and breaks in only on a match.
DECLARE_API(StopOnException)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    char buffer[100+mdNameLen];
    BOOL fDerived = FALSE;
    BOOL fCreate1 = FALSE;
    BOOL fCreate2 = FALSE;
    CMDOption option[] = {
        // name, vptr, type, hasValue
        {"-derived", &fDerived, COBOOL, FALSE}, // catch derived exceptions
        {"-create", &fCreate1, COBOOL, FALSE}, // create 1st chance handler
        {"-create2", &fCreate2, COBOOL, FALSE}, // create 2nd chance handler
    };
    StringHolder TypeName,PReg;
    CMDValue arg[] = {
        // vptr, type
        {&TypeName.data, COSTRING},
        {&PReg.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    if (IsDumpFile())
    {
        ExtOut("Live debugging session required\n");
        return Status;
    }
    if (nArg < 1 || nArg > 2)
    {
        ExtOut("usage: StopOnException [-derived] [-create | -create2] <type name>\n");
        ExtOut("       [<pseudo-register number for result>]\n");
        ExtOut("ex: StopOnException -create System.OutOfMemoryException 1\n");
        return Status;
    }
    size_t preg = 1; // by default
    if (nArg == 2)
    {
        preg = GetExpression(PReg.data);
        if (preg > 19)
        {
            ExtOut("Pseudo-register number must be between 0 and 19\n");
            return Status;
        }
    }
    // Clear the result register.
    // NOTE(review): preg is a size_t formatted with %d here and below; the
    // value is <= 19 so it works in practice, but an (int) cast would be safer.
    sprintf_s(buffer,_countof (buffer),
        "r$t%d=0",
        preg);
    Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer, 0);
    if (FAILED(Status))
    {
        ExtOut("Error initialized register $t%d to zero\n", preg);
        return Status;
    }
    if (fCreate1 || fCreate2)
    {
        // Register an sxe handler that re-invokes !soe on every CLR exception
        // and resumes (g) unless the pseudo-register was set to 1 by the match.
        sprintf_s(buffer,_countof (buffer),
            "sxe %s \"!soe %s %s %d;.if(@$t%d==0) {g} .else {.echo '%s hit'}\" %x",
            fCreate1 ? "-c" : "-c2",
            fDerived ? "-derived" : "",
            TypeName.data,
            preg,
            preg,
            TypeName.data,
            EXCEPTION_COMPLUS
            );
        Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer, 0);
        if (FAILED(Status))
        {
            ExtOut("Error setting breakpoint: %s\n", buffer);
            return Status;
        }
        ExtOut("Breakpoint set\n");
        return Status;
    }
    // Find the last thrown exception on this thread.
    // Does it match? If so, set the register.
    CLRDATA_ADDRESS threadAddr = GetCurrentManagedThread();
    DacpThreadData Thread;
    if ((threadAddr == NULL) || (Thread.Request(g_sos, threadAddr) != S_OK))
    {
        ExtOut("The current thread is unmanaged\n");
        return Status;
    }
    TADDR taLTOH;
    if (!SafeReadMemory(Thread.lastThrownObjectHandle,
                        &taLTOH,
                        sizeof(taLTOH), NULL))
    {
        ExtOut("There is no current managed exception on this thread\n");
        return Status;
    }
    if (taLTOH)
    {
        LPWSTR typeNameWide = (LPWSTR)alloca(mdNameLen * sizeof(WCHAR));
        MultiByteToWideChar(CP_ACP,0,TypeName.data,-1,typeNameWide,mdNameLen);
        TADDR taMT;
        if (SafeReadMemory(taLTOH, &taMT, sizeof(taMT), NULL))
        {
            // Compare the exception object's MethodTable name to the requested
            // type; with -derived also walk the inheritance chain.
            NameForMT_s (taMT, g_mdName, mdNameLen);
            if ((_wcscmp(g_mdName,typeNameWide) == 0) ||
                (fDerived && IsDerivedFrom(taMT, typeNameWide)))
            {
                sprintf_s(buffer,_countof (buffer),
                    "r$t%d=1",
                    preg);
                Status = g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, buffer, 0);
                if (FAILED(Status))
                {
                    ExtOut("Failed to execute the following command: %s\n", buffer);
                }
            }
        }
    }
    return Status;
}
/**********************************************************************\
* Routine Description: *
* *
* This function finds the size of an object or all roots. *
* *
\**********************************************************************/
DECLARE_API(ObjSize)
{
#ifndef FEATURE_PAL
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    BOOL dml = FALSE;
    StringHolder objectExpr;
    CMDOption option[] =
    { // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
    };
    CMDValue arg[] =
    { // vptr, type
        {&objectExpr.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    TADDR objAddr = GetExpression(objectExpr.data);
    GCRootImpl rootImpl;
    // No address supplied: report sizes reachable from all roots.
    if (objAddr == 0)
    {
        rootImpl.ObjSize();
        return Status;
    }
    // Otherwise report the transitive size of the single object.
    if (!sos::IsObject(objAddr))
    {
        ExtOut("%p is not a valid object.\n", SOS_PTR(objAddr));
        return Status;
    }
    size_t totalSize = rootImpl.ObjSize(objAddr);
    TADDR mtAddr = 0;
    MOVE(mtAddr, objAddr);
    sos::MethodTable mt = mtAddr;
    ExtOut("sizeof(%p) = %d (0x%x) bytes (%S)\n", SOS_PTR(objAddr), totalSize, totalSize, mt.GetName());
    return Status;
#else
    return E_NOTIMPL;
#endif
}
#ifndef FEATURE_PAL
// For FEATURE_PAL, MEMORY_BASIC_INFORMATION64 doesn't exist yet. TODO?
// Heuristic GCHandle leak finder: records the raw value of every strong and
// pinned GC handle, then scans all committed memory in the debuggee for those
// values.  Handles never found anywhere in memory are reported as potential
// leaks (orphaned handles).  References found inside the stress log are not
// counted.  The low bit of each array entry is reused as a "found" flag.
DECLARE_API(GCHandleLeaks)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    ExtOut("-------------------------------------------------------------------------------\n");
    ExtOut("GCHandleLeaks will report any GCHandles that couldn't be found in memory.      \n");
    ExtOut("Strong and Pinned GCHandles are reported at this time. You can safely abort the\n");
    ExtOut("memory scan with Control-C or Control-Break.                                   \n");
    ExtOut("-------------------------------------------------------------------------------\n");
    static DWORD_PTR array[2000];
    UINT i;
    BOOL dml = FALSE;
    CMDOption option[] =
    { // name, vptr, type, hasValue
        {"/d", &dml, COBOOL, FALSE},
    };
    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    // Collect the current strong/pinned handle values to search for.
    UINT iFinal = FindAllPinnedAndStrong(array,sizeof(array)/sizeof(DWORD_PTR));
    ExtOut("Found %d handles:\n",iFinal);
    for (i=1;i<=iFinal;i++)
    {
        ExtOut("%p\t", SOS_PTR(array[i-1]));
        if ((i % 4) == 0)   // four addresses per line
            ExtOut("\n");
    }
    ExtOut("\nSearching memory\n");
    // Now search memory for this:
    DWORD_PTR buffer[1024];
    ULONG64 memCur = 0x0;
    BOOL bAbort = FALSE;
    //find out memory used by stress log
    StressLogMem stressLog;
    CLRDATA_ADDRESS StressLogAddress = NULL;
    if (LoadClrDebugDll() != S_OK)
    {
        // Try to find stress log symbols
        DWORD_PTR dwAddr = GetValueFromExpression("StressLog::theLog");
        StressLogAddress = dwAddr;
        g_bDacBroken = TRUE;
    }
    else
    {
        if (g_sos->GetStressLogAddress(&StressLogAddress) != S_OK)
        {
            ExtOut("Unable to find stress log via DAC\n");
        }
        g_bDacBroken = FALSE;
    }
    if (stressLog.Init (StressLogAddress, g_ExtData))
    {
        ExtOut("Reference found in stress log will be ignored\n");
    }
    else
    {
        ExtOut("Failed to read whole or part of stress log, some references may come from stress log\n");
    }
    // Walk every region of the address space, scanning committed pages.
    while (!bAbort)
    {
        NTSTATUS status;
        MEMORY_BASIC_INFORMATION64 memInfo;
        status = g_ExtData2->QueryVirtual(UL64_TO_CDA(memCur), &memInfo);
        if( !NT_SUCCESS(status) )
        {
            break;
        }
        if (memInfo.State == MEM_COMMIT)
        {
            for (ULONG64 memIter = memCur; memIter < (memCur + memInfo.RegionSize); memIter+=sizeof(buffer))
            {
                if (IsInterrupt())
                {
                    ExtOut("Quitting at %p due to user abort\n", SOS_PTR(memIter));
                    bAbort = TRUE;
                    break;
                }
                if ((memIter % 0x10000000)==0x0)  // periodic progress message
                {
                    ExtOut("Searching %p...\n", SOS_PTR(memIter));
                }
                ULONG size = 0;
                HRESULT ret;
                ret = g_ExtData->ReadVirtual(UL64_TO_CDA(memIter), buffer, sizeof(buffer), &size);
                if (ret == S_OK)
                {
                    for (UINT x=0;x<1024;x++)
                    {
                        DWORD_PTR value = buffer[x];
                        // We don't care about the low bit. Also, the GCHandle class turns on the
                        // low bit for pinned handles, so without the statement below, we wouldn't
                        // notice pinned handles.
                        value = value & ~1;
                        for (i=0;i<iFinal;i++)
                        {
                            ULONG64 addrInDebugee = (ULONG64)memIter+(x*sizeof(DWORD_PTR));
                            if ((array[i] & ~1) == value)
                            {
                                // NOTE(review): addrInDebugee is a ULONG64 passed
                                // to %p without SOS_PTR; questionable on 32-bit.
                                if (stressLog.IsInStressLog (addrInDebugee))
                                {
                                    ExtOut("Found %p in stress log at location %p, reference not counted\n", SOS_PTR(value), addrInDebugee);
                                }
                                else
                                {
                                    // Mark the handle as found by setting its low bit.
                                    ExtOut("Found %p at location %p\n", SOS_PTR(value), addrInDebugee);
                                    array[i] |= 0x1;
                                }
                            }
                        }
                    }
                }
                else
                {
                    if (size > 0)
                    {
                        ExtOut("only read %x bytes at %p\n", size, SOS_PTR(memIter));
                    }
                }
            }
        }
        memCur += memInfo.RegionSize;
    }
    // Handles whose "found" bit never got set were not seen anywhere.
    int numNotFound = 0;
    for (i=0;i<iFinal;i++)
    {
        if ((array[i] & 0x1) == 0)
        {
            numNotFound++;
            // ExtOut("WARNING: %p not found\n", SOS_PTR(array[i]));
        }
    }
    if (numNotFound > 0)
    {
        ExtOut("------------------------------------------------------------------------------\n");
        ExtOut("Some handles were not found. If the number of not-found handles grows over the\n");
        ExtOut("lifetime of your application, you may have a GCHandle leak. This will cause \n");
        ExtOut("the GC Heap to grow larger as objects are being kept alive, referenced only \n");
        ExtOut("by the orphaned handle. If the number doesn't grow over time, note that there \n");
        ExtOut("may be some noise in this output, as an unmanaged application may be storing \n");
        ExtOut("the handle in a non-standard way, perhaps with some bits flipped. The memory \n");
        ExtOut("scan wouldn't be able to find those. \n");
        ExtOut("------------------------------------------------------------------------------\n");
        ExtOut("Didn't find %d handles:\n", numNotFound);
        int numPrinted=0;
        for (i=0;i<iFinal;i++)
        {
            if ((array[i] & 0x1) == 0)
            {
                numPrinted++;
                ExtOut("%p\t", SOS_PTR(array[i]));
                if ((numPrinted % 4) == 0)
                    ExtOut("\n");
            }
        }
        ExtOut("\n");
    }
    else
    {
        ExtOut("------------------------------------------------------------------------------\n");
        ExtOut("All handles found");
        if (bAbort)
            ExtOut(" even though you aborted.\n");
        else
            ExtOut(".\n");
        ExtOut("A leak may still exist because in a general scan of process memory SOS can't \n");
        ExtOut("differentiate between garbage and valid structures, so you may have false \n");
        ExtOut("positives. If you still suspect a leak, use this function over time to \n");
        ExtOut("identify a possible trend. \n");
        ExtOut("------------------------------------------------------------------------------\n");
    }
    return Status;
}
#endif // FEATURE_PAL
#endif // FEATURE_PAL
class ClrStackImplWithICorDebug
{
private:
    // Follows reference indirections and unboxes box values recursively until
    // a plain value remains.  On success *ppOutputValue holds an AddRef'ed
    // value.  If the input is a null reference, *pIsNull (when supplied) is
    // set to TRUE and the reference value itself is returned.
    static HRESULT DereferenceAndUnboxValue(ICorDebugValue * pValue, ICorDebugValue** ppOutputValue, BOOL * pIsNull = NULL)
    {
        HRESULT Status = S_OK;
        *ppOutputValue = NULL;
        if(pIsNull != NULL) *pIsNull = FALSE;
        ToRelease<ICorDebugReferenceValue> pReferenceValue;
        Status = pValue->QueryInterface(IID_ICorDebugReferenceValue, (LPVOID*) &pReferenceValue);
        if (SUCCEEDED(Status))
        {
            BOOL isNull = FALSE;
            IfFailRet(pReferenceValue->IsNull(&isNull));
            if(!isNull)
            {
                ToRelease<ICorDebugValue> pDereferencedValue;
                IfFailRet(pReferenceValue->Dereference(&pDereferencedValue));
                // Recurse: the dereferenced value may itself be a reference or a box.
                return DereferenceAndUnboxValue(pDereferencedValue, ppOutputValue);
            }
            else
            {
                // Null reference: report it and hand back the reference value itself.
                if(pIsNull != NULL) *pIsNull = TRUE;
                *ppOutputValue = pValue;
                (*ppOutputValue)->AddRef();
                return S_OK;
            }
        }
        ToRelease<ICorDebugBoxValue> pBoxedValue;
        Status = pValue->QueryInterface(IID_ICorDebugBoxValue, (LPVOID*) &pBoxedValue);
        if (SUCCEEDED(Status))
        {
            ToRelease<ICorDebugObjectValue> pUnboxedValue;
            IfFailRet(pBoxedValue->GetObject(&pUnboxedValue));
            // Recurse on the unboxed object value.
            return DereferenceAndUnboxValue(pUnboxedValue, ppOutputValue);
        }
        // Neither a reference nor a box: return the value as-is.
        *ppOutputValue = pValue;
        (*ppOutputValue)->AddRef();
        return S_OK;
    }
static BOOL ShouldExpandVariable(__in_z WCHAR* varToExpand, __in_z WCHAR* currentExpansion)
{
if(currentExpansion == NULL || varToExpand == NULL) return FALSE;
size_t varToExpandLen = _wcslen(varToExpand);
size_t currentExpansionLen = _wcslen(currentExpansion);
if(currentExpansionLen > varToExpandLen) return FALSE;
if(currentExpansionLen < varToExpandLen && varToExpand[currentExpansionLen] != L'.') return FALSE;
if(_wcsncmp(currentExpansion, varToExpand, currentExpansionLen) != 0) return FALSE;
return TRUE;
}
    // Returns TRUE when the (dereferenced/unboxed) value's base type name
    // begins with "System.Enum", i.e. the value is an enum.
    static BOOL IsEnum(ICorDebugValue * pInputValue)
    {
        ToRelease<ICorDebugValue> pValue;
        if(FAILED(DereferenceAndUnboxValue(pInputValue, &pValue, NULL))) return FALSE;
        WCHAR baseTypeName[mdNameLen];
        ToRelease<ICorDebugValue2> pValue2;
        ToRelease<ICorDebugType> pType;
        ToRelease<ICorDebugType> pBaseType;
        if(FAILED(pValue->QueryInterface(IID_ICorDebugValue2, (LPVOID *) &pValue2))) return FALSE;
        if(FAILED(pValue2->GetExactType(&pType))) return FALSE;
        if(FAILED(pType->GetBase(&pBaseType)) || pBaseType == NULL) return FALSE;
        if(FAILED(GetTypeOfValue(pBaseType, baseTypeName, mdNameLen))) return FALSE;
        // NOTE(review): prefix compare (first 11 chars), so any base type whose
        // name starts with "System.Enum" would match, not only System.Enum itself.
        return (_wcsncmp(baseTypeName, W("System.Enum"), 11) == 0);
    }
    // Appends "<T1,T2,...>" to typeName, one entry per generic type parameter
    // of pType; appends nothing when the type has no type parameters.
    static HRESULT AddGenericArgs(ICorDebugType * pType, __inout_ecount(typeNameLen) WCHAR* typeName, ULONG typeNameLen)
    {
        bool isFirst = true;
        ToRelease<ICorDebugTypeEnum> pTypeEnum;
        if(SUCCEEDED(pType->EnumerateTypeParameters(&pTypeEnum)))
        {
            ULONG numTypes = 0;
            ToRelease<ICorDebugType> pCurrentTypeParam;
            while(SUCCEEDED(pTypeEnum->Next(1, &pCurrentTypeParam, &numTypes)))
            {
                if(numTypes == 0) break;
                // Open the angle bracket on the first parameter, separate the
                // rest with commas.
                if(isFirst)
                {
                    isFirst = false;
                    wcsncat_s(typeName, typeNameLen, W("<"), typeNameLen);
                }
                else wcsncat_s(typeName, typeNameLen, W(","), typeNameLen);
                WCHAR typeParamName[mdNameLen];
                typeParamName[0] = L'\0';
                GetTypeOfValue(pCurrentTypeParam, typeParamName, mdNameLen);
                wcsncat_s(typeName, typeNameLen, typeParamName, typeNameLen);
            }
            // Close the bracket only if we opened one.
            if(!isFirst)
                wcsncat_s(typeName, typeNameLen, W(">"), typeNameLen);
        }
        return S_OK;
    }
    // Formats a human-readable C#-style name for pType into typeName (e.g.
    // "int", "string", "MyClass<int>[]").  Always returns S_OK for handled
    // element types; unrecognized ones render as "(Unhandled CorElementType)".
    static HRESULT GetTypeOfValue(ICorDebugType * pType, __inout_ecount(typeNameLen) WCHAR* typeName, ULONG typeNameLen)
    {
        HRESULT Status = S_OK;
        CorElementType corElemType;
        IfFailRet(pType->GetType(&corElemType));
        switch (corElemType)
        {
        //List of unsupported CorElementTypes:
        //ELEMENT_TYPE_END            = 0x0,
        //ELEMENT_TYPE_VAR            = 0x13,     // a class type variable VAR <U1>
        //ELEMENT_TYPE_GENERICINST    = 0x15,     // GENERICINST <generic type> <argCnt> <arg1> ... <argn>
        //ELEMENT_TYPE_TYPEDBYREF     = 0x16,     // TYPEDREF  (it takes no args) a typed reference to some other type
        //ELEMENT_TYPE_MVAR           = 0x1e,     // a method type variable MVAR <U1>
        //ELEMENT_TYPE_CMOD_REQD      = 0x1F,     // required C modifier : E_T_CMOD_REQD <mdTypeRef/mdTypeDef>
        //ELEMENT_TYPE_CMOD_OPT       = 0x20,     // optional C modifier : E_T_CMOD_OPT <mdTypeRef/mdTypeDef>
        //ELEMENT_TYPE_INTERNAL       = 0x21,     // INTERNAL <typehandle>
        //ELEMENT_TYPE_MAX            = 0x22,     // first invalid element type
        //ELEMENT_TYPE_MODIFIER       = 0x40,
        //ELEMENT_TYPE_SENTINEL       = 0x01 | ELEMENT_TYPE_MODIFIER, // sentinel for varargs
        //ELEMENT_TYPE_PINNED         = 0x05 | ELEMENT_TYPE_MODIFIER,
        //ELEMENT_TYPE_R4_HFA         = 0x06 | ELEMENT_TYPE_MODIFIER, // used only internally for R4 HFA types
        //ELEMENT_TYPE_R8_HFA         = 0x07 | ELEMENT_TYPE_MODIFIER, // used only internally for R8 HFA types
        default:
            swprintf_s(typeName, typeNameLen, W("(Unhandled CorElementType: 0x%x)\0"), corElemType);
            break;
        case ELEMENT_TYPE_VALUETYPE:
        case ELEMENT_TYPE_CLASS:
            {
                //Defaults in case we fail...
                if(corElemType == ELEMENT_TYPE_VALUETYPE) swprintf_s(typeName, typeNameLen, W("struct\0"));
                else swprintf_s(typeName, typeNameLen, W("class\0"));
                mdTypeDef typeDef;
                ToRelease<ICorDebugClass> pClass;
                if(SUCCEEDED(pType->GetClass(&pClass)) && SUCCEEDED(pClass->GetToken(&typeDef)))
                {
                    ToRelease<ICorDebugModule> pModule;
                    IfFailRet(pClass->GetModule(&pModule));
                    ToRelease<IUnknown> pMDUnknown;
                    ToRelease<IMetaDataImport> pMD;
                    IfFailRet(pModule->GetMetaDataInterface(IID_IMetaDataImport, &pMDUnknown));
                    IfFailRet(pMDUnknown->QueryInterface(IID_IMetaDataImport, (LPVOID*) &pMD));
                    // Resolve the typedef token to the type's full name.
                    if(SUCCEEDED(NameForToken_s(TokenFromRid(typeDef, mdtTypeDef), pMD, g_mdName, mdNameLen, false)))
                        swprintf_s(typeName, typeNameLen, W("%s\0"), g_mdName);
                }
                // Append "<T1,...>" for generic instantiations.
                AddGenericArgs(pType, typeName, typeNameLen);
            }
            break;
        case ELEMENT_TYPE_VOID:
            swprintf_s(typeName, typeNameLen, W("void\0"));
            break;
        case ELEMENT_TYPE_BOOLEAN:
            swprintf_s(typeName, typeNameLen, W("bool\0"));
            break;
        case ELEMENT_TYPE_CHAR:
            swprintf_s(typeName, typeNameLen, W("char\0"));
            break;
        case ELEMENT_TYPE_I1:
            swprintf_s(typeName, typeNameLen, W("signed byte\0"));
            break;
        case ELEMENT_TYPE_U1:
            swprintf_s(typeName, typeNameLen, W("byte\0"));
            break;
        case ELEMENT_TYPE_I2:
            swprintf_s(typeName, typeNameLen, W("short\0"));
            break;
        case ELEMENT_TYPE_U2:
            swprintf_s(typeName, typeNameLen, W("unsigned short\0"));
            break;
        case ELEMENT_TYPE_I4:
            swprintf_s(typeName, typeNameLen, W("int\0"));
            break;
        case ELEMENT_TYPE_U4:
            swprintf_s(typeName, typeNameLen, W("unsigned int\0"));
            break;
        case ELEMENT_TYPE_I8:
            swprintf_s(typeName, typeNameLen, W("long\0"));
            break;
        case ELEMENT_TYPE_U8:
            swprintf_s(typeName, typeNameLen, W("unsigned long\0"));
            break;
        case ELEMENT_TYPE_R4:
            swprintf_s(typeName, typeNameLen, W("float\0"));
            break;
        case ELEMENT_TYPE_R8:
            swprintf_s(typeName, typeNameLen, W("double\0"));
            break;
        case ELEMENT_TYPE_OBJECT:
            swprintf_s(typeName, typeNameLen, W("object\0"));
            break;
        case ELEMENT_TYPE_STRING:
            swprintf_s(typeName, typeNameLen, W("string\0"));
            break;
        case ELEMENT_TYPE_I:
            swprintf_s(typeName, typeNameLen, W("IntPtr\0"));
            break;
        case ELEMENT_TYPE_U:
            swprintf_s(typeName, typeNameLen, W("UIntPtr\0"));
            break;
        case ELEMENT_TYPE_SZARRAY:
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_BYREF:
        case ELEMENT_TYPE_PTR:
            {
                // First format the element/pointee type, then append the suffix.
                ToRelease<ICorDebugType> pFirstParameter;
                if(SUCCEEDED(pType->GetFirstTypeParameter(&pFirstParameter)))
                    GetTypeOfValue(pFirstParameter, typeName, typeNameLen);
                else
                    swprintf_s(typeName, typeNameLen, W("<unknown>\0"));
                switch(corElemType)
                {
                case ELEMENT_TYPE_SZARRAY:
                    wcsncat_s(typeName, typeNameLen, W("[]\0"), typeNameLen);
                    return S_OK;
                case ELEMENT_TYPE_ARRAY:
                    {
                        ULONG32 rank = 0;
                        pType->GetRank(&rank);
                        wcsncat_s(typeName, typeNameLen, W("["), typeNameLen);
                        for(ULONG32 i = 0; i < rank - 1; i++)
                        {
                            // A rank-N array renders with N-1 commas: [,,]
                            wcsncat_s(typeName, typeNameLen, W(","), typeNameLen);
                        }
                        wcsncat_s(typeName, typeNameLen, W("]\0"), typeNameLen);
                    }
                    return S_OK;
                case ELEMENT_TYPE_BYREF:
                    wcsncat_s(typeName, typeNameLen, W("&\0"), typeNameLen);
                    return S_OK;
                case ELEMENT_TYPE_PTR:
                    wcsncat_s(typeName, typeNameLen, W("*\0"), typeNameLen);
                    return S_OK;
                default:
                    // note we can never reach here as this is a nested switch
                    // and corElemType can only be one of the values above
                    break;
                }
            }
            break;
        case ELEMENT_TYPE_FNPTR:
            swprintf_s(typeName, typeNameLen, W("*(...)\0"));
            break;
        case ELEMENT_TYPE_TYPEDBYREF:
            swprintf_s(typeName, typeNameLen, W("typedbyref\0"));
            break;
        }
        return S_OK;
    }
static HRESULT GetTypeOfValue(ICorDebugValue * pValue, __inout_ecount(typeNameLen) WCHAR* typeName, ULONG typeNameLen)
{
HRESULT Status = S_OK;
CorElementType corElemType;
IfFailRet(pValue->GetType(&corElemType));
ToRelease<ICorDebugType> pType;
ToRelease<ICorDebugValue2> pValue2;
if(SUCCEEDED(pValue->QueryInterface(IID_ICorDebugValue2, (void**) &pValue2)) && SUCCEEDED(pValue2->GetExactType(&pType)))
return GetTypeOfValue(pType, typeName, typeNameLen);
else
swprintf_s(typeName, typeNameLen, W("<unknown>\0"));
return S_OK;
}
// Prints the symbolic name(s) of an enum value. First decodes the enum's
// underlying integral type from the signature of its single instance field,
// then matches the raw bytes in 'enumValue' against the enum's literal
// (const) fields, OR-ing flag names together for [Flags]-style enums.
//
// pInputValue - the (possibly boxed) enum value; used to locate type metadata.
// enumValue   - raw bytes of the value; read as a ULONG64 below, so at least
//               8 readable bytes are assumed — TODO confirm at call sites.
//
// Fixes vs. original:
//  - "¤tTypeDef" mojibake restored to "&currentTypeDef".
//  - ELEMENT_TYPE_I8/U8 constants were read through 32-bit LONG*/ULONG*,
//    truncating 64-bit enum constants; now read as full 64-bit values.
static HRESULT PrintEnumValue(ICorDebugValue* pInputValue, BYTE* enumValue)
{
    HRESULT Status = S_OK;

    ToRelease<ICorDebugValue> pValue;
    IfFailRet(DereferenceAndUnboxValue(pInputValue, &pValue, NULL));

    mdTypeDef currentTypeDef;
    ToRelease<ICorDebugClass> pClass;
    ToRelease<ICorDebugValue2> pValue2;
    ToRelease<ICorDebugType> pType;
    ToRelease<ICorDebugModule> pModule;
    IfFailRet(pValue->QueryInterface(IID_ICorDebugValue2, (LPVOID *) &pValue2));
    IfFailRet(pValue2->GetExactType(&pType));
    IfFailRet(pType->GetClass(&pClass));
    IfFailRet(pClass->GetModule(&pModule));
    IfFailRet(pClass->GetToken(¤tTypeDef));

    ToRelease<IUnknown> pMDUnknown;
    ToRelease<IMetaDataImport> pMD;
    IfFailRet(pModule->GetMetaDataInterface(IID_IMetaDataImport, &pMDUnknown));
    IfFailRet(pMDUnknown->QueryInterface(IID_IMetaDataImport, (LPVOID*) &pMD));

    //First, we need to figure out the underlying enum type so that we can correctly type cast the raw values of each enum constant
    //We get that from the non-static field of the enum variable (I think the field is called __value or something similar)
    ULONG numFields = 0;
    HCORENUM fEnum = NULL;
    mdFieldDef fieldDef;
    CorElementType enumUnderlyingType = ELEMENT_TYPE_END;
    while(SUCCEEDED(pMD->EnumFields(&fEnum, currentTypeDef, &fieldDef, 1, &numFields)) && numFields != 0)
    {
        DWORD fieldAttr = 0;
        PCCOR_SIGNATURE pSignatureBlob = NULL;
        ULONG sigBlobLength = 0;
        if(SUCCEEDED(pMD->GetFieldProps(fieldDef, NULL, NULL, 0, NULL, &fieldAttr, &pSignatureBlob, &sigBlobLength, NULL, NULL, NULL)))
        {
            if((fieldAttr & fdStatic) == 0)
            {
                // The instance field's signature encodes the underlying type.
                CorSigUncompressCallingConv(pSignatureBlob);
                enumUnderlyingType = CorSigUncompressElementType(pSignatureBlob);
                break;
            }
        }
    }
    pMD->CloseEnum(fEnum);

    //Now that we know the underlying enum type, let's decode the enum variable into OR-ed, human readable enum contants
    fEnum = NULL;
    bool isFirst = true;
    ULONG64 remainingValue = *((ULONG64*)enumValue);
    while(SUCCEEDED(pMD->EnumFields(&fEnum, currentTypeDef, &fieldDef, 1, &numFields)) && numFields != 0)
    {
        ULONG nameLen = 0;
        DWORD fieldAttr = 0;
        WCHAR mdName[mdNameLen];
        UVCP_CONSTANT pRawValue = NULL;
        ULONG rawValueLength = 0;
        if(SUCCEEDED(pMD->GetFieldProps(fieldDef, NULL, mdName, mdNameLen, &nameLen, &fieldAttr, NULL, NULL, NULL, &pRawValue, &rawValueLength)))
        {
            // Only public static literal fields with a default are enum constants.
            DWORD enumValueRequiredAttributes = fdPublic | fdStatic | fdLiteral | fdHasDefault;
            if((fieldAttr & enumValueRequiredAttributes) != enumValueRequiredAttributes)
                continue;

            // Widen (sign- or zero-extending as appropriate) the constant to
            // 64 bits so it can be compared against remainingValue.
            ULONG64 currentConstValue = 0;
            switch (enumUnderlyingType)
            {
                case ELEMENT_TYPE_CHAR:
                case ELEMENT_TYPE_I1:
                    currentConstValue = (ULONG64)(*((CHAR*)pRawValue));
                    break;
                case ELEMENT_TYPE_U1:
                    currentConstValue = (ULONG64)(*((BYTE*)pRawValue));
                    break;
                case ELEMENT_TYPE_I2:
                    currentConstValue = (ULONG64)(*((SHORT*)pRawValue));
                    break;
                case ELEMENT_TYPE_U2:
                    currentConstValue = (ULONG64)(*((USHORT*)pRawValue));
                    break;
                case ELEMENT_TYPE_I4:
                    currentConstValue = (ULONG64)(*((INT32*)pRawValue));
                    break;
                case ELEMENT_TYPE_U4:
                    currentConstValue = (ULONG64)(*((UINT32*)pRawValue));
                    break;
                case ELEMENT_TYPE_I8:
                    // BUGFIX: previously read through LONG* (32-bit), which
                    // truncated 64-bit enum constants.
                    currentConstValue = (ULONG64)(*((__int64*)pRawValue));
                    break;
                case ELEMENT_TYPE_U8:
                    // BUGFIX: previously read through ULONG* (32-bit).
                    currentConstValue = *((ULONG64*)pRawValue);
                    break;
                case ELEMENT_TYPE_I:
                    currentConstValue = (ULONG64)(*((int*)pRawValue));
                    break;
                case ELEMENT_TYPE_U:
                case ELEMENT_TYPE_R4:
                case ELEMENT_TYPE_R8:
                // Technically U and the floating-point ones are options in the CLI, but not in the CLS or C#, so these are NYI
                default:
                    currentConstValue = 0;
            }

            // Match either an exact value or a flag fully contained in the
            // remaining (not yet printed) bits.
            if((currentConstValue == remainingValue) || ((currentConstValue != 0) && ((currentConstValue & remainingValue) == currentConstValue)))
            {
                remainingValue &= ~currentConstValue;
                if(isFirst)
                {
                    ExtOut(" = %S", mdName);
                    isFirst = false;
                }
                else ExtOut(" | %S", mdName);
            }
        }
    }
    pMD->CloseEnum(fEnum);
    return S_OK;
}
// Fetches the contents of a System.String debug value and echoes it, quoted,
// to the debugger output.
static HRESULT PrintStringValue(ICorDebugValue * pValue)
{
    HRESULT Status;

    ToRelease<ICorDebugStringValue> pStringValue;
    IfFailRet(pValue->QueryInterface(IID_ICorDebugStringValue, (LPVOID*) &pStringValue));

    // GetLength excludes the terminator, so reserve one extra character.
    ULONG32 cchLen;
    IfFailRet(pStringValue->GetLength(&cchLen));
    cchLen++;

    CQuickString buffer;
    buffer.Alloc(cchLen);

    ULONG32 cchFetched;
    IfFailRet(pStringValue->GetString(cchLen, &cchFetched, buffer.String()));

    ExtOut(" = \"%S\"\n", buffer.String());
    return S_OK;
}
// Prints a single-dimension (SZ) array value: the element count, then — if
// 'varToExpand' selects this variable — each element via PrintValue, with the
// expansion path extended as "<path>.[i]".
//
// Fixes vs. original: the per-element path was built with
// swprintf_s(currentExpansion, mdNameLen, W("%s.[%d]"), currentExpansion, i),
// which (a) passes the destination buffer as a %s source (overlapping
// buffers are undefined behavior for swprintf_s) and (b) sizes the write by
// mdNameLen instead of the caller-supplied currentExpansionSize. The suffix
// is now appended past the existing prefix with the correct remaining size.
static HRESULT PrintSzArrayValue(ICorDebugValue * pValue, ICorDebugILFrame * pILFrame, IMetaDataImport * pMD, int indent, __in_z WCHAR* varToExpand, __inout_ecount(currentExpansionSize) WCHAR* currentExpansion, DWORD currentExpansionSize, int currentFrame)
{
    HRESULT Status = S_OK;

    ToRelease<ICorDebugArrayValue> pArrayValue;
    IfFailRet(pValue->QueryInterface(IID_ICorDebugArrayValue, (LPVOID*) &pArrayValue));

    // Only rank-1 arrays are handled here; multi-dimensional arrays are not.
    ULONG32 nRank;
    IfFailRet(pArrayValue->GetRank(&nRank));
    if (nRank != 1)
    {
        return E_UNEXPECTED;
    }

    ULONG32 cElements;
    IfFailRet(pArrayValue->GetCount(&cElements));

    if (cElements == 0) ExtOut(" (empty)\n");
    else if (cElements == 1) ExtOut(" (1 element)\n");
    else ExtOut(" (%d elements)\n", cElements);

    if(!ShouldExpandVariable(varToExpand, currentExpansion)) return S_OK;
    size_t currentExpansionLen = _wcslen(currentExpansion);

    for (ULONG32 i=0; i < cElements; i++)
    {
        for(int j = 0; j <= indent; j++) ExtOut("    ");

        // Reset to the original prefix, then append ".[i]" in place.
        currentExpansion[currentExpansionLen] = L'\0';
        swprintf_s(currentExpansion + currentExpansionLen, currentExpansionSize - currentExpansionLen, W(".[%d]\0"), i);

        bool printed = false;
        CorElementType corElemType;
        ToRelease<ICorDebugType> pFirstParameter;
        ToRelease<ICorDebugValue2> pValue2;
        ToRelease<ICorDebugType> pType;
        if(SUCCEEDED(pArrayValue->QueryInterface(IID_ICorDebugValue2, (LPVOID *) &pValue2)) && SUCCEEDED(pValue2->GetExactType(&pType)))
        {
            if(SUCCEEDED(pType->GetFirstTypeParameter(&pFirstParameter)) && SUCCEEDED(pFirstParameter->GetType(&corElemType)))
            {
                switch(corElemType)
                {
                    //If the array element is something that we can expand with !clrstack, show information about the type of this element
                    case ELEMENT_TYPE_VALUETYPE:
                    case ELEMENT_TYPE_CLASS:
                    case ELEMENT_TYPE_SZARRAY:
                        {
                            WCHAR typeOfElement[mdNameLen];
                            GetTypeOfValue(pFirstParameter, typeOfElement, mdNameLen);
                            DMLOut(" |- %s = %S", DMLManagedVar(currentExpansion, currentFrame, i), typeOfElement);
                            printed = true;
                        }
                        break;
                    default:
                        break;
                }
            }
        }
        if(!printed) DMLOut(" |- %s", DMLManagedVar(currentExpansion, currentFrame, i));

        ToRelease<ICorDebugValue> pElementValue;
        IfFailRet(pArrayValue->GetElementAtPosition(i, &pElementValue));
        IfFailRet(PrintValue(pElementValue, pILFrame, pMD, indent + 1, varToExpand, currentExpansion, currentExpansionSize, currentFrame));
    }
    return S_OK;
}
// Pretty-prints a single managed value to the debugger output, dispatching on
// the value's CorElementType: strings, SZ arrays, and enums are delegated to
// their dedicated printers; object/value types print an address and recurse
// into fields via ProcessFields; primitives are formatted inline.
// Recursion depth is driven by 'varToExpand' matching 'currentExpansion'.
static HRESULT PrintValue(ICorDebugValue * pInputValue, ICorDebugILFrame * pILFrame, IMetaDataImport * pMD, int indent, __in_z WCHAR* varToExpand, __inout_ecount(currentExpansionSize) WCHAR* currentExpansion, DWORD currentExpansionSize, int currentFrame)
{
    HRESULT Status = S_OK;

    BOOL isNull = TRUE;
    ToRelease<ICorDebugValue> pValue;
    IfFailRet(DereferenceAndUnboxValue(pInputValue, &pValue, &isNull));

    // Null references need no further inspection.
    if(isNull)
    {
        ExtOut(" = null\n");
        return S_OK;
    }

    // Scratch buffer for the raw bytes of the (unboxed) value.
    ULONG32 cbSize;
    IfFailRet(pValue->GetSize(&cbSize));
    ArrayHolder<BYTE> rgbValue = new NOTHROW BYTE[cbSize];
    if (rgbValue == NULL)
    {
        ReportOOM();
        return E_OUTOFMEMORY;
    }
    memset(rgbValue.GetPtr(), 0, cbSize * sizeof(BYTE));

    CorElementType corElemType;
    IfFailRet(pValue->GetType(&corElemType));

    // Strings and SZ arrays have dedicated printers and return early.
    if (corElemType == ELEMENT_TYPE_STRING)
    {
        return PrintStringValue(pValue);
    }

    if (corElemType == ELEMENT_TYPE_SZARRAY)
    {
        return PrintSzArrayValue(pValue, pILFrame, pMD, indent, varToExpand, currentExpansion, currentExpansionSize, currentFrame);
    }

    // Copy the raw bytes out so primitives can be reinterpreted below.
    ToRelease<ICorDebugGenericValue> pGenericValue;
    IfFailRet(pValue->QueryInterface(IID_ICorDebugGenericValue, (LPVOID*) &pGenericValue));
    IfFailRet(pGenericValue->GetValue((LPVOID) &(rgbValue[0])));

    // Enums print symbolically (possibly OR-ed flag names).
    if(IsEnum(pValue))
    {
        Status = PrintEnumValue(pValue, rgbValue);
        ExtOut("\n");
        return Status;
    }

    switch (corElemType)
    {
    default:
        ExtOut("  (Unhandled CorElementType: 0x%x)\n", corElemType);
        break;

    case ELEMENT_TYPE_PTR:
        ExtOut("  = <pointer>\n");
        break;

    case ELEMENT_TYPE_FNPTR:
        {
            CORDB_ADDRESS addr = 0;
            ToRelease<ICorDebugReferenceValue> pReferenceValue = NULL;
            if(SUCCEEDED(pValue->QueryInterface(IID_ICorDebugReferenceValue, (LPVOID*) &pReferenceValue)))
                pReferenceValue->GetValue(&addr);
            ExtOut("  = <function pointer 0x%x>\n", addr);
        }
        break;

    case ELEMENT_TYPE_VALUETYPE:
    case ELEMENT_TYPE_CLASS:
        // Print the address (when available), then recurse into the fields.
        CORDB_ADDRESS addr;
        if(SUCCEEDED(pValue->GetAddress(&addr)))
        {
            ExtOut(" @ 0x%I64x\n", addr);
        }
        else
        {
            ExtOut("\n");
        }
        ProcessFields(pValue, NULL, pILFrame, indent + 1, varToExpand, currentExpansion, currentExpansionSize, currentFrame);
        break;

    // Primitives: reinterpret the copied raw bytes at the appropriate width.
    case ELEMENT_TYPE_BOOLEAN:
        ExtOut("  = %s\n", rgbValue[0] == 0 ? "false" : "true");
        break;

    case ELEMENT_TYPE_CHAR:
        ExtOut("  = '%C'\n", *(WCHAR *) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_I1:
        ExtOut("  = %d\n", *(char*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_U1:
        ExtOut("  = %d\n", *(unsigned char*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_I2:
        ExtOut("  = %hd\n", *(short*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_U2:
        ExtOut("  = %hu\n", *(unsigned short*) &(rgbValue[0]));
        break;

    // NOTE(review): native-int I/U are printed at 32-bit width here —
    // values above 32 bits would display truncated; confirm intent.
    case ELEMENT_TYPE_I:
        ExtOut("  = %d\n", *(int*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_U:
        ExtOut("  = %u\n", *(unsigned int*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_I4:
        ExtOut("  = %d\n", *(int*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_U4:
        ExtOut("  = %u\n", *(unsigned int*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_I8:
        ExtOut("  = %I64d\n", *(__int64*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_U8:
        ExtOut("  = %I64u\n", *(unsigned __int64*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_R4:
        ExtOut("  = %f\n", (double) *(float*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_R8:
        ExtOut("  = %f\n", *(double*) &(rgbValue[0]));
        break;

    case ELEMENT_TYPE_OBJECT:
        ExtOut("  = object\n");
        break;

        // TODO: The following corElementTypes are not yet implemented here.  Array
        // might be interesting to add, though the others may be of rather limited use:
        // ELEMENT_TYPE_ARRAY          = 0x14,     // MDARRAY <type> <rank> <bcount> <bound1> ... <lbcount> <lb1> ...
        //
        // ELEMENT_TYPE_GENERICINST    = 0x15,     // GENERICINST <generic type> <argCnt> <arg1> ... <argn>
    }

    return S_OK;
}
// Prints the parameters (bParams) and/or local variables (bLocals) of the
// managed method in the given IL frame. Parameter names come from metadata
// (with "this" synthesized for instance methods and "param_N" as fallback);
// local names come from PDB symbols when available, else "local_N". Each
// value is printed via PrintValue, expanding members that match varToExpand.
//
// Fixes vs. original: restores HTML-entity mojibake in address-of
// expressions ("¶mDef" -> "&paramDef", "¶mNameLen" -> "&paramNameLen").
// NOTE(review): 'typeDef' is accepted but unused in this body; kept for
// interface compatibility with existing callers.
static HRESULT PrintParameters(BOOL bParams, BOOL bLocals, IMetaDataImport * pMD, mdTypeDef typeDef, mdMethodDef methodDef, ICorDebugILFrame * pILFrame, ICorDebugModule * pModule, __in_z WCHAR* varToExpand, int currentFrame)
{
    HRESULT Status = S_OK;

    ULONG cParams = 0;
    ToRelease<ICorDebugValueEnum> pParamEnum;
    IfFailRet(pILFrame->EnumerateArguments(&pParamEnum));
    IfFailRet(pParamEnum->GetCount(&cParams));
    if (cParams > 0 && bParams)
    {
        DWORD methAttr = 0;
        IfFailRet(pMD->GetMethodProps(methodDef, NULL, NULL, 0, NULL, &methAttr, NULL, NULL, NULL, NULL));

        ExtOut("\nPARAMETERS:\n");
        for (ULONG i=0; i < cParams; i++)
        {
            ULONG paramNameLen = 0;
            mdParamDef paramDef;
            WCHAR paramName[mdNameLen] = W("\0");

            // Argument 0 of an instance method is the implicit "this";
            // metadata parameter indices are 1-based relative to it.
            if(i == 0 && (methAttr & mdStatic) == 0)
                swprintf_s(paramName, mdNameLen, W("this\0"));
            else
            {
                int idx = ((methAttr & mdStatic) == 0)? i : (i + 1);
                if(SUCCEEDED(pMD->GetParamForMethodIndex(methodDef, idx, ¶mDef)))
                    pMD->GetParamProps(paramDef, NULL, NULL, paramName, mdNameLen, ¶mNameLen, NULL, NULL, NULL, NULL);
            }
            if(_wcslen(paramName) == 0)
                swprintf_s(paramName, mdNameLen, W("param_%d\0"), i);

            ToRelease<ICorDebugValue> pValue;
            ULONG cArgsFetched;
            Status = pParamEnum->Next(1, &pValue, &cArgsFetched);

            if (FAILED(Status))
            {
                ExtOut("  + (Error 0x%x retrieving parameter '%S')\n", Status, paramName);
                continue;
            }

            if (Status == S_FALSE)
            {
                break;
            }

            WCHAR typeName[mdNameLen] = W("\0");
            GetTypeOfValue(pValue, typeName, mdNameLen);
            DMLOut("  + %S %s", typeName, DMLManagedVar(paramName, currentFrame, paramName));

            // Null references are reported directly, without recursion.
            ToRelease<ICorDebugReferenceValue> pRefValue;
            if(SUCCEEDED(pValue->QueryInterface(IID_ICorDebugReferenceValue, (void**)&pRefValue)) && pRefValue != NULL)
            {
                BOOL bIsNull = TRUE;
                pRefValue->IsNull(&bIsNull);
                if(bIsNull)
                {
                    ExtOut(" = null\n");
                    continue;
                }
            }

            WCHAR currentExpansion[mdNameLen];
            swprintf_s(currentExpansion, mdNameLen, W("%s\0"), paramName);
            if((Status=PrintValue(pValue, pILFrame, pMD, 0, varToExpand, currentExpansion, mdNameLen, currentFrame)) != S_OK)
                ExtOut("  + (Error 0x%x printing parameter %d)\n", Status, i);
        }
    }
    else if (cParams == 0 && bParams)
        ExtOut("\nPARAMETERS: (none)\n");

    ULONG cLocals = 0;
    ToRelease<ICorDebugValueEnum> pLocalsEnum;
    IfFailRet(pILFrame->EnumerateLocalVariables(&pLocalsEnum));
    IfFailRet(pLocalsEnum->GetCount(&cLocals));
    if (cLocals > 0 && bLocals)
    {
        // Local names require symbols; without them locals are still
        // enumerated but named "local_N".
        bool symbolsAvailable = false;
        SymbolReader symReader;
        if(SUCCEEDED(symReader.LoadSymbols(pMD, pModule)))
            symbolsAvailable = true;

        ExtOut("\nLOCALS:\n");
        for (ULONG i=0; i < cLocals; i++)
        {
            ULONG paramNameLen = 0;
            WCHAR paramName[mdNameLen] = W("\0");

            ToRelease<ICorDebugValue> pValue;
            if(symbolsAvailable)
            {
                Status = symReader.GetNamedLocalVariable(pILFrame, i, paramName, mdNameLen, &pValue);
            }
            else
            {
                ULONG cArgsFetched;
                Status = pLocalsEnum->Next(1, &pValue, &cArgsFetched);
            }
            if(_wcslen(paramName) == 0)
                swprintf_s(paramName, mdNameLen, W("local_%d\0"), i);

            if (FAILED(Status))
            {
                ExtOut("  + (Error 0x%x retrieving local variable '%S')\n", Status, paramName);
                continue;
            }

            if (Status == S_FALSE)
            {
                break;
            }

            WCHAR typeName[mdNameLen] = W("\0");
            GetTypeOfValue(pValue, typeName, mdNameLen);
            DMLOut("  + %S %s", typeName, DMLManagedVar(paramName, currentFrame, paramName));

            ToRelease<ICorDebugReferenceValue> pRefValue = NULL;
            if(SUCCEEDED(pValue->QueryInterface(IID_ICorDebugReferenceValue, (void**)&pRefValue)) && pRefValue != NULL)
            {
                BOOL bIsNull = TRUE;
                pRefValue->IsNull(&bIsNull);
                if(bIsNull)
                {
                    ExtOut(" = null\n");
                    continue;
                }
            }

            WCHAR currentExpansion[mdNameLen];
            swprintf_s(currentExpansion, mdNameLen, W("%s\0"), paramName);
            if((Status=PrintValue(pValue, pILFrame, pMD, 0, varToExpand, currentExpansion, mdNameLen, currentFrame)) != S_OK)
                ExtOut("  + (Error 0x%x printing local variable %d)\n", Status, i);
        }
    }
    else if (cLocals == 0 && bLocals)
        ExtOut("\nLOCALS: (none)\n");

    if(bParams || bLocals)
        ExtOut("\n");

    return S_OK;
}
// Recursively prints the fields of an object/value type when 'varToExpand'
// selects 'currentExpansion'. Base types other than System.Object and
// System.ValueType are surfaced as a synthetic "[basetype]" child; enums are
// skipped entirely (their value was already printed symbolically).
//
// pTypeCast - when non-NULL, inspect the value as this type (used for the
//             base-type recursion) instead of its exact runtime type.
//
// Fixes vs. original: restores the "¤tTypeDef" mojibake to
// "&currentTypeDef" in the GetToken call.
static HRESULT ProcessFields(ICorDebugValue* pInputValue, ICorDebugType* pTypeCast, ICorDebugILFrame * pILFrame, int indent, __in_z WCHAR* varToExpand, __inout_ecount(currentExpansionSize) WCHAR* currentExpansion, DWORD currentExpansionSize, int currentFrame)
{
    if(!ShouldExpandVariable(varToExpand, currentExpansion)) return S_OK;
    size_t currentExpansionLen = _wcslen(currentExpansion);

    HRESULT Status = S_OK;

    BOOL isNull = FALSE;
    ToRelease<ICorDebugValue> pValue;
    IfFailRet(DereferenceAndUnboxValue(pInputValue, &pValue, &isNull));

    if(isNull) return S_OK;

    mdTypeDef currentTypeDef;
    ToRelease<ICorDebugClass> pClass;
    ToRelease<ICorDebugValue2> pValue2;
    ToRelease<ICorDebugType> pType;
    ToRelease<ICorDebugModule> pModule;
    IfFailRet(pValue->QueryInterface(IID_ICorDebugValue2, (LPVOID *) &pValue2));
    if(pTypeCast == NULL)
        IfFailRet(pValue2->GetExactType(&pType));
    else
    {
        pType = pTypeCast;
        pType->AddRef();
    }
    IfFailRet(pType->GetClass(&pClass));
    IfFailRet(pClass->GetModule(&pModule));
    IfFailRet(pClass->GetToken(¤tTypeDef));

    ToRelease<IUnknown> pMDUnknown;
    ToRelease<IMetaDataImport> pMD;
    IfFailRet(pModule->GetMetaDataInterface(IID_IMetaDataImport, &pMDUnknown));
    IfFailRet(pMDUnknown->QueryInterface(IID_IMetaDataImport, (LPVOID*) &pMD));

    WCHAR baseTypeName[mdNameLen] = W("\0");
    ToRelease<ICorDebugType> pBaseType;
    if(SUCCEEDED(pType->GetBase(&pBaseType)) && pBaseType != NULL && SUCCEEDED(GetTypeOfValue(pBaseType, baseTypeName, mdNameLen)))
    {
        // Enum values were already printed symbolically; System.Object and
        // System.ValueType bases carry no interesting fields of their own.
        if(_wcsncmp(baseTypeName, W("System.Enum"), 11) == 0)
            return S_OK;
        else if(_wcsncmp(baseTypeName, W("System.Object"), 13) != 0 && _wcsncmp(baseTypeName, W("System.ValueType"), 16) != 0)
        {
            currentExpansion[currentExpansionLen] = W('\0');
            wcscat_s(currentExpansion, currentExpansionSize, W(".\0"));
            wcscat_s(currentExpansion, currentExpansionSize, W("[basetype]"));
            for(int i = 0; i < indent; i++) ExtOut("    ");
            DMLOut(" |- %S %s\n", baseTypeName, DMLManagedVar(currentExpansion, currentFrame, W("[basetype]")));

            if(ShouldExpandVariable(varToExpand, currentExpansion))
                ProcessFields(pInputValue, pBaseType, pILFrame, indent + 1, varToExpand, currentExpansion, currentExpansionSize, currentFrame);
        }
    }

    ULONG numFields = 0;
    HCORENUM fEnum = NULL;
    mdFieldDef fieldDef;
    while(SUCCEEDED(pMD->EnumFields(&fEnum, currentTypeDef, &fieldDef, 1, &numFields)) && numFields != 0)
    {
        ULONG nameLen = 0;
        DWORD fieldAttr = 0;
        WCHAR mdName[mdNameLen];
        WCHAR typeName[mdNameLen];
        if(SUCCEEDED(pMD->GetFieldProps(fieldDef, NULL, mdName, mdNameLen, &nameLen, &fieldAttr, NULL, NULL, NULL, NULL, NULL)))
        {
            // Extend the expansion path with ".<fieldName>" for this field.
            currentExpansion[currentExpansionLen] = W('\0');
            wcscat_s(currentExpansion, currentExpansionSize, W(".\0"));
            wcscat_s(currentExpansion, currentExpansionSize, mdName);

            ToRelease<ICorDebugValue> pFieldVal;

            if(fieldAttr & fdLiteral)
            {
                //TODO: Is it worth it??
                //ExtOut(" |- const %S", mdName);
            }
            else
            {
                for(int i = 0; i < indent; i++) ExtOut("    ");

                if (fieldAttr & fdStatic)
                    pType->GetStaticFieldValue(fieldDef, pILFrame, &pFieldVal);
                else
                {
                    ToRelease<ICorDebugObjectValue> pObjValue;
                    if (SUCCEEDED(pValue->QueryInterface(IID_ICorDebugObjectValue, (LPVOID*) &pObjValue)))
                        pObjValue->GetFieldValue(pClass, fieldDef, &pFieldVal);
                }

                if(pFieldVal != NULL)
                {
                    typeName[0] = L'\0';
                    GetTypeOfValue(pFieldVal, typeName, mdNameLen);
                    DMLOut(" |- %S %s", typeName, DMLManagedVar(currentExpansion, currentFrame, mdName));
                    PrintValue(pFieldVal, pILFrame, pMD, indent, varToExpand, currentExpansion, currentExpansionSize, currentFrame);
                }
                else if(!(fieldAttr & fdLiteral))
                    ExtOut(" |- < unknown type > %S\n", mdName);
            }
        }
    }
    pMD->CloseEnum(fEnum);

    return S_OK;
}
public:
// This is the main worker function used if !clrstack is called with "-i" to indicate
// that the public ICorDebug* should be used instead of the private DAC interface. NOTE:
// Currently only bParams is supported. NOTE: This is a work in progress and the
// following would be good to do:
// * More thorough testing with interesting stacks, especially with transitions into
// and out of managed code.
// * Consider interleaving this code back into the main body of !clrstack if it turns
// out that there's a lot of duplication of code between these two functions.
// (Still unclear how things will look once locals is implemented.)
// Walks the managed stack of the current thread using the public ICorDebug
// stack walker and prints each frame (and, optionally, its parameters and
// locals via PrintParameters). 'varToExpand' selects a variable path to
// expand recursively; 'onlyShowFrame' restricts args/locals output to one
// frame index (-1 = all frames).
static HRESULT ClrStackFromPublicInterface(BOOL bParams, BOOL bLocals, BOOL bSuppressLines, __in_z WCHAR* varToExpand = NULL, int onlyShowFrame = -1)
{
    HRESULT Status;

    ICorDebugProcess* pCorDebugProcess;
    IfFailRet(g_pRuntime->GetCorDebugInterface(&pCorDebugProcess));

    ExtOut("\n\n\nDumping managed stack and managed variables using ICorDebug.\n");
    ExtOut("=============================================================================\n");

    // Bind a stack walker to the debugger's current OS thread.
    ToRelease<ICorDebugThread> pThread;
    ToRelease<ICorDebugThread3> pThread3;
    ToRelease<ICorDebugStackWalk> pStackWalk;
    ULONG ulThreadID = 0;
    g_ExtSystem->GetCurrentThreadSystemId(&ulThreadID);

    IfFailRet(pCorDebugProcess->GetThread(ulThreadID, &pThread));
    IfFailRet(pThread->QueryInterface(IID_ICorDebugThread3, (LPVOID *) &pThread3));
    IfFailRet(pThread3->CreateStackWalk(&pStackWalk));

    InternalFrameManager internalFrameManager;
    IfFailRet(internalFrameManager.Init(pThread3));

#if defined(_AMD64_) || defined(_ARM64_)
    ExtOut("%-16s %-16s %s\n", "Child SP", "IP", "Call Site");
#elif defined(_X86_) || defined(_ARM_)
    ExtOut("%-8s %-8s %s\n", "Child SP", "IP", "Call Site");
#endif

    int currentFrame = -1;

    for (Status = S_OK; ; Status = pStackWalk->Next())
    {
        currentFrame++;

        if (Status == CORDBG_S_AT_END_OF_STACK)
        {
            ExtOut("Stack walk complete.\n");
            break;
        }
        IfFailRet(Status);

        if (IsInterrupt())
        {
            ExtOut("<interrupted>\n");
            break;
        }

        // This is a workaround for a problem in the MacOS DAC/DBI PAL. The PAL exception
        // handling is unnecessarily enabled for DLLs and is not properly passing what I
        // think is recoverable stack fault on to the OS. Instead it is causing a fault
        // GP fault. Putting this struct in the heap works around this fault.
        ArrayHolder<CROSS_PLATFORM_CONTEXT> context = new CROSS_PLATFORM_CONTEXT[1];
        ULONG32 cbContextActual;
        if ((Status = pStackWalk->GetContext(
            DT_CONTEXT_FULL,
            sizeof(CROSS_PLATFORM_CONTEXT),
            &cbContextActual,
            (BYTE *)context.GetPtr())) != S_OK)
        {
            if (FAILED(Status))
            {
                ExtOut("GetFrameContext failed: %lx\n", Status);
            }
            break;
        }

        // First find the info for the Frame object, if the current frame has an associated clr!Frame.
        CLRDATA_ADDRESS sp = GetSP(*context.GetPtr());
        CLRDATA_ADDRESS ip = GetIP(*context.GetPtr());

        // GetFrame returns S_FALSE for a native stack frame (no managed frame).
        ToRelease<ICorDebugFrame> pFrame;
        IfFailRet(pStackWalk->GetFrame(&pFrame));
        if (Status == S_FALSE)
        {
            DMLOut("%p %s [NativeStackFrame]\n", SOS_PTR(sp), DMLIP(ip));
            continue;
        }

        // TODO: What about internal frames preceding the above native stack frame?
        // Should I just exclude the above native stack frame from the output?
        // TODO: Compare caller frame (instead of current frame) against internal frame,
        // to deal with issues of current frame's current SP being closer to leaf than
        // EE Frames it pushes.  By "caller" I mean not just managed caller, but the
        // very next non-internal frame dbi would return (native or managed). OR...
        // perhaps I should use GetStackRange() instead, to see if the internal frame
        // appears leafier than the base-part of the range of the currently iterated
        // stack frame?  I think I like that better.
        _ASSERTE(pFrame != NULL);
        IfFailRet(internalFrameManager.PrintPrecedingInternalFrames(pFrame));

        // Print the stack and instruction pointers.
        DMLOut("%p %s ", SOS_PTR(sp), DMLIP(ip));

        ToRelease<ICorDebugRuntimeUnwindableFrame> pRuntimeUnwindableFrame;
        Status = pFrame->QueryInterface(IID_ICorDebugRuntimeUnwindableFrame, (LPVOID *) &pRuntimeUnwindableFrame);
        if (SUCCEEDED(Status))
        {
            ExtOut("[RuntimeUnwindableFrame]\n");
            continue;
        }

        // Print the method/Frame info

        // TODO: IS THE FOLLOWING NECESSARY, OR AM I GUARANTEED THAT ALL INTERNAL FRAMES
        // CAN BE FOUND VIA GetActiveInternalFrames?
        ToRelease<ICorDebugInternalFrame> pInternalFrame;
        Status = pFrame->QueryInterface(IID_ICorDebugInternalFrame, (LPVOID *) &pInternalFrame);
        if (SUCCEEDED(Status))
        {
            // This is a clr!Frame.
            LPCWSTR pwszFrameName = W("TODO: Implement GetFrameName");
            ExtOut("[%S: p] ", pwszFrameName);
        }

        // Print the frame's associated function info, if it has any.
        ToRelease<ICorDebugILFrame> pILFrame;
        HRESULT hrILFrame = pFrame->QueryInterface(IID_ICorDebugILFrame, (LPVOID*) &pILFrame);

        if (SUCCEEDED(hrILFrame))
        {
            ToRelease<ICorDebugFunction> pFunction;
            Status = pFrame->GetFunction(&pFunction);
            if (FAILED(Status))
            {
                // We're on a JITted frame, but there's no Function for it.  So it must
                // be...
                ExtOut("[IL Stub or LCG]\n");
                continue;
            }

            ToRelease<ICorDebugClass> pClass;
            ToRelease<ICorDebugModule> pModule;
            mdMethodDef methodDef;
            IfFailRet(pFunction->GetClass(&pClass));
            IfFailRet(pFunction->GetModule(&pModule));
            IfFailRet(pFunction->GetToken(&methodDef));

            WCHAR wszModuleName[MAX_LONGPATH];
            ULONG32 cchModuleNameActual;
            IfFailRet(pModule->GetName(_countof(wszModuleName), &cchModuleNameActual, wszModuleName));

            ToRelease<IUnknown> pMDUnknown;
            ToRelease<IMetaDataImport> pMD;
            IfFailRet(pModule->GetMetaDataInterface(IID_IMetaDataImport, &pMDUnknown));
            IfFailRet(pMDUnknown->QueryInterface(IID_IMetaDataImport, (LPVOID*) &pMD));

            mdTypeDef typeDef;
            IfFailRet(pClass->GetToken(&typeDef));

            // Note that we don't need to pretty print the class, as class name is
            // already printed from GetMethodName below

            CQuickBytes functionName;
            // TODO: WARNING: GetMethodName() appears to include lots of unexercised
            // code, as evidenced by some fundamental bugs I found.  It should either be
            // thoroughly reviewed, or some other more exercised code path to grab the
            // name should be used.
            // TODO: If we do stay with GetMethodName, it should be updated to print
            // generics properly.  Today, it does not show generic type parameters, and
            // if any arguments have a generic type, those arguments are just shown as
            // "__Canon", even when they're value types.
            GetMethodName(methodDef, pMD, &functionName);

            DMLOut(DMLManagedVar(W("-a"), currentFrame, (LPWSTR)functionName.Ptr()));
            ExtOut(" (%S)\n", wszModuleName);

            if (SUCCEEDED(hrILFrame) && (bParams || bLocals))
            {
                if(onlyShowFrame == -1 || (onlyShowFrame >= 0 && currentFrame == onlyShowFrame))
                    IfFailRet(PrintParameters(bParams, bLocals, pMD, typeDef, methodDef, pILFrame, pModule, varToExpand, currentFrame));
            }
        }
    }
    ExtOut("=============================================================================\n");

    return S_OK;
}
};
// Formats the location of a GC stack root for display:
// "<reg>[+|-offset]: <address>" plus, when printObj is true, " -> <object>"
// and any "(pinned)"/"(interior)" flag annotations.
//
// ref      - the stack root to describe.
// printObj - when true (the default), also print the referenced object
//            pointer. The default argument restores compatibility with
//            callers (e.g. PrintRef) that invoke this with one argument.
WString BuildRegisterOutput(const SOSStackRefData &ref, bool printObj = true)
{
    WString res;

    if (ref.HasRegisterInformation)
    {
        WCHAR reg[32];
        HRESULT hr = g_sos->GetRegisterName(ref.Register, _countof(reg), reg, NULL);
        if (SUCCEEDED(hr))
            res = reg;
        else
            res = W("<unknown register>");

        if (ref.Offset)
        {
            int offset = ref.Offset;
            if (offset > 0)
            {
                res += W("+");
            }
            else
            {
                res += W("-");
                offset = -offset;
            }

            res += Hex(offset);
        }

        res += W(": ");
    }

    if (ref.Address)
        res += WString(Pointer(ref.Address));

    if (printObj)
    {
        if (ref.Address)
            res += W(" -> ");

        res += WString(ObjectPtr(ref.Object));
    }

    if (ref.Flags & SOSRefPinned)
    {
        res += W(" (pinned)");
    }

    if (ref.Flags & SOSRefInterior)
    {
        res += W(" (interior)");
    }

    return res;
}
void PrintRef(const SOSStackRefData &ref, TableOutput &out)
{
WString res = BuildRegisterOutput(ref);
if (ref.Object && (ref.Flags & SOSRefInterior) == 0)
{
WCHAR type[128];
sos::BuildTypeWithExtraInfo(TO_TADDR(ref.Object), _countof(type), type);
res += WString(W(" - ")) + type;
}
out.WriteColumn(2, res);
}
class ClrStackImpl
{
public:
// Walks and prints the managed stack of the thread with OS id 'osID' using
// the DAC stack walker. Optionally interleaves native frames
// (DEBUG_STACK_CONTEXT builds with bFull), GC root info (bGC), per-frame
// register values (bDisplayRegVals), and args/locals (bParams/bLocals).
static void PrintThread(ULONG osID, BOOL bParams, BOOL bLocals, BOOL bSuppressLines, BOOL bGC, BOOL bFull, BOOL bDisplayRegVals)
{
    _ASSERTE(g_targetMachine != nullptr);

    // Symbols variables
    ULONG symlines = 0; // symlines will be non-zero only if SYMOPT_LOAD_LINES was set in the symbol options
    if (!bSuppressLines && SUCCEEDED(g_ExtSymbols->GetSymbolOptions(&symlines)))
    {
        symlines &= SYMOPT_LOAD_LINES;
    }

    // Without line loading enabled there is nothing to resolve, so suppress.
    if (symlines == 0)
        bSuppressLines = TRUE;

    ToRelease<IXCLRDataStackWalk> pStackWalk;

    HRESULT hr = CreateStackWalk(osID, &pStackWalk);
    if (FAILED(hr) || pStackWalk == NULL)
    {
        ExtOut("Failed to start stack walk: %lx\n", hr);
        return;
    }

#ifdef DEBUG_STACK_CONTEXT
    PDEBUG_STACK_FRAME currentNativeFrame = NULL;
    ULONG numNativeFrames = 0;
    if (bFull)
    {
        hr = GetContextStackTrace(osID, &numNativeFrames);
        if (FAILED(hr))
        {
            ExtOut("Failed to get native stack frames: %lx\n", hr);
            return;
        }
        currentNativeFrame = &g_Frames[0];
    }
#endif // DEBUG_STACK_CONTEXT

    unsigned int refCount = 0, errCount = 0;
    ArrayHolder<SOSStackRefData> pRefs = NULL;
    ArrayHolder<SOSStackRefError> pErrs = NULL;
    // Best effort: when GC reference enumeration fails, print no refs.
    if (bGC && FAILED(GetGCRefs(osID, &pRefs, &refCount, &pErrs, &errCount)))
        refCount = 0;

    TableOutput out(3, POINTERSIZE_HEX, AlignRight);
    out.WriteRow("Child SP", "IP", "Call Site");

    int frameNumber = 0;
    int internalFrames = 0;
    do
    {
        if (IsInterrupt())
        {
            ExtOut("<interrupted>\n");
            break;
        }
        CLRDATA_ADDRESS ip = 0, sp = 0;
        hr = GetFrameLocation(pStackWalk, &ip, &sp);
        if (SUCCEEDED(hr))
        {
            DacpFrameData FrameData;
            HRESULT frameDataResult = FrameData.Request(pStackWalk);
            // A clr!Frame (internal frame) reports its own address as the SP.
            if (SUCCEEDED(frameDataResult) && FrameData.frameAddr)
                sp = FrameData.frameAddr;

#ifdef DEBUG_STACK_CONTEXT
            // Flush native frames that are leafier than this managed frame.
            while ((numNativeFrames > 0) && (currentNativeFrame->StackOffset <= sp))
            {
                if (currentNativeFrame->StackOffset != sp)
                {
                    PrintNativeStackFrame(out, currentNativeFrame, bSuppressLines);
                }
                currentNativeFrame++;
                numNativeFrames--;
            }
#endif // DEBUG_STACK_CONTEXT

            // Print the stack pointer.
            out.WriteColumn(0, sp);

            // Print the method/Frame info
            if (SUCCEEDED(frameDataResult) && FrameData.frameAddr)
            {
                internalFrames++;

                // Skip the instruction pointer because it doesn't really mean anything for method frames
                out.WriteColumn(1, bFull ? String("") : NativePtr(ip));

                // This is a clr!Frame.
                out.WriteColumn(2, GetFrameFromAddress(TO_TADDR(FrameData.frameAddr), pStackWalk, bFull));

                // Print out gc references for the Frame.
                for (unsigned int i = 0; i < refCount; ++i)
                    if (pRefs[i].Source == sp)
                        PrintRef(pRefs[i], out);

                // Print out an error message if we got one.
                for (unsigned int i = 0; i < errCount; ++i)
                    if (pErrs[i].Source == sp)
                        out.WriteColumn(2, "Failed to enumerate GC references.");
            }
            else
            {
                // To get the source line number of the actual code that threw an exception, the IP needs
                // to be adjusted in certain cases.
                //
                // The IP of stack frame points to either:
                //
                // 1) Currently executing instruction (if you hit a breakpoint or are single stepping through).
                // 2) The instruction that caused a hardware exception (div by zero, null ref, etc).
                // 3) The instruction after the call to an internal runtime function (FCALL like IL_Throw,
                //    JIT_OverFlow, etc.) that caused a software exception.
                // 4) The instruction after the call to a managed function (non-leaf node).
                //
                // #3 and #4 are the cases that need IP adjusted back because they point after the call instruction
                // and may point to the next (incorrect) IL instruction/source line.  We distinguish these from #1
                // or #2 by either being non-leaf node stack frame (#4) or the present of an internal stack frame (#3).
                bool bAdjustIPForLineNumber = frameNumber > 0 || internalFrames > 0;
                frameNumber++;

                // The unmodified IP is displayed which points after the exception in most cases. This means that the
                // printed IP and the printed line number often will not map to one another and this is intentional.
                out.WriteColumn(1, InstructionPtr(ip));
                out.WriteColumn(2, MethodNameFromIP(ip, bSuppressLines, bFull, bFull, bAdjustIPForLineNumber));

                // Print out gc references.  refCount will be zero if bGC is false (or if we
                // failed to fetch gc reference information).
                for (unsigned int i = 0; i < refCount; ++i)
                    if (pRefs[i].Source == ip && pRefs[i].StackPointer == sp)
                        PrintRef(pRefs[i], out);

                // Print out an error message if we got one.
                for (unsigned int i = 0; i < errCount; ++i)
                    if (pErrs[i].Source == sp)
                        out.WriteColumn(2, "Failed to enumerate GC references.");

                if (bParams || bLocals)
                    PrintArgsAndLocals(pStackWalk, bParams, bLocals);
            }
        }

        if (bDisplayRegVals)
            PrintManagedFrameContext(pStackWalk);

    } while (pStackWalk->Next() == S_OK);

#ifdef DEBUG_STACK_CONTEXT
    // Any remaining native frames are rootward of all managed frames.
    while (numNativeFrames > 0)
    {
        PrintNativeStackFrame(out, currentNativeFrame, bSuppressLines);
        currentNativeFrame++;
        numNativeFrames--;
    }
#endif // DEBUG_STACK_CONTEXT
}
// Prints the register context of the current managed frame for the target
// architecture the extension was built for.
//
// Fixes vs. original (ARM64 branch):
//  - The X-register loop iterated i = 0..8 with stride 1 printing
//    X[i..i+2], which emitted x0..x10 with duplicates and never printed
//    x11..x27. It now strides by 3, covering x0..x26, then prints x27 and
//    x28 individually.
//  - sp/lr/pc/cpsr/fpsr were read from context.ArmContext (the 32-bit ARM
//    union member, i.e. wrong offsets for ARM64); they are now read from
//    context.Arm64Context. NOTE(review): field spelled Fpsr per the ARM64
//    NT context — confirm against CROSS_PLATFORM_CONTEXT's definition.
static HRESULT PrintManagedFrameContext(IXCLRDataStackWalk *pStackWalk)
{
    CROSS_PLATFORM_CONTEXT context;
    HRESULT hr = pStackWalk->GetContext(DT_CONTEXT_FULL, g_targetMachine->GetContextSize(), NULL, (BYTE *)&context);
    if (FAILED(hr))
    {
        ExtOut("GetFrameContext failed: %lx\n", hr);
        return hr;
    }
    if (hr == S_FALSE)
    {
        // GetFrameContext returns S_FALSE if the frame iterator is invalid.   That's basically an error for us.
        return E_FAIL;
    }

#if defined(SOS_TARGET_AMD64)
    String outputFormat3 = " %3s=%016x %3s=%016x %3s=%016x\n";
    String outputFormat2 = " %3s=%016x %3s=%016x\n";
    ExtOut(outputFormat3, "rsp", context.Amd64Context.Rsp, "rbp", context.Amd64Context.Rbp, "rip", context.Amd64Context.Rip);
    ExtOut(outputFormat3, "rax", context.Amd64Context.Rax, "rbx", context.Amd64Context.Rbx, "rcx", context.Amd64Context.Rcx);
    ExtOut(outputFormat3, "rdx", context.Amd64Context.Rdx, "rsi", context.Amd64Context.Rsi, "rdi", context.Amd64Context.Rdi);
    ExtOut(outputFormat3, "r8", context.Amd64Context.R8, "r9", context.Amd64Context.R9, "r10", context.Amd64Context.R10);
    ExtOut(outputFormat3, "r11", context.Amd64Context.R11, "r12", context.Amd64Context.R12, "r13", context.Amd64Context.R13);
    ExtOut(outputFormat2, "r14", context.Amd64Context.R14, "r15", context.Amd64Context.R15);
#elif defined(SOS_TARGET_X86)
    String outputFormat3 = " %3s=%08x %3s=%08x %3s=%08x\n";
    String outputFormat2 = " %3s=%08x %3s=%08x\n";
    ExtOut(outputFormat3, "esp", context.X86Context.Esp, "ebp", context.X86Context.Ebp, "eip", context.X86Context.Eip);
    ExtOut(outputFormat3, "eax", context.X86Context.Eax, "ebx", context.X86Context.Ebx, "ecx", context.X86Context.Ecx);
    ExtOut(outputFormat3, "edx", context.X86Context.Edx, "esi", context.X86Context.Esi, "edi", context.X86Context.Edi);
#elif defined(SOS_TARGET_ARM)
    String outputFormat3 = " %3s=%08x %3s=%08x %3s=%08x\n";
    String outputFormat2 = " %s=%08x %s=%08x\n";
    String outputFormat1 = " %s=%08x\n";
    ExtOut(outputFormat3, "r0", context.ArmContext.R0, "r1", context.ArmContext.R1, "r2", context.ArmContext.R2);
    ExtOut(outputFormat3, "r3", context.ArmContext.R3, "r4", context.ArmContext.R4, "r5", context.ArmContext.R5);
    ExtOut(outputFormat3, "r6", context.ArmContext.R6, "r7", context.ArmContext.R7, "r8", context.ArmContext.R8);
    ExtOut(outputFormat3, "r9", context.ArmContext.R9, "r10", context.ArmContext.R10, "r11", context.ArmContext.R11);
    ExtOut(outputFormat1, "r12", context.ArmContext.R12);
    ExtOut(outputFormat3, "sp", context.ArmContext.Sp, "lr", context.ArmContext.Lr, "pc", context.ArmContext.Pc);
    ExtOut(outputFormat2, "cpsr", context.ArmContext.Cpsr, "fpsr", context.ArmContext.Fpscr);
#elif defined(SOS_TARGET_ARM64)
    String outputXRegFormat3 = " x%d=%016x x%d=%016x x%d=%016x\n";
    String outputXRegFormat1 = " x%d=%016x\n";
    String outputFormat3 = " %s=%016x %s=%016x %s=%016x\n";
    String outputFormat2 = " %s=%08x %s=%08x\n";
    DWORD64 *X = context.Arm64Context.X;
    // x0..x26 three per row, then x27 and x28 singly (29 integer registers).
    for (int i = 0; i < 9; i++)
    {
        ExtOut(outputXRegFormat3, i*3 + 0, X[i*3 + 0], i*3 + 1, X[i*3 + 1], i*3 + 2, X[i*3 + 2]);
    }
    ExtOut(outputXRegFormat1, 27, X[27]);
    ExtOut(outputXRegFormat1, 28, X[28]);
    ExtOut(outputFormat3, "sp", context.Arm64Context.Sp, "lr", context.Arm64Context.Lr, "pc", context.Arm64Context.Pc);
    ExtOut(outputFormat2, "cpsr", context.Arm64Context.Cpsr, "fpsr", context.Arm64Context.Fpsr);
#else
    ExtOut("Can't display register values for this platform\n");
#endif
    return S_OK;
}
// Retrieves the instruction pointer and stack pointer of the frame the
// stack walker is currently positioned on.
static HRESULT GetFrameLocation(IXCLRDataStackWalk *pStackWalk, CLRDATA_ADDRESS *ip, CLRDATA_ADDRESS *sp)
{
    CROSS_PLATFORM_CONTEXT frameContext;
    HRESULT hr = pStackWalk->GetContext(DT_CONTEXT_FULL, g_targetMachine->GetContextSize(), NULL, (BYTE *)&frameContext);
    if (FAILED(hr))
    {
        ExtOut("GetFrameContext failed: %lx\n", hr);
        return hr;
    }
    // S_FALSE means the frame iterator is invalid; report that as a failure.
    if (hr == S_FALSE)
        return E_FAIL;

    *ip = GetIP(frameContext);
    *sp = GetSP(frameContext);

    // ARM Thumb code addresses carry a low-bit tag; strip it from the IP.
    if (IsDbgTargetArm())
        *ip = *ip & ~THUMB_CODE;

    return S_OK;
}
// Prints one native stack frame into the table: stack offset, instruction
// pointer, and (when resolvable) "symbol + displacement at file:line".
static void PrintNativeStackFrame(TableOutput out, PDEBUG_STACK_FRAME frame, BOOL bSuppressLines)
{
    char sourceFile[MAX_LONGPATH + 1];
    char symbolName[1024];
    ULONG64 disp;

    ULONG64 addr = frame->InstructionOffset;
    out.WriteColumn(0, frame->StackOffset);
    out.WriteColumn(1, NativePtr(addr));

    HRESULT hr = g_ExtSymbols->GetNameByOffset(TO_CDADDR(addr), symbolName, _countof(symbolName), NULL, &disp);
    if (FAILED(hr) || symbolName[0] == '\0')
    {
        // No symbol for this address; leave the last column empty.
        out.WriteColumn(2, "");
        return;
    }

    String text;
    text += symbolName;
    if (disp)
    {
        text += " + ";
        text += Decimal(disp);
    }
    if (!bSuppressLines)
    {
        ULONG lineNo;
        if (SUCCEEDED(g_ExtSymbols->GetLineByOffset(TO_CDADDR(addr), &lineNo, sourceFile, _countof(sourceFile), NULL, NULL)))
        {
            text += " at ";
            text += sourceFile;
            text += ":";
            text += Decimal(lineNo);
        }
    }
    out.WriteColumn(2, text);
}
// Prints the managed stack of the thread the debugger is currently on,
// prefixed with its OS and debugger thread ids.
static void PrintCurrentThread(BOOL bParams, BOOL bLocals, BOOL bSuppressLines, BOOL bGC, BOOL bNative, BOOL bDisplayRegVals)
{
    ULONG systemId = 0;
    g_ExtSystem->GetCurrentThreadSystemId(&systemId);
    ExtOut("OS Thread Id: 0x%x ", systemId);

    ULONG debuggerId = 0;
    g_ExtSystem->GetCurrentThreadId(&debuggerId);
    ExtOut("(%d)\n", debuggerId);

    PrintThread(systemId, bParams, bLocals, bSuppressLines, bGC, bNative, bDisplayRegVals);
}
// Walks the runtime's thread store and prints a stack for every managed
// thread that has an associated OS thread. Stops early on user interrupt.
static void PrintAllThreads(BOOL bParams, BOOL bLocals, BOOL bSuppressLines, BOOL bGC, BOOL bNative, BOOL bDisplayRegVals)
{
    HRESULT Status;
    DacpThreadStoreData threadStore;
    if ((Status = threadStore.Request(g_sos)) != S_OK)
    {
        ExtErr("Failed to request ThreadStore\n");
        return;
    }

    DacpThreadData threadData;
    for (CLRDATA_ADDRESS curThread = threadStore.firstThread; curThread != 0; curThread = threadData.nextThread)
    {
        if (IsInterrupt())
            break;

        if ((Status = threadData.Request(g_sos, curThread)) != S_OK)
        {
            ExtErr("Failed to request thread at %p\n", curThread);
            return;
        }
        // Threads without an OS identity (e.g. not yet started or dead) are skipped.
        if (threadData.osThreadId != 0)
        {
            ExtOut("OS Thread Id: 0x%x\n", threadData.osThreadId);
            PrintThread(threadData.osThreadId, bParams, bLocals, bSuppressLines, bGC, bNative, bDisplayRegVals);
        }
    }
}
private:
// Obtains an IXCLRDataStackWalk for the managed thread identified by osID.
// Prints guidance and returns the failure code when the thread is not managed.
static HRESULT CreateStackWalk(ULONG osID, IXCLRDataStackWalk **ppStackwalk)
{
    ToRelease<IXCLRDataTask> pTask;
    HRESULT hr = g_clrData->GetTaskByOSThreadID(osID, &pTask);
    if (hr != S_OK)
    {
        ExtOut("Unable to walk the managed stack. The current thread is likely not a \n");
        ExtOut("managed thread. You can run " SOSThreads " to get a list of managed threads in\n");
        ExtOut("the process\n");
        return hr;
    }

    // Request every simple-frame kind so nothing on the stack is skipped.
    const ULONG32 walkFlags = CLRDATA_SIMPFRAME_UNRECOGNIZED |
                              CLRDATA_SIMPFRAME_MANAGED_METHOD |
                              CLRDATA_SIMPFRAME_RUNTIME_MANAGED_CODE |
                              CLRDATA_SIMPFRAME_RUNTIME_UNMANAGED_CODE;
    return pTask->CreateStackWalk(walkFlags, ppStackwalk);
}
/* Prints the arguments and/or locals for the frame the stack walker is
 * currently positioned on.
 * Params:
 *   pStackWalk - the stack we are printing
 *   bArgs - whether to print args
 *   bLocals - whether to print locals
 */
static void PrintArgsAndLocals(IXCLRDataStackWalk *pStackWalk, BOOL bArgs, BOOL bLocals)
{
    ToRelease<IXCLRDataFrame> frame;
    ToRelease<IXCLRDataValue> value;
    HRESULT hr = pStackWalk->GetFrame(&frame);

    // Arguments first; a failure here also suppresses the locals below.
    if (bArgs && SUCCEEDED(hr))
    {
        ULONG32 argCount = 0;
        hr = frame->GetNumArguments(&argCount);
        if (SUCCEEDED(hr))
            hr = ShowArgs(argCount, frame, value);
    }

    if (bLocals && SUCCEEDED(hr))
    {
        ULONG32 localCount = 0;
        hr = frame->GetNumLocalVariables(&localCount);
        if (SUCCEEDED(hr))
            ShowLocals(localCount, frame, value);
    }

    ExtOut("\n");
}
/* Displays the arguments to a function.
 * Params:
 *   argy - the number of arguments the function has
 *   pFramey - the frame we are inspecting
 *   pVal - a pointer to the CLRDataValue we use to query for info about the args
 * Returns S_OK, a failure HRESULT from the data interfaces, or E_FAIL on OOM.
 */
static HRESULT ShowArgs(ULONG32 argy, IXCLRDataFrame *pFramey, IXCLRDataValue *pVal)
{
    CLRDATA_ADDRESS addr = 0;
    ULONG64 outVar = 0;
    ULONG32 tmp;
    HRESULT hr = S_OK;
    ArrayHolder<WCHAR> argName = new NOTHROW WCHAR[mdNameLen];
    if (!argName)
    {
        ReportOOM();
        return E_FAIL;
    }
    for (ULONG32 i=0; i < argy; i++)
    {
        // Emit the section header only once, before the first argument.
        if (i == 0)
        {
            ExtOut(" PARAMETERS:\n");
        }
        // BUGFIX: reset per argument. Previously this flag was declared once
        // outside the loop and never cleared, so after the first argument with
        // a printable location every later nameless argument still printed a
        // spurious "= ".
        BOOL fPrintedLocation = FALSE;
        hr = pFramey->GetArgumentByIndex(i,
            &pVal,
            mdNameLen,
            &tmp,
            argName);
        if (FAILED(hr))
            return hr;
        ExtOut(" ");
        if (argName[0] != L'\0')
        {
            ExtOut("%S ", argName.GetPtr());
        }
        // At times we cannot print the value of a parameter (most
        // common case being a non-primitive value type). In these
        // cases we need to print the location of the parameter,
        // so that we can later examine it (e.g. using !dumpvc)
        {
            bool result = SUCCEEDED(pVal->GetNumLocations(&tmp)) && tmp == 1;
            if (result)
                result = SUCCEEDED(pVal->GetLocationByIndex(0, &tmp, &addr));
            if (result)
            {
                if (tmp == CLRDATA_VLOC_REGISTER)
                {
                    ExtOut("(<CLR reg>) ");
                }
                else
                {
                    ExtOut("(0x%p) ", SOS_PTR(CDA_TO_UL64(addr)));
                }
                fPrintedLocation = TRUE;
            }
        }
        // Print "= " only when a name or a location preceded the value.
        if (argName[0] != L'\0' || fPrintedLocation)
        {
            ExtOut("= ");
        }
        // GetBytes with a zero-size buffer reports the required size through
        // ERROR_BUFFER_OVERFLOW; anything else means there is no data.
        if (HRESULT_CODE(pVal->GetBytes(0,&tmp,NULL)) == ERROR_BUFFER_OVERFLOW)
        {
            ArrayHolder<BYTE> pByte = new NOTHROW BYTE[tmp + 1];
            if (pByte == NULL)
            {
                ReportOOM();
                return E_FAIL;
            }
            hr = pVal->GetBytes(tmp, &tmp, pByte);
            if (FAILED(hr))
            {
                ExtOut("<unable to retrieve data>\n");
            }
            else
            {
                // Widen primitive-sized payloads to 64 bits; other sizes print as 0.
                switch(tmp)
                {
                    case 1: outVar = *((BYTE *)pByte.GetPtr()); break;
                    case 2: outVar = *((short *)pByte.GetPtr()); break;
                    case 4: outVar = *((DWORD *)pByte.GetPtr()); break;
                    case 8: outVar = *((ULONG64 *)pByte.GetPtr()); break;
                    default: outVar = 0;
                }
                // Non-zero values are emitted as DML object links so object
                // references are clickable in the debugger.
                if (outVar)
                    DMLOut("0x%s\n", DMLObject(outVar));
                else
                    ExtOut("0x%p\n", SOS_PTR(outVar));
            }
        }
        else
        {
            ExtOut("<no data>\n");
        }
        pVal->Release();
    }
    return S_OK;
}
/* Prints the locals of a frame.
 * Params:
 *   localy - the number of locals in the frame
 *   pFramey - the frame we are inspecting
 *   pVal - receives each local's IXCLRDataValue in turn (released per local)
 * Returns S_OK, a failure HRESULT from the data interfaces, or E_FAIL on OOM.
 */
static HRESULT ShowLocals(ULONG32 localy, IXCLRDataFrame *pFramey, IXCLRDataValue *pVal)
{
    for (ULONG32 i=0; i < localy; i++)
    {
        // Emit the section header only once, before the first local.
        if (i == 0)
            ExtOut(" LOCALS:\n");
        HRESULT hr;
        ExtOut(" ");
        // local names don't work in Whidbey.
        hr = pFramey->GetLocalVariableByIndex(i, &pVal, mdNameLen, NULL, g_mdName);
        if (FAILED(hr))
        {
            return hr;
        }
        // When the local lives in exactly one location, show where: either a
        // CLR-managed register or a memory address (usable with e.g. !dumpvc).
        ULONG32 numLocations;
        if (SUCCEEDED(pVal->GetNumLocations(&numLocations)) &&
            numLocations == 1)
        {
            ULONG32 flags;
            CLRDATA_ADDRESS addr;
            if (SUCCEEDED(pVal->GetLocationByIndex(0, &flags, &addr)))
            {
                if (flags == CLRDATA_VLOC_REGISTER)
                {
                    ExtOut("<CLR reg> ");
                }
                else
                {
                    ExtOut("0x%p ", SOS_PTR(CDA_TO_UL64(addr)));
                }
            }
            // Can I get a name for the item?
            ExtOut("= ");
        }
        // GetBytes with a zero-size buffer reports the required size through
        // ERROR_BUFFER_OVERFLOW; anything else means there is no data.
        ULONG32 dwSize = 0;
        hr = pVal->GetBytes(0, &dwSize, NULL);
        if (HRESULT_CODE(hr) == ERROR_BUFFER_OVERFLOW)
        {
            ArrayHolder<BYTE> pByte = new NOTHROW BYTE[dwSize + 1];
            if (pByte == NULL)
            {
                ReportOOM();
                return E_FAIL;
            }
            hr = pVal->GetBytes(dwSize,&dwSize,pByte);
            if (FAILED(hr))
            {
                ExtOut("<unable to retrieve data>\n");
            }
            else
            {
                // Widen primitive-sized payloads to 64 bits; other sizes print as 0.
                ULONG64 outVar = 0;
                switch(dwSize)
                {
                    case 1: outVar = *((BYTE *) pByte.GetPtr()); break;
                    case 2: outVar = *((short *) pByte.GetPtr()); break;
                    case 4: outVar = *((DWORD *) pByte.GetPtr()); break;
                    case 8: outVar = *((ULONG64 *) pByte.GetPtr()); break;
                    default: outVar = 0;
                }
                // Non-zero values are emitted as DML object links so object
                // references are clickable in the debugger.
                if (outVar)
                    DMLOut("0x%s\n", DMLObject(outVar));
                else
                    ExtOut("0x%p\n", SOS_PTR(outVar));
            }
        }
        else
        {
            ExtOut("<no data>\n");
        }
        pVal->Release();
    }
    return S_OK;
}
};
#ifndef FEATURE_PAL
WatchCmd g_watchCmd;
// The grand new !Watch command, private to Apollo for now
// Implements !Watch: maintains the persistent tree of watch expressions
// backed by g_watchCmd. Actions (mutually exclusive, first match wins):
//   -add/-a <expr>        add an expression
//   -remove/-r <index>    remove by 1-based index
//   -save/-s <name>       save the current list under a name
//   -clear/-c             clear all expressions
//   -rename <old> <new>   rename a saved list
// With no action, prints the tree, optionally expanded (-expand <index>
// <expr>) and/or filtered (-filter <name>).
DECLARE_API(Watch)
{
    INIT_API_NOEE();
    // NOTE: the unused local `BOOL bExpression` was removed.
    StringHolder addExpression;
    StringHolder aExpression;
    StringHolder saveName;
    StringHolder sName;
    StringHolder expression;
    StringHolder filterName;
    StringHolder renameOldName;
    size_t expandIndex = -1;
    size_t removeIndex = -1;
    BOOL clear = FALSE;
    size_t nArg = 0;
    CMDOption option[] =
    { // name, vptr, type, hasValue
        {"-add", &addExpression.data, COSTRING, TRUE},
        {"-a", &aExpression.data, COSTRING, TRUE},
        {"-save", &saveName.data, COSTRING, TRUE},
        {"-s", &sName.data, COSTRING, TRUE},
        {"-clear", &clear, COBOOL, FALSE},
        {"-c", &clear, COBOOL, FALSE},
        {"-expand", &expandIndex, COSIZE_T, TRUE},
        {"-filter", &filterName.data, COSTRING, TRUE},
        {"-r", &removeIndex, COSIZE_T, TRUE},
        {"-remove", &removeIndex, COSIZE_T, TRUE},
        {"-rename", &renameOldName.data, COSTRING, TRUE},
    };
    CMDValue arg[] =
    { // vptr, type
        {&expression.data, COSTRING}
    };
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    if(addExpression.data != NULL || aExpression.data != NULL)
    {
        // Widen the narrow command-line expression before storing it.
        WCHAR pAddExpression[MAX_EXPRESSION];
        swprintf_s(pAddExpression, MAX_EXPRESSION, W("%S"), addExpression.data != NULL ? addExpression.data : aExpression.data);
        Status = g_watchCmd.Add(pAddExpression);
    }
    else if(removeIndex != -1)
    {
        // Indexes shown to the user are 1-based; 0 is rejected here.
        if(removeIndex <= 0)
        {
            ExtOut("Index must be a postive decimal number\n");
        }
        else
        {
            Status = g_watchCmd.Remove((int)removeIndex);
            if(Status == S_OK)
                ExtOut("Watch expression #%d has been removed\n", removeIndex);
            else if(Status == S_FALSE)
                ExtOut("There is no watch expression with index %d\n", removeIndex);
            else
                ExtOut("Unknown failure 0x%x removing watch expression\n", Status);
        }
    }
    else if(saveName.data != NULL || sName.data != NULL)
    {
        WCHAR pSaveName[MAX_EXPRESSION];
        swprintf_s(pSaveName, MAX_EXPRESSION, W("%S"), saveName.data != NULL ? saveName.data : sName.data);
        Status = g_watchCmd.SaveList(pSaveName);
    }
    else if(clear)
    {
        g_watchCmd.Clear();
    }
    else if(renameOldName.data != NULL)
    {
        // -rename takes the old name as the option value and the new name as
        // the single positional argument.
        if(nArg != 1)
        {
            ExtOut("Must provide an old and new name. Usage: !watch -rename <old_name> <new_name>.\n");
            return S_FALSE;
        }
        WCHAR pOldName[MAX_EXPRESSION];
        swprintf_s(pOldName, MAX_EXPRESSION, W("%S"), renameOldName.data);
        WCHAR pNewName[MAX_EXPRESSION];
        swprintf_s(pNewName, MAX_EXPRESSION, W("%S"), expression.data);
        g_watchCmd.RenameList(pOldName, pNewName);
    }
    // print the tree, possibly with filtering and/or expansion
    else if(expandIndex != -1 || expression.data == NULL)
    {
        WCHAR pExpression[MAX_EXPRESSION];
        pExpression[0] = '\0';
        if(expandIndex != -1)
        {
            if(expression.data != NULL)
            {
                swprintf_s(pExpression, MAX_EXPRESSION, W("%S"), expression.data);
            }
            else
            {
                ExtOut("No expression was provided. Usage !watch -expand <index> <expression>\n");
                return S_FALSE;
            }
        }
        WCHAR pFilterName[MAX_EXPRESSION];
        pFilterName[0] = '\0';
        if(filterName.data != NULL)
        {
            swprintf_s(pFilterName, MAX_EXPRESSION, W("%S"), filterName.data);
        }
        g_watchCmd.Print((int)expandIndex, pExpression, pFilterName);
    }
    else
    {
        ExtOut("Unrecognized argument: %s\n", expression.data);
    }
    return Status;
}
#endif // FEATURE_PAL
// Implements !ClrStack: prints the managed stack of the current thread (or
// all managed threads with -all). -a implies both -p (params) and -l
// (locals); -i switches to the ICorDebug-based implementation, optionally
// scoped to one frame index and/or one variable name.
DECLARE_API(ClrStack)
{
    INIT_API();
    BOOL bAll = FALSE;
    BOOL bParams = FALSE;
    BOOL bLocals = FALSE;
    BOOL bSuppressLines = FALSE;
    BOOL bICorDebug = FALSE;
    BOOL bGC = FALSE;
    BOOL dml = FALSE;
    BOOL bFull = FALSE;
    BOOL bDisplayRegVals = FALSE;
    BOOL bAllThreads = FALSE;
    // NOTE(review): frameToDumpVariablesFor is a DWORD but is parsed with
    // COSIZE_T below — confirm GetCMDOption does not write a full size_t
    // through the pointer on 64-bit hosts.
    DWORD frameToDumpVariablesFor = -1;
    StringHolder cvariableName;
    ArrayHolder<WCHAR> wvariableName = new NOTHROW WCHAR[mdNameLen];
    if (wvariableName == NULL)
    {
        ReportOOM();
        return E_OUTOFMEMORY;
    }
    // BUGFIX: sizeof(wvariableName) is the size of the ArrayHolder object
    // (pointer-sized), not the allocated buffer, so the buffer was left
    // almost entirely uninitialized. Zero the full mdNameLen WCHAR buffer.
    memset(wvariableName, 0, mdNameLen * sizeof(WCHAR));
    size_t nArg = 0;
    CMDOption option[] =
    { // name, vptr, type, hasValue
        {"-a", &bAll, COBOOL, FALSE},
        {"-all", &bAllThreads, COBOOL, FALSE},
        {"-p", &bParams, COBOOL, FALSE},
        {"-l", &bLocals, COBOOL, FALSE},
        {"-n", &bSuppressLines, COBOOL, FALSE},
        {"-i", &bICorDebug, COBOOL, FALSE},
        {"-gc", &bGC, COBOOL, FALSE},
        {"-f", &bFull, COBOOL, FALSE},
        {"-r", &bDisplayRegVals, COBOOL, FALSE },
#ifndef FEATURE_PAL
        {"/d", &dml, COBOOL, FALSE},
#endif
    };
    CMDValue arg[] =
    { // vptr, type
        {&cvariableName.data, COSTRING},
        {&frameToDumpVariablesFor, COSIZE_T},
    };
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &nArg))
    {
        return Status;
    }
    EnableDMLHolder dmlHolder(dml);
    if (bAll || bParams || bLocals)
    {
        // No parameter or local supports for minidump case!
        MINIDUMP_NOT_SUPPORTED();
    }
    if (bAll)
    {
        bParams = bLocals = TRUE;
    }
    if (bICorDebug)
    {
        // A single all-digit positional argument is a frame index, not a
        // variable name; reinterpret it.
        if(nArg > 0)
        {
            bool firstParamIsNumber = true;
            for(DWORD i = 0; i < strlen(cvariableName.data); i++)
                firstParamIsNumber = firstParamIsNumber && isdigit(cvariableName.data[i]);
            if(firstParamIsNumber && nArg == 1)
            {
                frameToDumpVariablesFor = (DWORD)GetExpression(cvariableName.data);
                cvariableName.data[0] = '\0';
            }
        }
        if(cvariableName.data != NULL && strlen(cvariableName.data) > 0)
            swprintf_s(wvariableName, mdNameLen, W("%S\0"), cvariableName.data);
        // Dumping a named variable implies dumping params and locals.
        if(_wcslen(wvariableName) > 0)
            bParams = bLocals = TRUE;
        // The ICorDebug path always emits DML; this shadows the outer holder on purpose.
        EnableDMLHolder dmlHolder(TRUE);
        return ClrStackImplWithICorDebug::ClrStackFromPublicInterface(bParams, bLocals, FALSE, wvariableName, frameToDumpVariablesFor);
    }
    if (bAllThreads) {
        ClrStackImpl::PrintAllThreads(bParams, bLocals, bSuppressLines, bGC, bFull, bDisplayRegVals);
    }
    else {
        ClrStackImpl::PrintCurrentThread(bParams, bLocals, bSuppressLines, bGC, bFull, bDisplayRegVals);
    }
    return S_OK;
}
#ifndef FEATURE_PAL
// Returns TRUE when the target (live process or dump) carries enough memory
// information for the VM inspection commands: anything that is not a small
// minidump qualifies, as does a small dump with full memory or full memory
// info.
BOOL IsMemoryInfoAvailable()
{
    ULONG debuggeeClass;
    ULONG qualifier;
    g_ExtControl->GetDebuggeeType(&debuggeeClass, &qualifier);
    if (qualifier != DEBUG_DUMP_SMALL)
        return TRUE;

    ULONG formatFlags;
    g_ExtControl->GetDumpFormatFlags(&formatFlags);
    if (formatFlags & DEBUG_FORMAT_USER_SMALL_FULL_MEMORY)
        return TRUE;
    if (formatFlags & DEBUG_FORMAT_USER_SMALL_FULL_MEMORY_INFO)
        return TRUE;
    return FALSE;
}
// Implements !VMMap: dumps the target's virtual memory map. Requires a live
// process or a dump with full memory information.
DECLARE_API( VMMap )
{
    INIT_API();

    if (!IsMiniDumpFile() && IsMemoryInfoAvailable())
    {
        vmmap();
    }
    else
    {
        ExtOut("!VMMap requires a full memory dump (.dump /ma) or a live process.\n");
    }

    return Status;
} // DECLARE_API( vmmap )
#endif // FEATURE_PAL
// Implements !SOSFlush: drops cached runtime state so subsequent commands
// re-read the target.
DECLARE_API(SOSFlush)
{
    INIT_API_EXT();
    Runtime::Flush();
#ifdef FEATURE_PAL
    // Non-Windows hosts also cache metadata regions; flush those as well.
    FlushMetadataRegions();
#endif
    return Status;
}
#ifndef FEATURE_PAL
// Implements !VMStat: prints virtual memory statistics for the target.
// Requires a live process or a dump with full memory information.
DECLARE_API( VMStat )
{
    INIT_API();
    if (IsMiniDumpFile() || !IsMemoryInfoAvailable())
    {
        ExtOut("!VMStat requires a full memory dump (.dump /ma) or a live process.\n");
    }
    else
    {
        vmstat();
    }
    return Status;
} // DECLARE_API( VMStat )
/**********************************************************************\
* Routine Description: *
* *
* This function saves a dll to a file. *
* *
\**********************************************************************/
// Implements !SaveModule <address> <file>: reconstructs a PE image from the
// target's memory and writes it to disk. The address may be any address
// inside a debugger-known module or a clr!Module pointer.
DECLARE_API(SaveModule)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();
    StringHolder Location;
    DWORD_PTR moduleAddr = NULL;
    BOOL bIsImage;
    CMDValue arg[] =
    { // vptr, type
        {&moduleAddr, COHEX},
        {&Location.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
    {
        return Status;
    }
    if (nArg != 2)
    {
        ExtOut("Usage: SaveModule <address> <file to save>\n");
        return Status;
    }
    if (moduleAddr == 0) {
        ExtOut ("Invalid arg\n");
        return Status;
    }
    char* ptr = Location.data;
    DWORD_PTR dllBase = 0;
    ULONG64 base;
    // Resolve the image base: first ask the debugger's module list, then
    // fall back to treating the address as a clr!Module and using its IL base.
    if (g_ExtSymbols->GetModuleByOffset(TO_CDADDR(moduleAddr),0,NULL,&base) == S_OK)
    {
        dllBase = TO_TADDR(base);
    }
    else if (IsModule(moduleAddr))
    {
        DacpModuleData module;
        module.Request(g_sos, TO_CDADDR(moduleAddr));
        dllBase = TO_TADDR(module.ilBase);
        if (dllBase == 0)
        {
            ExtOut ("Module does not have base address\n");
            return Status;
        }
    }
    else
    {
        ExtOut ("%p is not a Module or base address\n", SOS_PTR(moduleAddr));
        return Status;
    }
    MEMORY_BASIC_INFORMATION64 mbi;
    if (FAILED(g_ExtData2->QueryVirtual(TO_CDADDR(dllBase), &mbi)))
    {
        ExtOut("Failed to retrieve information about segment %p", SOS_PTR(dllBase));
        return Status;
    }
    // module loaded as an image or mapped as a flat file?
    bIsImage = (mbi.Type == MEM_IMAGE);
    // Read the DOS and NT headers to locate the section table.
    IMAGE_DOS_HEADER DosHeader;
    if (g_ExtData->ReadVirtual(TO_CDADDR(dllBase), &DosHeader, sizeof(DosHeader), NULL) != S_OK)
        return S_FALSE;
    IMAGE_NT_HEADERS Header;
    if (g_ExtData->ReadVirtual(TO_CDADDR(dllBase + DosHeader.e_lfanew), &Header, sizeof(Header), NULL) != S_OK)
        return S_FALSE;
    DWORD_PTR sectionAddr = dllBase + DosHeader.e_lfanew + offsetof(IMAGE_NT_HEADERS,OptionalHeader)
        + Header.FileHeader.SizeOfOptionalHeader;
    IMAGE_SECTION_HEADER section;
    // Per-section bookkeeping: where the section lives in memory (VA) and
    // where it belongs in the output file.
    struct MemLocation
    {
        DWORD_PTR VAAddr;
        DWORD_PTR VASize;
        DWORD_PTR FileAddr;
        DWORD_PTR FileSize;
    };
    int nSection = Header.FileHeader.NumberOfSections;
    ExtOut("%u sections in file\n",nSection);
    MemLocation *memLoc = (MemLocation*)_alloca(nSection*sizeof(MemLocation));
    int indxSec = -1;
    int slot;
    // Insertion-sort the sections by raw file offset so they can be written
    // to disk in file order below.
    for (int n = 0; n < nSection; n++)
    {
        if (g_ExtData->ReadVirtual(TO_CDADDR(sectionAddr), &section, sizeof(section), NULL) == S_OK)
        {
            for (slot = 0; slot <= indxSec; slot ++)
                if (section.PointerToRawData < memLoc[slot].FileAddr)
                    break;
            for (int k = indxSec; k >= slot; k --)
                memcpy(&memLoc[k+1], &memLoc[k], sizeof(MemLocation));
            memLoc[slot].VAAddr = section.VirtualAddress;
            memLoc[slot].VASize = section.Misc.VirtualSize;
            memLoc[slot].FileAddr = section.PointerToRawData;
            memLoc[slot].FileSize = section.SizeOfRawData;
            ExtOut("section %d - VA=%x, VASize=%x, FileAddr=%x, FileSize=%x\n",
                n, memLoc[slot].VAAddr,memLoc[slot]. VASize,memLoc[slot].FileAddr,
                memLoc[slot].FileSize);
            indxSec ++;
        }
        else
        {
            ExtOut("Fail to read PE section info\n");
            return Status;
        }
        sectionAddr += sizeof(section);
    }
    if (ptr[0] == '\0')
    {
        ExtOut ("File not specified\n");
        return Status;
    }
    PCSTR file = ptr;
    // Trim trailing whitespace from the output file name in place.
    ptr += strlen(ptr)-1;
    while (isspace(*ptr))
    {
        *ptr = '\0';
        ptr --;
    }
    HANDLE hFile = CreateFileA(file,GENERIC_WRITE,0,NULL,CREATE_ALWAYS,0,NULL);
    if (hFile == INVALID_HANDLE_VALUE)
    {
        ExtOut ("Fail to create file %s\n", file);
        return Status;
    }
    ULONG pageSize = OSPageSize();
    char *buffer = (char *)_alloca(pageSize);
    DWORD nRead;
    DWORD nWrite;
    // NT PE Headers
    TADDR dwAddr = dllBase;
    TADDR dwEnd = dllBase + Header.OptionalHeader.SizeOfHeaders;
    // Copy the headers a page at a time.
    while (dwAddr < dwEnd)
    {
        nRead = pageSize;
        if (dwEnd - dwAddr < nRead)
            nRead = (ULONG)(dwEnd - dwAddr);
        if (g_ExtData->ReadVirtual(TO_CDADDR(dwAddr), buffer, nRead, &nRead) == S_OK)
        {
            WriteFile(hFile,buffer,nRead,&nWrite,NULL);
        }
        else
        {
            ExtOut ("Fail to read memory\n");
            goto end;
        }
        dwAddr += nRead;
    }
    // Copy each section: read from its VA when the module is mapped as an
    // image, or from its raw file offset when mapped flat.
    for (slot = 0; slot <= indxSec; slot ++)
    {
        dwAddr = dllBase + (bIsImage ? memLoc[slot].VAAddr : memLoc[slot].FileAddr);
        dwEnd = memLoc[slot].FileSize + dwAddr - 1;
        while (dwAddr <= dwEnd)
        {
            nRead = pageSize;
            if (dwEnd - dwAddr + 1 < pageSize)
                nRead = (ULONG)(dwEnd - dwAddr + 1);
            if (g_ExtData->ReadVirtual(TO_CDADDR(dwAddr), buffer, nRead, &nRead) == S_OK)
            {
                WriteFile(hFile,buffer,nRead,&nWrite,NULL);
            }
            else
            {
                ExtOut ("Fail to read memory\n");
                goto end;
            }
            dwAddr += pageSize;
        }
    }
end:
    CloseHandle (hFile);
    return Status;
}
#endif // FEATURE_PAL
// Implements !dbgout: enables SOS debug-output logging, or disables it when
// "-off" is supplied, then reports the resulting state.
DECLARE_API(dbgout)
{
    INIT_API_EXT();

    BOOL turnOff = FALSE;
    CMDOption option[] =
    { // name, vptr, type, hasValue
        {"-off", &turnOff, COBOOL, FALSE},
    };
    if (!GetCMDOption(args, option, _countof(option), NULL, 0, NULL))
    {
        return Status;
    }

    Output::SetDebugOutputEnabled(turnOff ? FALSE : TRUE);
    ExtOut("Debug output logging %s\n", Output::IsDebugOutputEnabled() ? "enabled" : "disabled");

    return Status;
}
// Formats one managed frame ("module!method+offset", optionally preceded by
// "SP IP " when SOS_STACKTRACE_SHOWADDRESSES is set) for the MethodDesc at
// dwStartAddr and appends it to `so`. Returns S_OK on success, S_FALSE for
// skipped explicit frames, E_OUTOFMEMORY/E_FAIL on failure.
static HRESULT DumpMDInfoBuffer(DWORD_PTR dwStartAddr, DWORD Flags, ULONG64 Esp,
    ULONG64 IPAddr, StringOutput& so)
{
#define DOAPPEND(str) \
    do { \
    if (!so.Append((str))) { \
    return E_OUTOFMEMORY; \
    }} while (0)
    // Should we skip explicit frames? They are characterized by Esp = 0, && Eip = 0 or 1.
    // See comment in FormatGeneratedException() for explanation why on non_IA64 Eip is 1, and not 0
    if (!(Flags & SOS_STACKTRACE_SHOWEXPLICITFRAMES) && (Esp == 0) && (IPAddr == 1))
    {
        return S_FALSE;
    }
    DacpMethodDescData MethodDescData;
    if (MethodDescData.Request(g_sos, TO_CDADDR(dwStartAddr)) != S_OK)
    {
        return E_FAIL;
    }
    ArrayHolder<WCHAR> wszNameBuffer = new WCHAR[MAX_LONGPATH+1];
    if (Flags & SOS_STACKTRACE_SHOWADDRESSES)
    {
        _snwprintf_s(wszNameBuffer, MAX_LONGPATH, MAX_LONGPATH, W("%p %p "), (void*)(size_t) Esp, (void*)(size_t) IPAddr); // _TRUNCATE
        DOAPPEND(wszNameBuffer);
    }
    // Resolve the module name: prefer the debugger's module list keyed by the
    // module's PE base, falling back to the PE file name from the DAC.
    DacpModuleData dmd;
    BOOL bModuleNameWorked = FALSE;
    ULONG64 addrInModule = IPAddr;
    if (dmd.Request(g_sos, MethodDescData.ModulePtr) == S_OK)
    {
        CLRDATA_ADDRESS base = 0;
        if (g_sos->GetPEFileBase(dmd.File, &base) == S_OK)
        {
            if (base)
            {
                addrInModule = base;
            }
        }
    }
    ULONG Index;
    ULONG64 base;
    if (g_ExtSymbols->GetModuleByOffset(UL64_TO_CDA(addrInModule), 0, &Index, &base) == S_OK)
    {
        ArrayHolder<char> szModuleName = new char[MAX_LONGPATH+1];
        if (g_ExtSymbols->GetModuleNames(Index, base, NULL, 0, NULL, szModuleName, MAX_LONGPATH, NULL, NULL, 0, NULL) == S_OK)
        {
            MultiByteToWideChar (CP_ACP, 0, szModuleName, MAX_LONGPATH, wszNameBuffer, MAX_LONGPATH);
            DOAPPEND (wszNameBuffer);
            bModuleNameWorked = TRUE;
        }
    }
    else
    {
        // NOTE(review): dmd.File is read here even when dmd.Request above
        // failed, in which case dmd is uninitialized — confirm whether this
        // path can be reached with a failed Request.
        if (g_sos->GetPEFileName(dmd.File, MAX_LONGPATH, wszNameBuffer, NULL) == S_OK)
        {
            if (wszNameBuffer[0] != W('\0'))
            {
                // Keep just the file name, stripping any directory prefix.
                WCHAR *pJustName = _wcsrchr(wszNameBuffer, DIRECTORY_SEPARATOR_CHAR_W);
                if (pJustName == NULL)
                    pJustName = wszNameBuffer - 1;
                DOAPPEND(pJustName + 1);
                bModuleNameWorked = TRUE;
            }
        }
    }
    // Under certain circumstances DacpMethodDescData::GetMethodDescName()
    // returns a module qualified method name
    HRESULT hr = g_sos->GetMethodDescName(dwStartAddr, MAX_LONGPATH, wszNameBuffer, NULL);
    WCHAR* pwszMethNameBegin = (hr != S_OK ? NULL : _wcschr(wszNameBuffer, L'!'));
    if (!bModuleNameWorked && hr == S_OK && pwszMethNameBegin != NULL)
    {
        // if we weren't able to get the module name, but GetMethodDescName returned
        // the module as part of the returned method name, use this data
        DOAPPEND(wszNameBuffer);
    }
    else
    {
        if (!bModuleNameWorked)
        {
            DOAPPEND (W("UNKNOWN"));
        }
        DOAPPEND(W("!"));
        if (hr == S_OK)
        {
            // the module name we retrieved above from debugger will take
            // precedence over the name possibly returned by GetMethodDescName()
            DOAPPEND(pwszMethNameBegin != NULL ? (pwszMethNameBegin+1) : (WCHAR *)wszNameBuffer);
        }
        else
        {
            DOAPPEND(W("UNKNOWN"));
        }
    }
    // Append the "+offset" suffix only for non-zero displacements.
    ULONG64 Displacement = (IPAddr - MethodDescData.NativeCodeAddr);
    if (Displacement)
    {
        _snwprintf_s(wszNameBuffer, MAX_LONGPATH, MAX_LONGPATH, W("+%#x"), Displacement); // _TRUNCATE
        DOAPPEND (wszNameBuffer);
    }
    return S_OK;
#undef DOAPPEND
}
#ifndef FEATURE_PAL
// Records one transition context into the caller-supplied buffer in the
// caller's requested format (simple or full target context). When the buffer
// is absent or full, the transition is only counted. Returns TRUE when a
// context was actually copied.
BOOL AppendContext(LPVOID pTransitionContexts, size_t maxCount, size_t *pcurCount, size_t uiSizeOfContext,
    CROSS_PLATFORM_CONTEXT *context)
{
    // No buffer, or buffer already full: count the transition and bail.
    if (pTransitionContexts == NULL || *pcurCount >= maxCount)
    {
        ++(*pcurCount);
        return FALSE;
    }

    if (uiSizeOfContext == sizeof(StackTrace_SimpleContext))
    {
        StackTrace_SimpleContext *simpleSlots = (StackTrace_SimpleContext *) pTransitionContexts;
        g_targetMachine->FillSimpleContext(&simpleSlots[*pcurCount], context);
    }
    else if (uiSizeOfContext == g_targetMachine->GetContextSize())
    {
        // FillTargetContext ensures we only write uiSizeOfContext bytes in pTransitionContexts
        // and not sizeof(CROSS_PLATFORM_CONTEXT) bytes (which would overrun).
        g_targetMachine->FillTargetContext(pTransitionContexts, context, (int)(*pcurCount));
    }
    else
    {
        // Unrecognized context size: nothing copied, count left unchanged.
        return FALSE;
    }

    ++(*pcurCount);
    return TRUE;
}
// Core of _EFN_StackTrace: walks the current thread's managed stack, builds
// its text representation in `so`, and optionally records a context for each
// managed<->unmanaged transition into pTransitionContexts.
// Size-query contract: when wszTextOut is NULL, *puiTextLength receives the
// required character count; transition contexts are likewise only copied
// when a buffer is supplied, otherwise just counted.
HRESULT CALLBACK ImplementEFNStackTrace(
    PDEBUG_CLIENT client,
    __out_ecount_opt(*puiTextLength) WCHAR wszTextOut[],
    size_t *puiTextLength,
    LPVOID pTransitionContexts,
    size_t *puiTransitionContextCount,
    size_t uiSizeOfContext,
    DWORD Flags)
{
#define DOAPPEND(str) if (!so.Append((str))) { \
    Status = E_OUTOFMEMORY; \
    goto Exit; \
}
    HRESULT Status = E_FAIL;
    StringOutput so;
    size_t transitionContextCount = 0;
    if (puiTextLength == NULL)
    {
        return E_INVALIDARG;
    }
    if (pTransitionContexts)
    {
        if (puiTransitionContextCount == NULL)
        {
            return E_INVALIDARG;
        }
        // Do error checking on context size
        if ((uiSizeOfContext != g_targetMachine->GetContextSize()) &&
            (uiSizeOfContext != sizeof(StackTrace_SimpleContext)))
        {
            return E_INVALIDARG;
        }
    }
    IXCLRDataStackWalk *pStackWalk = NULL;
    IXCLRDataTask* Task;
    ULONG ThreadId;
    if ((Status = g_ExtSystem->GetCurrentThreadSystemId(&ThreadId)) != S_OK ||
        (Status = g_clrData->GetTaskByOSThreadID(ThreadId, &Task)) != S_OK)
    {
        // Not a managed thread.
        return SOS_E_NOMANAGEDCODE;
    }
    Status = Task->CreateStackWalk(CLRDATA_SIMPFRAME_UNRECOGNIZED |
        CLRDATA_SIMPFRAME_MANAGED_METHOD |
        CLRDATA_SIMPFRAME_RUNTIME_MANAGED_CODE |
        CLRDATA_SIMPFRAME_RUNTIME_UNMANAGED_CODE,
        &pStackWalk);
    Task->Release();
    if (Status != S_OK)
    {
        if (Status == E_FAIL)
        {
            return SOS_E_NOMANAGEDCODE;
        }
        return Status;
    }
#ifdef _TARGET_WIN64_
    // Win64 path: drive the native (dbgeng) stack walk and map each frame's
    // IP back to a MethodDesc; a transition is detected whenever the IP
    // starts or stops resolving to managed code.
    ULONG numFrames = 0;
    BOOL bInNative = TRUE;
    Status = GetContextStackTrace(ThreadId, &numFrames);
    if (FAILED(Status))
    {
        goto Exit;
    }
    for (ULONG i = 0; i < numFrames; i++)
    {
        PDEBUG_STACK_FRAME pCur = g_Frames + i;
        CLRDATA_ADDRESS pMD;
        if (g_sos->GetMethodDescPtrFromIP(pCur->InstructionOffset, &pMD) == S_OK)
        {
            if (bInNative || transitionContextCount==0)
            {
                // We only want to list one transition frame if there are multiple frames.
                bInNative = FALSE;
                DOAPPEND (W("(TransitionMU)\n"));
                // For each transition, we need to store the context information
                if (puiTransitionContextCount)
                {
                    // below we cast the i-th AMD64_CONTEXT to CROSS_PLATFORM_CONTEXT
                    AppendContext (pTransitionContexts, *puiTransitionContextCount,
                        &transitionContextCount, uiSizeOfContext, (CROSS_PLATFORM_CONTEXT*)(&(g_FrameContexts[i])));
                }
                else
                {
                    transitionContextCount++;
                }
            }
            Status = DumpMDInfoBuffer((DWORD_PTR) pMD, Flags,
                pCur->StackOffset, pCur->InstructionOffset, so);
            if (FAILED(Status))
            {
                goto Exit;
            }
            else if (Status == S_OK)
            {
                DOAPPEND (W("\n"));
            }
            // for S_FALSE do not append anything
        }
        else
        {
            if (!bInNative)
            {
                // We only want to list one transition frame if there are multiple frames.
                bInNative = TRUE;
                DOAPPEND (W("(TransitionUM)\n"));
                // For each transition, we need to store the context information
                if (puiTransitionContextCount)
                {
                    AppendContext (pTransitionContexts, *puiTransitionContextCount,
                        &transitionContextCount, uiSizeOfContext, (CROSS_PLATFORM_CONTEXT*)(&(g_FrameContexts[i])));
                }
                else
                {
                    transitionContextCount++;
                }
            }
        }
    }
Exit:
#else // _TARGET_WIN64_
#ifdef _DEBUG
    size_t prevLength = 0;
    static WCHAR wszNameBuffer[1024]; // should be large enough
    wcscpy_s(wszNameBuffer, 1024, W("Frame")); // default value
#endif
    // 32-bit path: drive the DAC stack walker directly; frames with
    // frameAddr == 0 are managed methods, the rest are runtime Frames.
    BOOL bInNative = TRUE;
    UINT frameCount = 0;
    do
    {
        DacpFrameData FrameData;
        if ((Status = FrameData.Request(pStackWalk)) != S_OK)
        {
            goto Exit;
        }
        CROSS_PLATFORM_CONTEXT context;
        if ((Status=pStackWalk->GetContext(DT_CONTEXT_FULL, g_targetMachine->GetContextSize(),
            NULL, (BYTE *)&context))!=S_OK)
        {
            goto Exit;
        }
        ExtDbgOut ( " * Ctx[BSI]: %08x %08x %08x ", GetBP(context), GetSP(context), GetIP(context) );
        CLRDATA_ADDRESS pMD;
        if (!FrameData.frameAddr)
        {
            if (bInNative || transitionContextCount==0)
            {
                // We only want to list one transition frame if there are multiple frames.
                bInNative = FALSE;
                DOAPPEND (W("(TransitionMU)\n"));
                // For each transition, we need to store the context information
                if (puiTransitionContextCount)
                {
                    AppendContext (pTransitionContexts, *puiTransitionContextCount,
                        &transitionContextCount, uiSizeOfContext, &context);
                }
                else
                {
                    transitionContextCount++;
                }
            }
            // we may have a method, try to get the methoddesc
            if (g_sos->GetMethodDescPtrFromIP(GetIP(context), &pMD)==S_OK)
            {
                Status = DumpMDInfoBuffer((DWORD_PTR) pMD, Flags,
                    GetSP(context), GetIP(context), so);
                if (FAILED(Status))
                {
                    goto Exit;
                }
                else if (Status == S_OK)
                {
                    DOAPPEND (W("\n"));
                }
                // for S_FALSE do not append anything
            }
        }
        else
        {
#ifdef _DEBUG
            // Debug-only: name the runtime Frame via its vtable for tracing.
            if (Output::IsDebugOutputEnabled())
            {
                DWORD_PTR vtAddr;
                MOVE(vtAddr, TO_TADDR(FrameData.frameAddr));
                if (g_sos->GetFrameName(TO_CDADDR(vtAddr), 1024, wszNameBuffer, NULL) == S_OK)
                    ExtDbgOut("[%ls: %08x] ", wszNameBuffer, FrameData.frameAddr);
                else
                    ExtDbgOut("[Frame: %08x] ", FrameData.frameAddr);
            }
#endif
            if (!bInNative)
            {
                // We only want to list one transition frame if there are multiple frames.
                bInNative = TRUE;
                DOAPPEND (W("(TransitionUM)\n"));
                // For each transition, we need to store the context information
                if (puiTransitionContextCount)
                {
                    AppendContext (pTransitionContexts, *puiTransitionContextCount,
                        &transitionContextCount, uiSizeOfContext, &context);
                }
                else
                {
                    transitionContextCount++;
                }
            }
        }
#ifdef _DEBUG
        // Debug-only: echo whatever was appended to `so` this iteration.
        if (so.Length() > prevLength)
        {
            ExtDbgOut ( "%ls", so.String()+prevLength );
            prevLength = so.Length();
        }
        else
            ExtDbgOut ( "\n" );
#endif
    }
    while ((frameCount++) < MAX_STACK_FRAMES && pStackWalk->Next()==S_OK);
    Status = S_OK;
Exit:
#endif // _TARGET_WIN64_
    if (pStackWalk)
    {
        pStackWalk->Release();
        pStackWalk = NULL;
    }
    // We have finished. Does the user want to copy this data to a buffer?
    if (Status == S_OK)
    {
        if(wszTextOut)
        {
            // They want at least partial output
            wcsncpy_s (wszTextOut, *puiTextLength, so.String(), *puiTextLength-1); // _TRUNCATE
        }
        else
        {
            *puiTextLength = _wcslen (so.String()) + 1;
        }
        if (puiTransitionContextCount)
        {
            *puiTransitionContextCount = transitionContextCount;
        }
    }
    return Status;
}
// TODO: Convert PAL_TRY_NAKED to something that works on the Mac.
// Wraps ImplementEFNStackTrace in a naked try/except so that a hardware
// exception raised while walking a corrupt stack is swallowed and reported
// as the default E_FAIL rather than crashing the debugger host.
HRESULT CALLBACK ImplementEFNStackTraceTry(
    PDEBUG_CLIENT client,
    __out_ecount_opt(*puiTextLength) WCHAR wszTextOut[],
    size_t *puiTextLength,
    LPVOID pTransitionContexts,
    size_t *puiTransitionContextCount,
    size_t uiSizeOfContext,
    DWORD Flags)
{
    HRESULT Status = E_FAIL;
    PAL_TRY_NAKED
    {
        Status = ImplementEFNStackTrace(client, wszTextOut, puiTextLength,
            pTransitionContexts, puiTransitionContextCount,
            uiSizeOfContext, Flags);
    }
    PAL_EXCEPT_NAKED (EXCEPTION_EXECUTE_HANDLER)
    {
        // Intentionally empty: on a fault, Status keeps its E_FAIL default.
    }
    PAL_ENDTRY_NAKED
    return Status;
}
// See sos_stacktrace.h for the contract with the callers regarding the LPVOID arguments.
// Exported dbgeng extension entry point: initializes the SOS API surface
// (INIT_API) and forwards to the exception-guarded implementation.
HRESULT CALLBACK _EFN_StackTrace(
    PDEBUG_CLIENT client,
    __out_ecount_opt(*puiTextLength) WCHAR wszTextOut[],
    size_t *puiTextLength,
    __out_bcount_opt(uiSizeOfContext*(*puiTransitionContextCount)) LPVOID pTransitionContexts,
    size_t *puiTransitionContextCount,
    size_t uiSizeOfContext,
    DWORD Flags)
{
    INIT_API();
    Status = ImplementEFNStackTraceTry(client, wszTextOut, puiTextLength,
        pTransitionContexts, puiTransitionContextCount,
        uiSizeOfContext, Flags);
    return Status;
}
// Parses a managed stack-trace string stored in the System.String object at
// strObjPointer (one "   at <function>(args...)" entry per line) and appends
// " <ptr> <ptr> <function>(args...)" lines to wszBuffer, with both pointers
// set to -1 as placeholders. The final trace line is deliberately dropped
// (see comment in the loop). Returns TRUE if at least one line was parsed.
BOOL FormatFromRemoteString(DWORD_PTR strObjPointer, __out_ecount(cchString) PWSTR wszBuffer, ULONG cchString)
{
    BOOL bRet = FALSE;
    wszBuffer[0] = L'\0';
    DacpObjectData objData;
    if (objData.Request(g_sos, TO_CDADDR(strObjPointer))!=S_OK)
    {
        return bRet;
    }
    strobjInfo stInfo;
    if (MOVE(stInfo, strObjPointer) != S_OK)
    {
        return bRet;
    }
    // Guard the +1 below against a string length of 0xFFFFFFFF.
    DWORD dwBufLength = 0;
    if (!ClrSafeInt<DWORD>::addition(stInfo.m_StringLength, 1, dwBufLength))
    {
        ExtOut("<integer overflow>\n");
        return bRet;
    }
    LPWSTR pwszBuf = new NOTHROW WCHAR[dwBufLength];
    if (pwszBuf == NULL)
    {
        return bRet;
    }
    if (g_sos->GetObjectStringData(TO_CDADDR(strObjPointer), stInfo.m_StringLength+1, pwszBuf, NULL)!=S_OK)
    {
        delete [] pwszBuf;
        return bRet;
    }
    // String is in format
    // <SP><SP><SP>at <function name>(args,...)\n
    // ...
    // Parse and copy just <function name>(args,...)
    LPWSTR pwszPointer = pwszBuf;
    WCHAR PSZSEP[] = W(" at ");
    UINT Length = 0;
    while(1)
    {
        // Every entry must start with the separator; otherwise the string is
        // not in the expected format and we bail with whatever was appended.
        if (_wcsncmp(pwszPointer, PSZSEP, _countof(PSZSEP)-1) != 0)
        {
            delete [] pwszBuf;
            return bRet;
        }
        pwszPointer += _wcslen(PSZSEP);
        LPWSTR nextPos = _wcsstr(pwszPointer, PSZSEP);
        if (nextPos == NULL)
        {
            // Done! Note that we are leaving the function before we add the last
            // line of stack trace to the output string. This is on purpose because
            // this string needs to be merged with a real trace, and the last line
            // of the trace will be common to the real trace.
            break;
        }
        // Temporarily terminate the current entry so it can be formatted,
        // then restore the character afterwards.
        WCHAR c = *nextPos;
        *nextPos = L'\0';
        // Buffer is calculated for sprintf below (" %p %p %S\n");
        WCHAR wszLineBuffer[mdNameLen + 8 + sizeof(size_t)*2];
        // Note that we don't add a newline because we have this embedded in wszLineBuffer
        swprintf_s(wszLineBuffer, _countof(wszLineBuffer), W(" %p %p %s"), (void*)(size_t)-1, (void*)(size_t)-1, pwszPointer);
        Length += (UINT)_wcslen(wszLineBuffer);
        if (wszBuffer)
        {
            wcsncat_s(wszBuffer, cchString, wszLineBuffer, _TRUNCATE);
        }
        *nextPos = c;
        // Move to the next line.
        pwszPointer = nextPos;
    }
    delete [] pwszBuf;
    // Return TRUE only if the stack string had any information that was successfully parsed.
    // (Length > 0) is a good indicator of that.
    bRet = (Length > 0);
    return bRet;
}
// Appends the stack trace stored inside the managed exception object at
// cdaObj to wszStackString: first the remoted text trace
// (_remoteStackTraceString), then the binary trace array (_stackTrace)
// rendered via FormatGeneratedException.
HRESULT AppendExceptionInfo(CLRDATA_ADDRESS cdaObj,
    __out_ecount(cchString) PWSTR wszStackString,
    ULONG cchString,
    BOOL bNestedCase) // If bNestedCase is TRUE, the last frame of the computed stack is left off
{
    DacpObjectData objData;
    if (objData.Request(g_sos, cdaObj) != S_OK)
    {
        return E_FAIL;
    }

    // Make sure it is an exception object, and get the MT of Exception
    CLRDATA_ADDRESS exceptionMT = isExceptionObj(objData.MethodTable);
    if (exceptionMT == NULL)
    {
        return E_INVALIDARG;
    }

    // First try to get exception object data using ISOSDacInterface2
    DacpExceptionObjectData excData;
    BOOL bGotExcData = SUCCEEDED(excData.Request(g_sos, cdaObj));

    int iOffset;

    // Is there a _remoteStackTraceString? We'll want to prepend that data.
    // We only have string data, so IP/SP info has to be set to -1.
    DWORD_PTR strPointer;
    if (bGotExcData)
    {
        strPointer = TO_TADDR(excData.RemoteStackTraceString);
    }
    else
    {
        // NOTE(review): iOffset is not validated here before use; presumably
        // GetObjFieldOffset returns <= 0 when the field is missing — confirm
        // whether a failed lookup can make this MOVE read a bogus address.
        iOffset = GetObjFieldOffset (cdaObj, objData.MethodTable, W("_remoteStackTraceString"));
        MOVE (strPointer, TO_TADDR(cdaObj) + iOffset);
    }
    if (strPointer)
    {
        WCHAR *pwszBuffer = new NOTHROW WCHAR[cchString];
        if (pwszBuffer == NULL)
        {
            return E_OUTOFMEMORY;
        }

        if (FormatFromRemoteString(strPointer, pwszBuffer, cchString))
        {
            // Prepend this stuff to the string for the user
            wcsncat_s(wszStackString, cchString, pwszBuffer, _TRUNCATE);
        }
        delete[] pwszBuffer;
    }

    BOOL bAsync = bGotExcData ? IsAsyncException(excData)
                              : IsAsyncException(cdaObj, objData.MethodTable);

    // Next, the binary stack trace array (_stackTrace), if present.
    DWORD_PTR arrayPtr;
    if (bGotExcData)
    {
        arrayPtr = TO_TADDR(excData.StackTrace);
    }
    else
    {
        iOffset = GetObjFieldOffset (cdaObj, objData.MethodTable, W("_stackTrace"));
        MOVE (arrayPtr, TO_TADDR(cdaObj) + iOffset);
    }

    if (arrayPtr)
    {
        DWORD arrayLen;
        MOVE (arrayLen, arrayPtr + sizeof(DWORD_PTR));

        if (arrayLen)
        {
            // This code is accessing the StackTraceInfo class in the runtime.
            // See: https://github.com/dotnet/runtime/blob/master/src/coreclr/src/vm/clrex.h
#ifdef _TARGET_WIN64_
            DWORD_PTR dataPtr = arrayPtr + sizeof(DWORD_PTR) + sizeof(DWORD) + sizeof(DWORD);
#else
            DWORD_PTR dataPtr = arrayPtr + sizeof(DWORD_PTR) + sizeof(DWORD);
#endif // _TARGET_WIN64_
            size_t stackTraceSize = 0;
            MOVE (stackTraceSize, dataPtr); // data length is stored at the beginning of the array in this case

            DWORD cbStackSize = static_cast<DWORD>(stackTraceSize * sizeof(StackTraceElement));
            dataPtr += sizeof(size_t) + sizeof(size_t); // skip the array header, then goes the data

            if (stackTraceSize != 0)
            {
                // Two-pass formatting: first call returns the required length,
                // second call renders into the allocated buffer.
                size_t iLength = FormatGeneratedException (dataPtr, cbStackSize, NULL, 0, bAsync, bNestedCase);
                WCHAR *pwszBuffer = new NOTHROW WCHAR[iLength + 1];
                if (pwszBuffer)
                {
                    FormatGeneratedException(dataPtr, cbStackSize, pwszBuffer, iLength + 1, bAsync, bNestedCase);
                    wcsncat_s(wszStackString, cchString, pwszBuffer, _TRUNCATE);
                    delete[] pwszBuffer;
                }
                else
                {
                    return E_OUTOFMEMORY;
                }
            }
        }
    }
    return S_OK;
}
// Builds the full managed exception stack text for cdaStackObj into
// wszStackString. When cdaStackObj is NULL, falls back to the current managed
// thread's last thrown object. Any nested exceptions on the thread are
// appended first (with their last frame trimmed), then the primary exception.
HRESULT ImplementEFNGetManagedExcepStack(
    CLRDATA_ADDRESS cdaStackObj,
    __out_ecount(cchString) PWSTR wszStackString,
    ULONG cchString)
{
    HRESULT Status = E_FAIL;

    if (wszStackString == NULL || cchString == 0)
    {
        return E_INVALIDARG;
    }

    CLRDATA_ADDRESS threadAddr = GetCurrentManagedThread();
    DacpThreadData Thread;
    BOOL bCanUseThreadContext = TRUE;
    ZeroMemory(&Thread, sizeof(DacpThreadData));

    if ((threadAddr == NULL) || (Thread.Request(g_sos, threadAddr) != S_OK))
    {
        // The current thread is unmanaged
        bCanUseThreadContext = FALSE;
    }

    if (cdaStackObj == NULL)
    {
        if (!bCanUseThreadContext)
        {
            // No explicit object and no managed thread to fall back to.
            return E_INVALIDARG;
        }

        // Dereference the last-thrown-object handle to get the exception object.
        TADDR taLTOH = NULL;
        if ((!SafeReadMemory(TO_TADDR(Thread.lastThrownObjectHandle),
                            &taLTOH,
                            sizeof(taLTOH), NULL)) || (taLTOH==NULL))
        {
            return Status;
        }
        else
        {
            cdaStackObj = TO_CDADDR(taLTOH);
        }
    }

    // Put the stack trace header on
    AddExceptionHeader(wszStackString, cchString);

    // First is there a nested exception?
    if (bCanUseThreadContext && Thread.firstNestedException)
    {
        CLRDATA_ADDRESS obj = 0, next = 0;
        CLRDATA_ADDRESS currentNested = Thread.firstNestedException;
        do
        {
            Status = g_sos->GetNestedExceptionData(currentNested, &obj, &next);

            // deal with the inability to read a nested exception gracefully
            if (Status != S_OK)
            {
                break;
            }

            Status = AppendExceptionInfo(obj, wszStackString, cchString, TRUE);
            currentNested = next;
        }
        while(currentNested != NULL);
    }

    Status = AppendExceptionInfo(cdaStackObj, wszStackString, cchString, FALSE);
    return Status;
}
// TODO: Enable this when ImplementEFNStackTraceTry is fixed.
// This function, like VerifyDAC, exists for the purpose of testing
// hard-to-get-to SOS APIs.
DECLARE_API(VerifyStackTrace)
{
    INIT_API();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    BOOL bVerifyManagedExcepStack = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-ManagedExcepStack", &bVerifyManagedExcepStack, COBOOL, FALSE},
    };

    if (!GetCMDOption(args, option, _countof(option), NULL,0,NULL))
    {
        return Status;
    }

    if (bVerifyManagedExcepStack)
    {
        // Exercise ImplementEFNGetManagedExcepStack twice: once with the
        // current thread's last thrown exception passed explicitly, and once
        // with NULL to test the fallback path.
        CLRDATA_ADDRESS threadAddr = GetCurrentManagedThread();
        DacpThreadData Thread;

        TADDR taExc = NULL;
        if ((threadAddr == NULL) || (Thread.Request(g_sos, threadAddr) != S_OK))
        {
            ExtOut("The current thread is unmanaged\n");
            return Status;
        }

        TADDR taLTOH = NULL;
        if ((!SafeReadMemory(TO_TADDR(Thread.lastThrownObjectHandle),
                            &taLTOH,
                            sizeof(taLTOH), NULL)) || (taLTOH == NULL))
        {
            ExtOut("There is no current managed exception on this thread\n");
            return Status;
        }
        else
        {
            taExc = taLTOH;
        }

        const SIZE_T cchStr = 4096;
        WCHAR *wszStr = (WCHAR *)alloca(cchStr * sizeof(WCHAR));
        if (ImplementEFNGetManagedExcepStack(TO_CDADDR(taExc), wszStr, cchStr) != S_OK)
        {
            ExtOut("Error!\n");
            return Status;
        }

        ExtOut("_EFN_GetManagedExcepStack(%P, wszStr, sizeof(wszStr)) returned:\n", SOS_PTR(taExc));
        ExtOut("%S\n", wszStr);

        if (ImplementEFNGetManagedExcepStack((ULONG64)NULL, wszStr, cchStr) != S_OK)
        {
            ExtOut("Error!\n");
            return Status;
        }

        ExtOut("_EFN_GetManagedExcepStack(NULL, wszStr, sizeof(wszStr)) returned:\n");
        ExtOut("%S\n", wszStr);
    }
    else
    {
        // Exercise the stack trace APIs three ways: a sizing pass (NULL
        // buffers), a pass with full platform contexts, and a pass with the
        // simplified FP/SP/IP contexts.
        size_t textLength = 0;
        size_t contextLength = 0;
        Status = ImplementEFNStackTraceTry(client,
                                NULL,
                                &textLength,
                                NULL,
                                &contextLength,
                                0,
                                0);

        if (Status != S_OK)
        {
            ExtOut("Error: %lx\n", Status);
            return Status;
        }

        ExtOut("Number of characters requested: %d\n", textLength);
        WCHAR *wszBuffer = new NOTHROW WCHAR[textLength + 1];
        if (wszBuffer == NULL)
        {
            ReportOOM();
            return Status;
        }

        // For the transition contexts buffer the callers are expected to allocate
        // contextLength * sizeof(TARGET_CONTEXT), and not
        // contextLength * sizeof(CROSS_PLATFORM_CONTEXT). See sos_stacktrace.h for
        // details.
        LPBYTE pContexts = new NOTHROW BYTE[contextLength * g_targetMachine->GetContextSize()];

        if (pContexts == NULL)
        {
            ReportOOM();
            delete[] wszBuffer;
            return Status;
        }

        Status = ImplementEFNStackTrace(client,
                                wszBuffer,
                                &textLength,
                                pContexts,
                                &contextLength,
                                g_targetMachine->GetContextSize(),
                                0);

        if (Status != S_OK)
        {
            ExtOut("Error: %lx\n", Status);
            delete[] wszBuffer;
            delete [] pContexts;
            return Status;
        }

        ExtOut("%S\n", wszBuffer);

        ExtOut("Context information:\n");
        if (IsDbgTargetX86())
        {
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
                "Ebp", "Esp", "Eip");
        }
        else if (IsDbgTargetAmd64())
        {
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
                "Rbp", "Rsp", "Rip");
        }
        else if (IsDbgTargetArm())
        {
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
                "FP", "SP", "PC");
        }
        else
        {
            ExtOut("Unsupported platform");
            delete [] pContexts;
            delete[] wszBuffer;
            return S_FALSE;
        }

        // Dump frame pointer / stack pointer / instruction pointer per frame.
        for (size_t j=0; j < contextLength; j++)
        {
            CROSS_PLATFORM_CONTEXT *pCtx = (CROSS_PLATFORM_CONTEXT*)(pContexts + j*g_targetMachine->GetContextSize());
            ExtOut("%p %p %p\n", GetBP(*pCtx), GetSP(*pCtx), GetIP(*pCtx));
        }

        delete [] pContexts;

        StackTrace_SimpleContext *pSimple = new NOTHROW StackTrace_SimpleContext[contextLength];
        if (pSimple == NULL)
        {
            ReportOOM();
            delete[] wszBuffer;
            return Status;
        }

        Status = ImplementEFNStackTrace(client,
                                wszBuffer,
                                &textLength,
                                pSimple,
                                &contextLength,
                                sizeof(StackTrace_SimpleContext),
                                0);

        if (Status != S_OK)
        {
            ExtOut("Error: %lx\n", Status);
            delete[] wszBuffer;
            delete [] pSimple;
            return Status;
        }

        ExtOut("Simple Context information:\n");
        if (IsDbgTargetX86())
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
                    "Ebp", "Esp", "Eip");
        else if (IsDbgTargetAmd64())
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
                    "Rbp", "Rsp", "Rip");
        else if (IsDbgTargetArm())
            ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
                    "FP", "SP", "PC");
        else
        {
            ExtOut("Unsupported platform");
            delete[] wszBuffer;
            delete [] pSimple;
            return S_FALSE;
        }

        for (size_t j=0; j < contextLength; j++)
        {
            ExtOut("%p %p %p\n", SOS_PTR(pSimple[j].FrameOffset),
                    SOS_PTR(pSimple[j].StackOffset),
                    SOS_PTR(pSimple[j].InstructionOffset));
        }
        delete [] pSimple;
        delete[] wszBuffer;
    }

    return Status;
}
// This is an internal-only Apollo extension to save breakpoint/watch state
// to the file named by the single required argument.
DECLARE_API(SaveState)
{
    INIT_API_NOEE();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    StringHolder filePath;
    CMDValue arg[] =
    {   // vptr, type
        {&filePath.data, COSTRING},
    };
    size_t nArg;
    if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
    {
        return E_FAIL;
    }

    // BUGFIX: bail out after printing usage. Previously this branch fell
    // through and called fopen_s with a NULL path when no argument was given.
    if (nArg == 0)
    {
        ExtOut("Usage: !SaveState <file_path>\n");
        return E_FAIL;
    }

    FILE* pFile;
    errno_t error = fopen_s(&pFile, filePath.data, "w");
    if (error != 0)
    {
        ExtOut("Failed to open file %s, error=0x%x\n", filePath.data, error);
        return E_FAIL;
    }

    g_bpoints.SaveBreakpoints(pFile);
    g_watchCmd.SaveListToFile(pFile);

    fclose(pFile);
    ExtOut("Session breakpoints and watch expressions saved to %s\n", filePath.data);
    return S_OK;
}
#endif // FEATURE_PAL
// Toggles suppression of JIT optimization in the debuggee. Accepts a single
// "on"/"off" argument (case-insensitive); anything else prints usage.
DECLARE_API(SuppressJitOptimization)
{
    INIT_API_NOEE();
    MINIDUMP_NOT_SUPPORTED();
    ONLY_SUPPORTED_ON_WINDOWS_TARGET();

    StringHolder onOff;
    CMDValue arg[] =
    {   // vptr, type
        {&onOff.data, COSTRING},
    };
    size_t nArg;
    if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
    {
        return E_FAIL;
    }

    // Decide the requested mode up front (short-circuit keeps onOff.data
    // from being touched when no argument was supplied).
    BOOL requestOn  = (nArg == 1) && (_stricmp(onOff.data, "On") == 0);
    BOOL requestOff = (nArg == 1) && (_stricmp(onOff.data, "Off") == 0);

    if (requestOn)
    {
        // if CLR is already loaded, try to change the flags now
        if (CheckEEDll() == S_OK)
        {
            SetNGENCompilerFlags(CORDEBUG_JIT_DISABLE_OPTIMIZATION);
        }

        if (!g_fAllowJitOptimization)
        {
            ExtOut("JIT optimization is already suppressed\n");
        }
        else
        {
            g_fAllowJitOptimization = FALSE;
            g_ExtControl->Execute(DEBUG_EXECUTE_NOT_LOGGED, "sxe -c \"!SOSHandleCLRN\" clrn", 0);
            ExtOut("JIT optimization will be suppressed\n");
        }
    }
    else if (requestOff)
    {
        // if CLR is already loaded, try to change the flags now
        if (CheckEEDll() == S_OK)
        {
            SetNGENCompilerFlags(CORDEBUG_JIT_DEFAULT);
        }

        if (g_fAllowJitOptimization)
        {
            ExtOut("JIT optimization is already permitted\n");
        }
        else
        {
            g_fAllowJitOptimization = TRUE;
            ExtOut("JIT optimization will be permitted\n");
        }
    }
    else
    {
        ExtOut("Usage: !SuppressJitOptimization <on|off>\n");
    }

    return S_OK;
}
// Uses ICorDebug to set the state of desired NGEN compiler flags. This can suppress pre-jitted optimized
// code
HRESULT SetNGENCompilerFlags(DWORD flags)
{
    HRESULT hr;

    ToRelease<ICorDebugProcess2> proc2;
    ICorDebugProcess* pCorDebugProcess;
    if (FAILED(hr = g_pRuntime->GetCorDebugInterface(&pCorDebugProcess)))
    {
        ExtOut("SOS: warning, prejitted code optimizations could not be changed. Failed to load ICorDebug HR = 0x%x\n", hr);
    }
    else if (FAILED(pCorDebugProcess->QueryInterface(__uuidof(ICorDebugProcess2), (void**) &proc2)))
    {
        // ICorDebugProcess2 unavailable: only a problem when the caller asked
        // for something other than the default flags.
        if (flags != CORDEBUG_JIT_DEFAULT)
        {
            ExtOut("SOS: warning, prejitted code optimizations could not be changed. This CLR version doesn't support the functionality\n");
        }
        else
        {
            hr = S_OK;
        }
    }
    else if (FAILED(hr = proc2->SetDesiredNGENCompilerFlags(flags)))
    {
        // Versions of CLR that don't have SetDesiredNGENCompilerFlags DAC-ized will return E_FAIL.
        // This was first supported in the clr_triton branch around 4/1/12, Apollo release
        // It will likely be supported in desktop CLR during Dev12
        if(hr == E_FAIL)
        {
            if(flags != CORDEBUG_JIT_DEFAULT)
            {
                ExtOut("SOS: warning, prejitted code optimizations could not be changed. This CLR version doesn't support the functionality\n");
            }
            else
            {
                hr = S_OK;
            }
        }
        else if (hr == CORDBG_E_NGEN_NOT_SUPPORTED)
        {
            if (flags != CORDEBUG_JIT_DEFAULT)
            {
                ExtOut("SOS: warning, prejitted code optimizations could not be changed. This CLR version doesn't support NGEN\n");
            }
            else
            {
                hr = S_OK;
            }
        }
        else if (hr == CORDBG_E_MUST_BE_IN_CREATE_PROCESS)
        {
            // Too late to change the flags; still succeed if the current
            // flags already match what was requested.
            DWORD currentFlags = 0;
            if (FAILED(hr = proc2->GetDesiredNGENCompilerFlags(&currentFlags)))
            {
                ExtOut("SOS: warning, prejitted code optimizations could not be changed. GetDesiredNGENCompilerFlags failed hr=0x%x\n", hr);
            }
            else if (currentFlags != flags)
            {
                ExtOut("SOS: warning, prejitted code optimizations could not be changed at this time. This setting is fixed once CLR starts\n");
            }
            else
            {
                hr = S_OK;
            }
        }
        else
        {
            ExtOut("SOS: warning, prejitted code optimizations could not be changed at this time. SetDesiredNGENCompilerFlags hr = 0x%x\n", hr);
        }
    }

    return hr;
}
// Arms a one-shot break on the next managed exception catch: sets the global
// flag and asks the DAC to notify on exception-catch-enter.
DECLARE_API(StopOnCatch)
{
    INIT_API();
    MINIDUMP_NOT_SUPPORTED();

    g_stopOnNextCatch = TRUE;
    ULONG32 flags = 0;
    // NOTE(review): the HRESULTs of Get/SetOtherNotificationFlags are
    // ignored; on failure the notification silently never fires — confirm
    // whether that is acceptable here.
    g_clrData->GetOtherNotificationFlags(&flags);
    flags |= CLRDATA_NOTIFY_ON_EXCEPTION_CATCH_ENTER;
    g_clrData->SetOtherNotificationFlags(flags);
    ExtOut("Debuggee will break the next time a managed exception is caught during execution\n");
    return S_OK;
}
#ifndef FEATURE_PAL
// This is an undocumented SOS extension command intended to help test SOS
// It causes the Dml output to be printed to the console uninterpretted so
// that a test script can read the commands which are hidden in the markup
DECLARE_API(ExposeDML)
{
    // The toggle is process-global; this command never resets it.
    Output::SetDMLExposed(true);
    return S_OK;
}
// According to kksharma the Windows debuggers always sign-extend
// arguments when calling externally, therefore StackObjAddr
// conforms to CLRDATA_ADDRESS contract.
// ANSI variant: builds the managed exception stack as wide text, then
// converts it into the caller's narrow buffer.
HRESULT CALLBACK
_EFN_GetManagedExcepStack(
    PDEBUG_CLIENT client,
    ULONG64 StackObjAddr,
    __out_ecount (cbString) PSTR szStackString,
    ULONG cbString
    )
{
    INIT_API();

    // Scratch wide buffer sized to the caller's character count.
    ArrayHolder<WCHAR> tmpStr = new NOTHROW WCHAR[cbString];
    if (tmpStr == NULL)
    {
        ReportOOM();
        return E_OUTOFMEMORY;
    }

    if (FAILED(Status = ImplementEFNGetManagedExcepStack(StackObjAddr, tmpStr, cbString)))
    {
        return Status;
    }

    if (WideCharToMultiByte(CP_ACP, WC_NO_BEST_FIT_CHARS, tmpStr, -1, szStackString, cbString, NULL, NULL) == 0)
    {
        return E_FAIL;
    }

    return S_OK;
}
// same as _EFN_GetManagedExcepStack, but returns the stack as a wide string.
HRESULT CALLBACK
_EFN_GetManagedExcepStackW(
    PDEBUG_CLIENT client,
    ULONG64 StackObjAddr,
    __out_ecount(cchString) PWSTR wszStackString,
    ULONG cchString
    )
{
    INIT_API();

    // Thin wrapper: all the work happens in ImplementEFNGetManagedExcepStack.
    return ImplementEFNGetManagedExcepStack(StackObjAddr, wszStackString, cchString);
}
// According to kksharma the Windows debuggers always sign-extend
// arguments when calling externally, therefore objAddr
// conforms to CLRDATA_ADDRESS contract.
// Writes the managed type name of the object at objAddr into szName (ANSI).
HRESULT CALLBACK
_EFN_GetManagedObjectName(
    PDEBUG_CLIENT client,
    ULONG64 objAddr,
    __out_ecount (cbName) PSTR szName,
    ULONG cbName
    )
{
    INIT_API ();

    if (!sos::IsObject(objAddr, false))
    {
        return E_INVALIDARG;
    }

    sos::Object obj = TO_TADDR(objAddr);
    // Length is +1 so the terminating NUL is included in the conversion.
    if (WideCharToMultiByte(CP_ACP, 0, obj.GetTypeName(), (int) (_wcslen(obj.GetTypeName()) + 1),
                            szName, cbName, NULL, NULL) == 0)
    {
        return E_FAIL;
    }
    return S_OK;
}
// According to kksharma the Windows debuggers always sign-extend
// arguments when calling externally, therefore objAddr
// conforms to CLRDATA_ADDRESS contract.
// Looks up field szFieldName on the object at objAddr, returning the field's
// offset (pOffset) and/or its raw value (pValue). At least one of the two
// out-parameters must be non-NULL.
HRESULT CALLBACK
_EFN_GetManagedObjectFieldInfo(
    PDEBUG_CLIENT client,
    ULONG64 objAddr,
    __out_ecount (mdNameLen) PSTR szFieldName,
    PULONG64 pValue,
    PULONG pOffset
    )
{
    INIT_API();
    DacpObjectData objData;
    LPWSTR fieldName = (LPWSTR)alloca(mdNameLen * sizeof(WCHAR));

    if (szFieldName == NULL || *szFieldName == '\0' ||
        objAddr == NULL)
    {
        return E_FAIL;
    }

    if (pOffset == NULL && pValue == NULL)
    {
        // One of these needs to be valid
        return E_FAIL;
    }

    if (FAILED(objData.Request(g_sos, objAddr)))
    {
        return E_FAIL;
    }

    MultiByteToWideChar(CP_ACP,0,szFieldName,-1,fieldName,mdNameLen);

    int iOffset = GetObjFieldOffset (objAddr, objData.MethodTable, fieldName);
    if (iOffset <= 0)
    {
        return E_FAIL;
    }

    if (pOffset)
    {
        *pOffset = (ULONG) iOffset;
    }

    if (pValue)
    {
        // Always reads a full ULONG64 regardless of the field's actual size.
        if (FAILED(g_ExtData->ReadVirtual(UL64_TO_CDA(objAddr + iOffset), pValue, sizeof(ULONG64), NULL)))
        {
            return E_FAIL;
        }
    }

    return S_OK;
}
// Test harness for _EFN_GetManagedThread: maps an OS thread id (hex argument)
// to the corresponding managed thread address and prints the result.
DECLARE_API(VerifyGMT)
{
    ULONG osThreadId;
    {
        // Scoped braces around INIT_API: _EFN_GetManagedThread below runs its
        // own INIT_API — presumably the scoping avoids clashing locals; confirm.
        INIT_API();
        CMDValue arg[] =
        {   // vptr, type
            {&osThreadId, COHEX},
        };
        size_t nArg;

        if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
        {
            return Status;
        }
    }
    ULONG64 managedThread;
    HRESULT hr = _EFN_GetManagedThread(client, osThreadId, &managedThread);
    {
        INIT_API();
        ONLY_SUPPORTED_ON_WINDOWS_TARGET();

        if (SUCCEEDED(hr)) {
            ExtOut("%08x %p\n", osThreadId, managedThread);
        }
        else {
            ExtErr("_EFN_GetManagedThread FAILED %08x\n", hr);
        }
    }
    return hr;
}
// Resolves the managed thread whose OS thread id matches osThreadId by
// walking the runtime's thread store list. On success *pManagedThread holds
// the thread's address; returns E_INVALIDARG when no thread matches.
HRESULT CALLBACK
_EFN_GetManagedThread(
    PDEBUG_CLIENT client,
    ULONG osThreadId,
    PULONG64 pManagedThread)
{
    INIT_API();

    _ASSERTE(pManagedThread != nullptr);
    *pManagedThread = 0;

    DacpThreadStoreData threadStore;
    if ((Status = threadStore.Request(g_sos)) != S_OK)
    {
        return Status;
    }

    // Walk the singly-linked thread list until the id matches or we run out.
    for (CLRDATA_ADDRESS current = threadStore.firstThread; current != 0; )
    {
        DacpThreadData threadData;
        if ((Status = threadData.Request(g_sos, current)) != S_OK)
        {
            return Status;
        }
        if (threadData.osThreadId == osThreadId)
        {
            *pManagedThread = (ULONG64)current;
            return S_OK;
        }
        current = threadData.nextThread;
    }

    return E_INVALIDARG;
}
#endif // FEATURE_PAL
//
// Sets the .NET Core runtime path to use to run the managed code within SOS/native debugger.
//
DECLARE_API(SetHostRuntime)
{
    INIT_API_EXT();

    StringHolder hostRuntimeDirectory;
    CMDValue arg[] =
    {
        {&hostRuntimeDirectory.data, COSTRING},
    };
    size_t narg;
    if (!GetCMDOption(args, nullptr, 0, arg, _countof(arg), &narg))
    {
        return E_FAIL;
    }
    if (narg > 0)
    {
        // Once managed hosting has been initialized the path can no longer change.
        if (IsHostingInitialized())
        {
            ExtErr("Runtime hosting already initialized %s\n", g_hostRuntimeDirectory);
            return E_FAIL;
        }
        // Replace any previously stored path with a copy of the new one.
        if (g_hostRuntimeDirectory != nullptr)
        {
            free((void*)g_hostRuntimeDirectory);
        }
        g_hostRuntimeDirectory = _strdup(hostRuntimeDirectory.data);
    }
    // With or without an argument, echo the current setting if one exists.
    if (g_hostRuntimeDirectory != nullptr)
    {
        ExtOut("Host runtime path: %s\n", g_hostRuntimeDirectory);
    }
    return S_OK;
}
//
// Sets the symbol server path.
//
DECLARE_API(SetSymbolServer)
{
    INIT_API_EXT();

    StringHolder symbolCache;
    StringHolder searchDirectory;
    StringHolder windowsSymbolPath;
    size_t timeoutInMinutes = 0;
    BOOL disable = FALSE;
    BOOL loadNative = FALSE;
    BOOL msdl = FALSE;
    BOOL symweb = FALSE;
    BOOL logging = FALSE;
    CMDOption option[] =
    {   // name, vptr, type, hasValue
        {"-disable", &disable, COBOOL, FALSE},
        {"-cache", &symbolCache.data, COSTRING, TRUE},
        {"-directory", &searchDirectory.data, COSTRING, TRUE},
        {"-timeout", &timeoutInMinutes, COSIZE_T, TRUE},
        {"-ms", &msdl, COBOOL, FALSE},
        {"-log", &logging, COBOOL, FALSE},
#ifdef FEATURE_PAL
        {"-loadsymbols", &loadNative, COBOOL, FALSE},
        {"-sympath", &windowsSymbolPath.data, COSTRING, TRUE},
#else
        {"-mi", &symweb, COBOOL, FALSE},
#endif
    };
    StringHolder symbolServer;
    CMDValue arg[] =
    {
        {&symbolServer.data, COSTRING},
    };
    size_t narg;
    if (!GetCMDOption(args, option, _countof(option), arg, _countof(arg), &narg))
    {
        return E_FAIL;
    }

    // -ms (public server) and -mi (internal symweb) are mutually exclusive,
    // and neither may be combined with an explicit server path argument.
    if (msdl && symweb)
    {
        ExtErr("Cannot have both -ms and -mi options\n");
        return E_FAIL;
    }

    if ((msdl || symweb) && symbolServer.data != nullptr)
    {
        ExtErr("Cannot have -ms or -mi option and a symbol server path\n");
        return E_FAIL;
    }

    if (disable) {
        DisableSymbolStore();
    }

    // Any configuration option present: (re)initialize the symbol store.
    if (logging || msdl || symweb || symbolServer.data != nullptr || symbolCache.data != nullptr || searchDirectory.data != nullptr || windowsSymbolPath.data != nullptr)
    {
        Status = InitializeSymbolStore(logging, msdl, symweb, symbolServer.data, (int)timeoutInMinutes, symbolCache.data, searchDirectory.data, windowsSymbolPath.data);
        if (FAILED(Status))
        {
            return Status;
        }
        // Report each piece of configuration that was applied.
        if (msdl)
        {
            ExtOut("Added Microsoft public symbol server\n");
        }
        if (symweb)
        {
            ExtOut("Added internal symweb symbol server\n");
        }
        if (symbolServer.data != nullptr)
        {
            ExtOut("Added symbol server: %s\n", symbolServer.data);
        }
        if (symbolCache.data != nullptr)
        {
            ExtOut("Added symbol cache path: %s\n", symbolCache.data);
        }
        if (searchDirectory.data != nullptr)
        {
            ExtOut("Added symbol directory path: %s\n", searchDirectory.data);
        }
        if (windowsSymbolPath.data != nullptr)
        {
            ExtOut("Added Windows symbol path: %s\n", windowsSymbolPath.data);
        }
        if (logging)
        {
            ExtOut("Symbol download logging enabled\n");
        }
    }
#ifdef FEATURE_PAL
    else if (loadNative)
    {
        Status = LoadNativeSymbols();
    }
#endif
    else
    {
        // No options: just display the current symbol store configuration.
        DisplaySymbolStore();
    }

    return Status;
}
//
// Sets the runtime module path
//
DECLARE_API(SetClrPath)
{
    INIT_API_EXT();

    StringHolder modulePath;
    CMDValue arg[] =
    {
        {&modulePath.data, COSTRING},
    };
    size_t argCount = 0;
    if (!GetCMDOption(args, nullptr, 0, arg, _countof(arg), &argCount))
    {
        return E_FAIL;
    }

    // An argument replaces any previously stored path with a heap copy.
    if (argCount > 0)
    {
        if (g_runtimeModulePath != nullptr)
        {
            free((void*)g_runtimeModulePath);
        }
        g_runtimeModulePath = _strdup(modulePath.data);
    }

    // Always echo the currently configured path when one is set.
    if (g_runtimeModulePath != nullptr)
    {
        ExtOut("Runtime module path: %s\n", g_runtimeModulePath);
    }
    return S_OK;
}
// Prints the documentation section for pszCmdName, loaded from the embedded
// DOCUMENTATION resource (Windows) or the sosdocsunix.txt file next to the
// SOS module (other platforms). The full document text is loaded once and
// cached in a function-local static for the life of the process.
void PrintHelp (__in_z LPCSTR pszCmdName)
{
    static LPSTR pText = NULL;

    if (pText == NULL) {
#ifndef FEATURE_PAL
        HGLOBAL hResource = NULL;
        HRSRC hResInfo = FindResource (g_hInstance, TEXT ("DOCUMENTATION"), TEXT ("TEXT"));
        if (hResInfo) hResource = LoadResource (g_hInstance, hResInfo);
        if (hResource) pText = (LPSTR) LockResource (hResource);
        if (pText == NULL)
        {
            ExtErr("Error loading documentation resource\n");
            return;
        }
#else
        ArrayHolder<char> szSOSModulePath = new char[MAX_LONGPATH + 1];
        UINT cch = MAX_LONGPATH;
        if (!PAL_GetPALDirectoryA(szSOSModulePath, &cch)) {
            ExtErr("Error: Failed to get SOS module directory\n");
            return;
        }

        char lpFilename[MAX_LONGPATH + 12]; // + 12 to make enough room for strcat function.
        strcpy_s(lpFilename, _countof(lpFilename), szSOSModulePath);
        strcat_s(lpFilename, _countof(lpFilename), "sosdocsunix.txt");

        HANDLE hSosDocFile = CreateFileA(lpFilename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0, NULL);
        if (hSosDocFile == INVALID_HANDLE_VALUE) {
            ExtErr("Error finding documentation file\n");
            return;
        }

        HANDLE hMappedSosDocFile = CreateFileMappingA(hSosDocFile, NULL, PAGE_READONLY, 0, 0, NULL);
        CloseHandle(hSosDocFile);
        if (hMappedSosDocFile == NULL) {
            ExtErr("Error mapping documentation file\n");
            return;
        }

        // The mapped view is never unmapped here: pText is cached for reuse.
        pText = (LPSTR)MapViewOfFile(hMappedSosDocFile, FILE_MAP_READ, 0, 0, 0);
        CloseHandle(hMappedSosDocFile);
        if (pText == NULL)
        {
            ExtErr("Error loading documentation file\n");
            return;
        }
#endif
    }

    // Find our line in the text file
    char searchString[MAX_LONGPATH];
    sprintf_s(searchString, _countof(searchString), "COMMAND: %s.", pszCmdName);

    LPSTR pStart = strstr(pText, searchString);
    LPSTR pEnd = NULL;
    if (!pStart)
    {
        ExtErr("Documentation for %s not found.\n", pszCmdName);
        return;
    }

    // Go to the end of this line:
    pStart = strchr(pStart, '\n');
    if (!pStart)
    {
        ExtErr("Expected newline in documentation resource.\n");
        return;
    }

    // Bypass the newline that pStart points to and setup pEnd for the loop below. We set
    // pEnd to be the old pStart since we add one to it when we call strstr.
    pEnd = pStart++;

    // Find the first occurrence of \\ followed by an \r or an \n on a line by itself.
    do
    {
        pEnd = strstr(pEnd+1, "\\\\");
    } while (pEnd && ((pEnd[-1] != '\r' && pEnd[-1] != '\n') || (pEnd[3] != '\r' && pEnd[3] != '\n')));

    if (pEnd)
    {
        // We have found a \\ followed by a \r or \n. Do not print out the character pEnd points
        // to, as this will be the first \ (this is why we don't add one to the second parameter).
        ExtOut("%.*s", pEnd - pStart, pStart);
    }
    else
    {
        // If pEnd is false then we have run to the end of the document. However, we did find
        // the command to print, so we should simply print to the end of the file. We'll add
        // an extra newline here in case the file does not contain one.
        ExtOut("%s\n", pStart);
    }
}
/**********************************************************************\
* Routine Description:                                                 *
*                                                                      *
*    This function displays the commands available in strike and the   *
*    arguments passed into each. With a command-name argument it       *
*    prints that command's section; otherwise the table of contents.   *
*                                                                      *
\**********************************************************************/
DECLARE_API(Help)
{
    INIT_API_EXT();

    StringHolder commandName;
    CMDValue arg[] =
    {
        {&commandName.data, COSTRING}
    };
    size_t nArg;
    if (!GetCMDOption(args, NULL, 0, arg, _countof(arg), &nArg))
    {
        return Status;
    }

    ExtOut("-------------------------------------------------------------------------------\n");

    if (nArg == 1)
    {
        // Convert commandName to lower-case
        LPSTR curChar = commandName.data;
        while (*curChar != '\0')
        {
            // ASCII range check guards isupper, whose behavior is undefined
            // for values outside unsigned char / EOF.
            if ( ((unsigned) *curChar <= 0x7F) && isupper(*curChar))
            {
                *curChar = (CHAR) tolower(*curChar);
            }
            curChar++;
        }

        // Strip off leading "!" if the user put that.
        curChar = commandName.data;
        if (*curChar == '!')
            curChar++;

        PrintHelp (curChar);
    }
    else
    {
        // No argument: show the table of contents section.
        PrintHelp ("contents");
    }

    return S_OK;
}
| 1 | 11,107 | Don't use ! in the messages because it isn't used on xplat. There is a SOSPrefix define that can be used (blank on xplat and ! on Windows). Do we really need 3 newlines? | dotnet-diagnostics | cpp |
@@ -52,7 +52,13 @@ def _dagster_home():
'DAGSTER_HOME is not set, check is_dagster_home_set before invoking.'
)
- return os.path.expanduser(dagster_home_path)
+ dagster_home_path = os.path.expanduser(dagster_home_path)
+
+ if not os.path.isabs(dagster_home_path):
+ raise DagsterInvariantViolationError(
+ 'DAGSTER_HOME must be absolute path'
+ )
+ return dagster_home_path
def _check_run_equality(pipeline_run, candidate_run): | 1 | import datetime
import logging
import os
import time
from abc import ABCMeta
from collections import defaultdict, namedtuple
from enum import Enum
import six
import yaml
from rx import Observable
from dagster import check, seven
from dagster.config import Field, Permissive
from dagster.core.definitions.events import AssetKey
from dagster.core.definitions.pipeline import PipelineDefinition, PipelineSubsetDefinition
from dagster.core.errors import (
DagsterInvalidConfigError,
DagsterInvariantViolationError,
DagsterRunAlreadyExists,
DagsterRunConflict,
)
from dagster.core.storage.migration.utils import upgrading_instance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus, PipelineRunsFilter
from dagster.core.utils import str_format_list
from dagster.serdes import ConfigurableClass, whitelist_for_serdes
from dagster.seven import get_current_datetime_in_utc
from dagster.utils.merger import merge_dicts
from dagster.utils.yaml_utils import load_yaml_from_globs
from .config import DAGSTER_CONFIG_YAML_FILENAME
from .ref import InstanceRef, compute_logs_directory
# 'airflow_execution_date' and 'is_airflow_ingest_pipeline' are hardcoded tags used in the
# airflow ingestion logic (see: dagster_pipeline_factory.py). 'airflow_execution_date' stores the
# 'execution_date' used in Airflow operator execution and 'is_airflow_ingest_pipeline' determines
# whether 'airflow_execution_date' is needed.
# https://github.com/dagster-io/dagster/issues/2403
AIRFLOW_EXECUTION_DATE_STR = 'airflow_execution_date'
IS_AIRFLOW_INGEST_PIPELINE_STR = 'is_airflow_ingest_pipeline'
def _is_dagster_home_set():
return bool(os.getenv('DAGSTER_HOME'))
def _dagster_home():
dagster_home_path = os.getenv('DAGSTER_HOME')
if not dagster_home_path:
raise DagsterInvariantViolationError(
'DAGSTER_HOME is not set, check is_dagster_home_set before invoking.'
)
return os.path.expanduser(dagster_home_path)
def _check_run_equality(pipeline_run, candidate_run):
    '''Compare two PipelineRun records field by field.

    Returns a dict mapping each differing field name to an
    (expected_value, candidate_value) tuple; empty when the runs match.
    '''
    check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)
    check.inst_param(candidate_run, 'candidate_run', PipelineRun)

    return {
        field: (getattr(pipeline_run, field), getattr(candidate_run, field))
        for field in pipeline_run._fields
        if getattr(pipeline_run, field) != getattr(candidate_run, field)
    }
def _format_field_diff(field_diff):
return '\n'.join(
[
(
' {field_name}:\n'
+ ' Expected: {expected_value}\n'
+ ' Received: {candidate_value}'
).format(
field_name=field_name,
expected_value=expected_value,
candidate_value=candidate_value,
)
for field_name, (expected_value, candidate_value,) in field_diff.items()
]
)
class _EventListenerLogHandler(logging.Handler):
    '''logging.Handler that forwards structured dagster log records to a
    DagsterInstance via handle_new_event.'''

    def __init__(self, instance):
        # The instance whose event handler receives each converted record.
        self._instance = instance
        super(_EventListenerLogHandler, self).__init__()

    def emit(self, record):
        # Imported lazily here rather than at module scope.
        from dagster.core.events.log import construct_event_record, StructuredLoggerMessage

        try:
            # Wrap the raw logging record (including its dagster_meta payload)
            # into a structured event record.
            event = construct_event_record(
                StructuredLoggerMessage(
                    name=record.name,
                    message=record.msg,
                    level=record.levelno,
                    meta=record.dagster_meta,
                    record=record,
                )
            )

            self._instance.handle_new_event(event)
        except Exception as e:  # pylint: disable=W0703
            # Log then re-raise: failures to persist events are not swallowed.
            logging.critical('Error during instance event listen')
            logging.exception(str(e))
            raise
class InstanceType(Enum):
    '''Marks whether a DagsterInstance is persistent or ephemeral.'''

    PERSISTENT = 'PERSISTENT'
    EPHEMERAL = 'EPHEMERAL'
class DagsterInstance:
'''Core abstraction for managing Dagster's access to storage and other resources.
Users should not directly instantiate this class; it is instantiated by internal machinery when
``dagit`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
For example, to use Postgres for run and event log storage, you can write a ``dagster.yaml``
such as the following:
.. literalinclude:: ../../../../docs/sections/deploying/postgres_dagster.yaml
:caption: dagster.yaml
:language: YAML
Args:
instance_type (InstanceType): Indicates whether the instance is ephemeral or persistent.
Users should not attempt to set this value directly or in their ``dagster.yaml`` files.
local_artifact_storage (LocalArtifactStorage): The local artifact storage is used to
configure storage for any artifacts that require a local disk, such as schedules, or
when using the filesystem system storage to manage files and intermediates. By default,
this will be a :py:class:`dagster.core.storage.root.LocalArtifactStorage`. Configurable
in ``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass`
machinery.
run_storage (RunStorage): The run storage is used to store metadata about ongoing and past
pipeline runs. By default, this will be a
:py:class:`dagster.core.storage.runs.SqliteRunStorage`. Configurable in ``dagster.yaml``
using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.
event_storage (EventLogStorage): Used to store the structured event logs generated by
pipeline runs. By default, this will be a
:py:class:`dagster.core.storage.event_log.SqliteEventLogStorage`. Configurable in
``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.
compute_log_manager (ComputeLogManager): The compute log manager handles stdout and stderr
logging for solid compute functions. By default, this will be a
:py:class:`dagster.core.storage.local_compute_log_manager.LocalComputeLogManager`.
Configurable in ``dagster.yaml`` using the
:py:class:`~dagster.serdes.ConfigurableClass` machinery.
run_launcher (Optional[RunLauncher]): Optionally, a run launcher may be used to enable
a Dagster instance to launch pipeline runs, e.g. on a remote Kubernetes cluster, in
addition to running them locally.
dagit_settings (Optional[Dict]): Specifies certain Dagit-specific, per-instance settings,
such as feature flags. These are set in the ``dagster.yaml`` under the key ``dagit``.
telemetry_settings (Optional[Dict]): Specifies certain telemetry-specific, per-instance
settings, such as whether it is enabled. These are set in the ``dagster.yaml`` under
the key ``telemetry``
ref (Optional[InstanceRef]): Used by internal machinery to pass instances across process
boundaries.
'''
_PROCESS_TEMPDIR = None
def __init__(
    self,
    instance_type,
    local_artifact_storage,
    run_storage,
    event_storage,
    compute_log_manager,
    schedule_storage=None,
    scheduler=None,
    run_launcher=None,
    dagit_settings=None,
    telemetry_settings=None,
    ref=None,
):
    # See the class docstring for the meaning of each argument.
    # Imported lazily to avoid circular imports at module load time.
    from dagster.core.storage.compute_log_manager import ComputeLogManager
    from dagster.core.storage.event_log import EventLogStorage
    from dagster.core.storage.root import LocalArtifactStorage
    from dagster.core.storage.runs import RunStorage
    from dagster.core.storage.schedules import ScheduleStorage
    from dagster.core.scheduler import Scheduler
    from dagster.core.launcher import RunLauncher

    self._instance_type = check.inst_param(instance_type, 'instance_type', InstanceType)
    self._local_artifact_storage = check.inst_param(
        local_artifact_storage, 'local_artifact_storage', LocalArtifactStorage
    )
    self._event_storage = check.inst_param(event_storage, 'event_storage', EventLogStorage)
    self._run_storage = check.inst_param(run_storage, 'run_storage', RunStorage)
    self._compute_log_manager = check.inst_param(
        compute_log_manager, 'compute_log_manager', ComputeLogManager
    )
    self._schedule_storage = check.opt_inst_param(
        schedule_storage, 'schedule_storage', ScheduleStorage
    )
    self._scheduler = check.opt_inst_param(scheduler, 'scheduler', Scheduler)
    # NOTE(review): run_launcher defaults to None but is validated with the
    # non-optional inst_param, so passing no launcher fails -- confirm intent.
    self._run_launcher = check.inst_param(run_launcher, 'run_launcher', RunLauncher)
    self._dagit_settings = check.opt_dict_param(dagit_settings, 'dagit_settings')
    self._telemetry_settings = check.opt_dict_param(telemetry_settings, 'telemetry_settings')
    self._ref = check.opt_inst_param(ref, 'ref', InstanceRef)
    # run_id -> list of callbacks invoked for each new event on that run.
    self._subscribers = defaultdict(list)
# ctors
@staticmethod
def ephemeral(tempdir=None):
    """Build a throwaway, fully in-memory instance.

    Args:
        tempdir (Optional[str]): Directory for local artifacts; defaults to
            the process-wide temp directory (see ``temp_storage``).
    """
    # Imported lazily to avoid circular imports at module load time.
    from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher
    from dagster.core.storage.event_log import InMemoryEventLogStorage
    from dagster.core.storage.root import LocalArtifactStorage
    from dagster.core.storage.runs import InMemoryRunStorage
    from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager

    if tempdir is None:
        tempdir = DagsterInstance.temp_storage()

    return DagsterInstance(
        InstanceType.EPHEMERAL,
        local_artifact_storage=LocalArtifactStorage(tempdir),
        run_storage=InMemoryRunStorage(),
        event_storage=InMemoryEventLogStorage(),
        compute_log_manager=NoOpComputeLogManager(),
        run_launcher=SyncInMemoryRunLauncher(),
    )
@staticmethod
def get(fallback_storage=None):
    """Resolve the most appropriate instance for this process.

    Resolution order:
      1. ``$DAGSTER_HOME``, when set.
      2. The ``fallback_storage`` directory, when provided. This gives an
         out-of-the-box dagit experience where runs persist across restarts
         in a tempdir cleaned up when the dagit watchdog process exits.
      3. An ephemeral in-memory instance as the last resort.
    """
    if _is_dagster_home_set():
        return DagsterInstance.from_config(_dagster_home())
    if fallback_storage is not None:
        return DagsterInstance.from_config(fallback_storage)
    return DagsterInstance.ephemeral(fallback_storage)
@staticmethod
def local_temp(tempdir=None, overrides=None):
    """Build a persistent instance rooted in a (possibly temporary) dir.

    Args:
        tempdir (Optional[str]): Root directory; defaults to the
            process-wide temp directory.
        overrides (Optional[Dict]): Config overrides applied to the ref.
    """
    if tempdir is None:
        tempdir = DagsterInstance.temp_storage()

    return DagsterInstance.from_ref(InstanceRef.from_dir(tempdir, overrides=overrides))

@staticmethod
def from_config(config_dir, config_filename=DAGSTER_CONFIG_YAML_FILENAME):
    """Build an instance from a ``dagster.yaml`` in ``config_dir``."""
    instance_ref = InstanceRef.from_dir(config_dir, config_filename=config_filename)
    return DagsterInstance.from_ref(instance_ref)
@staticmethod
def from_ref(instance_ref):
    """Instantiate all storages/components described by an InstanceRef.

    The ref itself is retained so the instance can later be handed across
    process boundaries (see ``get_ref``).
    """
    check.inst_param(instance_ref, 'instance_ref', InstanceRef)
    return DagsterInstance(
        instance_type=InstanceType.PERSISTENT,
        local_artifact_storage=instance_ref.local_artifact_storage,
        run_storage=instance_ref.run_storage,
        event_storage=instance_ref.event_storage,
        compute_log_manager=instance_ref.compute_log_manager,
        schedule_storage=instance_ref.schedule_storage,
        scheduler=instance_ref.scheduler,
        run_launcher=instance_ref.run_launcher,
        dagit_settings=instance_ref.dagit_settings,
        telemetry_settings=instance_ref.telemetry_settings,
        ref=instance_ref,
    )
# flags
@property
def is_persistent(self):
    # True when backed by durable (on-disk / database) storage.
    return self._instance_type == InstanceType.PERSISTENT

@property
def is_ephemeral(self):
    # True for throwaway in-memory instances (see `ephemeral()`).
    return self._instance_type == InstanceType.EPHEMERAL

def get_ref(self):
    """Return the InstanceRef this instance was built from.

    Fails for directly-constructed (e.g. ephemeral) instances, which have
    no ref to hand across process boundaries.
    """
    if self._ref:
        return self._ref

    check.failed('Can not produce an instance reference for {t}'.format(t=self))

@property
def root_directory(self):
    # Base directory under which all local artifacts are stored.
    return self._local_artifact_storage.base_dir

@staticmethod
def temp_storage():
    # Lazily create a single process-wide temp directory, shared by all
    # ephemeral/local_temp instances for the life of the process.
    if DagsterInstance._PROCESS_TEMPDIR is None:
        DagsterInstance._PROCESS_TEMPDIR = seven.TemporaryDirectory()
    return DagsterInstance._PROCESS_TEMPDIR.name
def _info(self, component):
    """Render one component as an indented, human-readable string."""
    prefix = '     '
    # ConfigurableClass may not have inst_data if it's a direct instantiation
    # which happens for ephemeral instances
    if isinstance(component, ConfigurableClass) and component.inst_data:
        return component.inst_data.info_str(prefix)
    if type(component) is dict:
        # Plain settings dicts (dagit/telemetry) are YAML-dumped, with the
        # prefix re-applied to every line to keep the indentation uniform.
        return prefix + yaml.dump(component, default_flow_style=False).replace(
            '\n', '\n' + prefix
        )

    return '{}{}\n'.format(prefix, component.__class__.__name__)
def info_str_for_component(self, component_name, component):
    """Render a single component as a named, indented section."""
    rendered = self._info(component)
    return '{component_name}:\n{component}\n'.format(
        component_name=component_name, component=rendered
    )
def info_str(self):
    """Render a human-readable summary of every configured component."""
    # Normalize empty settings dicts to None so they render as their type
    # name rather than an empty YAML document.
    dagit_settings = self._dagit_settings if self._dagit_settings else None
    telemetry_settings = self._telemetry_settings if self._telemetry_settings else None

    return (
        'DagsterInstance components:\n\n'
        ' Local Artifacts Storage:\n{artifact}\n'
        ' Run Storage:\n{run}\n'
        ' Event Log Storage:\n{event}\n'
        ' Compute Log Manager:\n{compute}\n'
        ' Schedule Storage:\n{schedule_storage}\n'
        ' Scheduler:\n{scheduler}\n'
        ' Run Launcher:\n{run_launcher}\n'
        ' Dagit:\n{dagit}\n'
        ' Telemetry:\n{telemetry}\n'
        ''.format(
            artifact=self._info(self._local_artifact_storage),
            run=self._info(self._run_storage),
            event=self._info(self._event_storage),
            compute=self._info(self._compute_log_manager),
            schedule_storage=self._info(self._schedule_storage),
            scheduler=self._info(self._scheduler),
            run_launcher=self._info(self._run_launcher),
            dagit=self._info(dagit_settings),
            telemetry=self._info(telemetry_settings),
        )
    )
# schedule storage
@property
def schedule_storage(self):
    return self._schedule_storage

# scheduler
@property
def scheduler(self):
    return self._scheduler

# run launcher
@property
def run_launcher(self):
    return self._run_launcher

# compute logs
@property
def compute_log_manager(self):
    return self._compute_log_manager

@property
def dagit_settings(self):
    # Dagit-specific settings from dagster.yaml; {} when unset.
    if self._dagit_settings:
        return self._dagit_settings
    return {}
@property
def telemetry_enabled(self):
    """Whether anonymized usage telemetry should be recorded.

    Always disabled for ephemeral instances; otherwise controlled by the
    ``enabled`` key of the ``telemetry`` settings in ``dagster.yaml``,
    defaulting to enabled when unset.
    """
    if self.is_ephemeral:
        return False

    settings = self._telemetry_settings or {}
    return settings.get('enabled', True)
def upgrade(self, print_fn=lambda _: None):
    """Upgrade all configured storages to their latest schemas.

    Args:
        print_fn (Callable[[str], Any]): Optional progress printer.
    """
    with upgrading_instance(self):
        print_fn('Updating run storage...')
        self._run_storage.upgrade()

        print_fn('Updating event storage...')
        self._event_storage.upgrade()

        # Schedule storage is optional (opt_inst_param in __init__); skip
        # it when unset rather than raising AttributeError on None.
        if self._schedule_storage:
            print_fn('Updating schedule storage...')
            self._schedule_storage.upgrade()
def dispose(self):
    """Release resources held by run and event storage (e.g. connections)."""
    self._run_storage.dispose()
    self._event_storage.dispose()
# run storage
def get_run_by_id(self, run_id):
    """Fetch a stored PipelineRun by id."""
    return self._run_storage.get_run_by_id(run_id)

def get_pipeline_snapshot(self, snapshot_id):
    """Fetch a stored pipeline snapshot by id."""
    return self._run_storage.get_pipeline_snapshot(snapshot_id)

def has_pipeline_snapshot(self, snapshot_id):
    """Whether a pipeline snapshot with this id is stored."""
    return self._run_storage.has_pipeline_snapshot(snapshot_id)
def get_historical_pipeline(self, snapshot_id):
    """Reconstruct a HistoricalPipeline from a stored pipeline snapshot.

    Args:
        snapshot_id (str): Id of the stored pipeline snapshot.

    Returns:
        HistoricalPipeline: Snapshot-backed representation, including the
        parent snapshot when the snapshot records lineage.
    """
    from dagster.core.host_representation import HistoricalPipeline

    snapshot = self._run_storage.get_pipeline_snapshot(snapshot_id)
    parent_snapshot = (
        self._run_storage.get_pipeline_snapshot(snapshot.lineage_snapshot.parent_snapshot_id)
        if snapshot.lineage_snapshot
        else None
    )
    # Reuse the snapshot fetched above instead of hitting storage a second
    # time for the same id.
    return HistoricalPipeline(snapshot, snapshot_id, parent_snapshot)
def has_historical_pipeline(self, snapshot_id):
    """Whether a historical pipeline can be built for this snapshot id."""
    return self._run_storage.has_pipeline_snapshot(snapshot_id)

def get_execution_plan_snapshot(self, snapshot_id):
    """Fetch a stored execution plan snapshot by id."""
    return self._run_storage.get_execution_plan_snapshot(snapshot_id)

def get_run_stats(self, run_id):
    """Aggregate stats for a run, computed from its event log."""
    return self._event_storage.get_stats_for_run(run_id)

def get_run_step_stats(self, run_id):
    """Per-step stats for a run, computed from its event log."""
    return self._event_storage.get_step_stats_for_run(run_id)

def get_run_tags(self):
    """All tag keys/values present across stored runs."""
    return self._run_storage.get_run_tags()

def get_run_group(self, run_id):
    """The group (root run and its re-executions) containing this run."""
    return self._run_storage.get_run_group(run_id)
def create_run_for_pipeline(
    self,
    pipeline_def,
    execution_plan=None,
    run_id=None,
    run_config=None,
    mode=None,
    solids_to_execute=None,
    step_keys_to_execute=None,
    status=None,
    tags=None,
    root_run_id=None,
    parent_run_id=None,
    solid_selection=None,
):
    """Create and persist a PipelineRun for an in-process pipeline def.

    Subsets the pipeline to ``solids_to_execute`` if needed, builds an
    execution plan when one is not supplied, snapshots both, and delegates
    to ``create_run``.
    """
    from dagster.core.execution.api import create_execution_plan
    from dagster.core.execution.plan.plan import ExecutionPlan
    from dagster.core.snap import snapshot_from_execution_plan

    check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
    check.opt_inst_param(execution_plan, 'execution_plan', ExecutionPlan)

    # note that solids_to_execute is required to execute the solid subset, which is the
    # frozenset version of the previous solid_subset.
    # solid_selection is not required and will not be converted to solids_to_execute here.
    # i.e. this function doesn't handle solid queries.
    # solid_selection is only used to pass the user queries further down.
    check.opt_set_param(solids_to_execute, 'solids_to_execute', of_type=str)
    check.opt_list_param(solid_selection, 'solid_selection', of_type=str)

    if solids_to_execute:
        if isinstance(pipeline_def, PipelineSubsetDefinition):
            # for the case when pipeline_def is created by ExecutablePipeline or ExternalPipeline
            check.invariant(
                solids_to_execute == pipeline_def.solids_to_execute,
                'Cannot create a PipelineRun from pipeline subset {pipeline_solids_to_execute} '
                'that conflicts with solids_to_execute arg {solids_to_execute}'.format(
                    pipeline_solids_to_execute=str_format_list(pipeline_def.solids_to_execute),
                    solids_to_execute=str_format_list(solids_to_execute),
                ),
            )
        else:
            # for cases when `create_run_for_pipeline` is directly called
            pipeline_def = pipeline_def.get_pipeline_subset_def(
                solids_to_execute=solids_to_execute
            )

    if execution_plan is None:
        execution_plan = create_execution_plan(
            pipeline_def,
            run_config=run_config,
            mode=mode,
            step_keys_to_execute=step_keys_to_execute,
        )

    return self.create_run(
        pipeline_name=pipeline_def.name,
        run_id=run_id,
        run_config=run_config,
        mode=check.opt_str_param(mode, 'mode', default=pipeline_def.get_default_mode_name()),
        solid_selection=solid_selection,
        solids_to_execute=solids_to_execute,
        step_keys_to_execute=step_keys_to_execute,
        status=status,
        tags=tags,
        root_run_id=root_run_id,
        parent_run_id=parent_run_id,
        pipeline_snapshot=pipeline_def.get_pipeline_snapshot(),
        execution_plan_snapshot=snapshot_from_execution_plan(
            execution_plan, pipeline_def.get_pipeline_snapshot_id()
        ),
        parent_pipeline_snapshot=pipeline_def.get_parent_pipeline_snapshot(),
    )
def _construct_run_with_snapshots(
    self,
    pipeline_name,
    run_id,
    run_config,
    mode,
    solids_to_execute,
    step_keys_to_execute,
    status,
    tags,
    root_run_id,
    parent_run_id,
    pipeline_snapshot,
    execution_plan_snapshot,
    parent_pipeline_snapshot,
    solid_selection=None,
):
    """Build a PipelineRun, persisting its pipeline/execution-plan
    snapshots first so the run can reference them by id.
    """
    # https://github.com/dagster-io/dagster/issues/2403
    if tags and IS_AIRFLOW_INGEST_PIPELINE_STR in tags:
        if AIRFLOW_EXECUTION_DATE_STR not in tags:
            tags[AIRFLOW_EXECUTION_DATE_STR] = get_current_datetime_in_utc().isoformat()

    check.invariant(
        not (not pipeline_snapshot and execution_plan_snapshot),
        'It is illegal to have an execution plan snapshot and not have a pipeline snapshot. '
        'It is possible to have no execution plan snapshot since we persist runs '
        'that do not successfully compile execution plans in the scheduled case.',
    )

    pipeline_snapshot_id = (
        self._ensure_persisted_pipeline_snapshot(pipeline_snapshot, parent_pipeline_snapshot)
        if pipeline_snapshot
        else None
    )
    execution_plan_snapshot_id = (
        self._ensure_persisted_execution_plan_snapshot(
            execution_plan_snapshot, pipeline_snapshot_id, step_keys_to_execute
        )
        if execution_plan_snapshot and pipeline_snapshot_id
        else None
    )

    return PipelineRun(
        pipeline_name=pipeline_name,
        run_id=run_id,
        run_config=run_config,
        mode=mode,
        solid_selection=solid_selection,
        solids_to_execute=solids_to_execute,
        step_keys_to_execute=step_keys_to_execute,
        status=status,
        tags=tags,
        root_run_id=root_run_id,
        parent_run_id=parent_run_id,
        pipeline_snapshot_id=pipeline_snapshot_id,
        execution_plan_snapshot_id=execution_plan_snapshot_id,
    )
def _ensure_persisted_pipeline_snapshot(self, pipeline_snapshot, parent_pipeline_snapshot):
    """Idempotently persist a pipeline snapshot (and, for subset
    pipelines, its lineage parent), returning the snapshot id.
    """
    from dagster.core.snap import create_pipeline_snapshot_id, PipelineSnapshot

    check.inst_param(pipeline_snapshot, 'pipeline_snapshot', PipelineSnapshot)
    check.opt_inst_param(parent_pipeline_snapshot, 'parent_pipeline_snapshot', PipelineSnapshot)

    if pipeline_snapshot.lineage_snapshot:
        # Persist the parent snapshot first so the child's lineage pointer
        # always resolves; verify the passed parent matches the recorded id.
        if not self._run_storage.has_pipeline_snapshot(
            pipeline_snapshot.lineage_snapshot.parent_snapshot_id
        ):
            check.invariant(
                create_pipeline_snapshot_id(parent_pipeline_snapshot)
                == pipeline_snapshot.lineage_snapshot.parent_snapshot_id,
                'Parent pipeline snapshot id out of sync with passed parent pipeline snapshot',
            )

            returned_pipeline_snapshot_id = self._run_storage.add_pipeline_snapshot(
                parent_pipeline_snapshot
            )
            check.invariant(
                pipeline_snapshot.lineage_snapshot.parent_snapshot_id
                == returned_pipeline_snapshot_id
            )

    pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
    if not self._run_storage.has_pipeline_snapshot(pipeline_snapshot_id):
        returned_pipeline_snapshot_id = self._run_storage.add_pipeline_snapshot(
            pipeline_snapshot
        )
        # Storage must derive the same content-addressed id we computed.
        check.invariant(pipeline_snapshot_id == returned_pipeline_snapshot_id)

    return pipeline_snapshot_id
def _ensure_persisted_execution_plan_snapshot(
    self, execution_plan_snapshot, pipeline_snapshot_id, step_keys_to_execute
):
    """Idempotently persist an execution plan snapshot, after verifying it
    is consistent with the pipeline snapshot and requested step keys;
    returns the snapshot id.
    """
    from dagster.core.snap.execution_plan_snapshot import (
        ExecutionPlanSnapshot,
        create_execution_plan_snapshot_id,
    )

    check.inst_param(execution_plan_snapshot, 'execution_plan_snapshot', ExecutionPlanSnapshot)
    check.str_param(pipeline_snapshot_id, 'pipeline_snapshot_id')
    check.opt_list_param(step_keys_to_execute, 'step_keys_to_execute', of_type=str)

    check.invariant(
        execution_plan_snapshot.pipeline_snapshot_id == pipeline_snapshot_id,
        (
            'Snapshot mismatch: Snapshot ID in execution plan snapshot is '
            '"{ep_pipeline_snapshot_id}" and snapshot_id created in memory is '
            '"{pipeline_snapshot_id}"'
        ).format(
            ep_pipeline_snapshot_id=execution_plan_snapshot.pipeline_snapshot_id,
            pipeline_snapshot_id=pipeline_snapshot_id,
        ),
    )

    check.invariant(
        set(step_keys_to_execute) == set(execution_plan_snapshot.step_keys_to_execute)
        if step_keys_to_execute
        else set(execution_plan_snapshot.step_keys_to_execute)
        == set([step.key for step in execution_plan_snapshot.steps]),
        'We encode step_keys_to_execute twice in our stack, unfortunately. This check '
        'ensures that they are consistent. We check that step_keys_to_execute in the plan '
        'matches the step_keys_to_execute params if it is set. If it is not, this indicates '
        'a full execution plan, and so we verify that.',
    )

    execution_plan_snapshot_id = create_execution_plan_snapshot_id(execution_plan_snapshot)
    if not self._run_storage.has_execution_plan_snapshot(execution_plan_snapshot_id):
        returned_execution_plan_snapshot_id = self._run_storage.add_execution_plan_snapshot(
            execution_plan_snapshot
        )
        # Storage must derive the same content-addressed id we computed.
        check.invariant(execution_plan_snapshot_id == returned_execution_plan_snapshot_id)

    return execution_plan_snapshot_id
def create_run(
    self,
    pipeline_name,
    run_id,
    run_config,
    mode,
    solids_to_execute,
    step_keys_to_execute,
    status,
    tags,
    root_run_id,
    parent_run_id,
    pipeline_snapshot,
    execution_plan_snapshot,
    parent_pipeline_snapshot,
    solid_selection=None,
):
    """Construct a PipelineRun (persisting its snapshots) and add it to run
    storage; returns the stored run.
    """
    pipeline_run = self._construct_run_with_snapshots(
        pipeline_name=pipeline_name,
        run_id=run_id,
        run_config=run_config,
        mode=mode,
        solid_selection=solid_selection,
        solids_to_execute=solids_to_execute,
        step_keys_to_execute=step_keys_to_execute,
        status=status,
        tags=tags,
        root_run_id=root_run_id,
        parent_run_id=parent_run_id,
        pipeline_snapshot=pipeline_snapshot,
        execution_plan_snapshot=execution_plan_snapshot,
        parent_pipeline_snapshot=parent_pipeline_snapshot,
    )
    return self._run_storage.add_run(pipeline_run)
def register_managed_run(
    self,
    pipeline_name,
    run_id,
    run_config,
    mode,
    solids_to_execute,
    step_keys_to_execute,
    tags,
    root_run_id,
    parent_run_id,
    pipeline_snapshot,
    execution_plan_snapshot,
    parent_pipeline_snapshot,
    solid_selection=None,
):
    """Idempotently create a MANAGED-status run, tolerating concurrent
    creation races; returns the stored (or pre-existing, identical) run.
    """
    # The usage of this method is limited to dagster-airflow, specifically in Dagster
    # Operators that are executed in Airflow. Because a common workflow in Airflow is to
    # retry dags from arbitrary tasks, we need any node to be capable of creating a
    # PipelineRun.
    #
    # The try-except DagsterRunAlreadyExists block handles the race when multiple "root" tasks
    # simultaneously execute self._run_storage.add_run(pipeline_run). When this happens, only
    # one task succeeds in creating the run, while the others get DagsterRunAlreadyExists
    # error; at this point, the failed tasks try again to fetch the existing run.
    # https://github.com/dagster-io/dagster/issues/2412

    pipeline_run = self._construct_run_with_snapshots(
        pipeline_name=pipeline_name,
        run_id=run_id,
        run_config=run_config,
        mode=mode,
        solid_selection=solid_selection,
        solids_to_execute=solids_to_execute,
        step_keys_to_execute=step_keys_to_execute,
        status=PipelineRunStatus.MANAGED,
        tags=tags,
        root_run_id=root_run_id,
        parent_run_id=parent_run_id,
        pipeline_snapshot=pipeline_snapshot,
        execution_plan_snapshot=execution_plan_snapshot,
        parent_pipeline_snapshot=parent_pipeline_snapshot,
    )

    def get_run():
        # Fetch the already-stored run, but fail loudly if it differs from
        # the run we just tried to create (a genuine id collision).
        candidate_run = self.get_run_by_id(pipeline_run.run_id)

        field_diff = _check_run_equality(pipeline_run, candidate_run)

        if field_diff:
            raise DagsterRunConflict(
                'Found conflicting existing run with same id {run_id}. Runs differ in:'
                '\n{field_diff}'.format(
                    run_id=pipeline_run.run_id, field_diff=_format_field_diff(field_diff),
                ),
            )
        return candidate_run

    if self.has_run(pipeline_run.run_id):
        return get_run()

    try:
        return self._run_storage.add_run(pipeline_run)
    except DagsterRunAlreadyExists:
        return get_run()
def add_run(self, pipeline_run):
    """Persist a pre-built PipelineRun in run storage."""
    return self._run_storage.add_run(pipeline_run)

def handle_run_event(self, run_id, event):
    """Apply a DagsterEvent to the stored run (e.g. status transitions)."""
    return self._run_storage.handle_run_event(run_id, event)

def has_run(self, run_id):
    """Whether a run with this id exists in run storage."""
    return self._run_storage.has_run(run_id)

def get_runs(self, filters=None, cursor=None, limit=None):
    """Fetch runs matching optional filters, with cursor-based paging."""
    return self._run_storage.get_runs(filters, cursor, limit)

def get_runs_count(self, filters=None):
    """Count runs matching optional filters."""
    return self._run_storage.get_runs_count(filters)

def get_run_groups(self, filters=None, cursor=None, limit=None):
    """Fetch run groups (root runs plus re-executions) matching filters."""
    return self._run_storage.get_run_groups(filters=filters, cursor=cursor, limit=limit)

def wipe(self):
    """Delete all runs and all event logs."""
    self._run_storage.wipe()
    self._event_storage.wipe()

def wipe_assets(self, asset_keys):
    """Delete event-log history for the given asset keys.

    No-op unless the event log storage is asset-aware.
    """
    check.list_param(asset_keys, 'asset_keys', of_type=AssetKey)
    from dagster.core.storage.event_log.base import AssetAwareEventLogStorage

    if not isinstance(self._event_storage, AssetAwareEventLogStorage):
        return

    for asset_key in asset_keys:
        self._event_storage.wipe_asset(asset_key)

def wipe_all_assets(self):
    """Delete event-log history for every known asset key.

    No-op unless the event log storage is asset-aware.
    """
    from dagster.core.storage.event_log.base import AssetAwareEventLogStorage

    if not isinstance(self._event_storage, AssetAwareEventLogStorage):
        return

    for asset_key in self._event_storage.get_all_asset_keys():
        self._event_storage.wipe_asset(asset_key)

def delete_run(self, run_id):
    """Remove a single run and its event log from storage."""
    self._run_storage.delete_run(run_id)
    self._event_storage.delete_events(run_id)
# event storage
def logs_after(self, run_id, cursor):
    """Event log records for a run after the given cursor."""
    return self._event_storage.get_logs_for_run(run_id, cursor=cursor)

def all_logs(self, run_id):
    """All event log records for a run."""
    return self._event_storage.get_logs_for_run(run_id)

def watch_event_logs(self, run_id, cursor, cb):
    """Subscribe ``cb`` to new event log records for a run."""
    return self._event_storage.watch(run_id, cursor, cb)
# event subscriptions
def get_logger(self):
logger = logging.Logger('__event_listener')
logger.addHandler(_EventListenerLogHandler(self))
logger.setLevel(10)
return logger
def handle_new_event(self, event):
    """Store an event record, update run state for pipeline-level events,
    and fan the event out to subscribers registered for its run.
    """
    run_id = event.run_id

    self._event_storage.store_event(event)

    if event.is_dagster_event and event.dagster_event.is_pipeline_event:
        self._run_storage.handle_run_event(run_id, event.dagster_event)

    for sub in self._subscribers[run_id]:
        sub(event)

def add_event_listener(self, run_id, cb):
    """Register ``cb`` to be invoked for each new event on ``run_id``."""
    self._subscribers[run_id].append(cb)
def report_engine_event(
    self, message, pipeline_run, engine_event_data=None, cls=None, step_key=None,
):
    '''
    Report a EngineEvent that occurred outside of a pipeline execution context.

    Args:
        message (str): Event message; prefixed with ``[cls.__name__]`` when
            ``cls`` is given.
        pipeline_run (PipelineRun): Run to attach the event to.
        engine_event_data (Optional[EngineEventData]): Structured payload;
            a payload carrying an error raises the log level to ERROR.
        cls (class): Reporting class, used to prefix the message.
        step_key (Optional[str]): Step to associate the event with.

    Returns:
        DagsterEvent: The engine event that was recorded.
    '''
    from dagster.core.events import EngineEventData, DagsterEvent, DagsterEventType
    from dagster.core.events.log import DagsterEventRecord

    check.class_param(cls, 'cls')
    check.str_param(message, 'message')
    check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)
    engine_event_data = check.opt_inst_param(
        engine_event_data, 'engine_event_data', EngineEventData, EngineEventData([]),
    )

    if cls:
        message = "[{}] {}".format(cls.__name__, message)

    log_level = logging.INFO
    if engine_event_data and engine_event_data.error:
        log_level = logging.ERROR

    dagster_event = DagsterEvent(
        event_type_value=DagsterEventType.ENGINE_EVENT.value,
        pipeline_name=pipeline_run.pipeline_name,
        message=message,
        event_specific_data=engine_event_data,
    )
    event_record = DagsterEventRecord(
        message=message,
        user_message=message,
        level=log_level,
        pipeline_name=pipeline_run.pipeline_name,
        run_id=pipeline_run.run_id,
        error_info=None,
        timestamp=time.time(),
        step_key=step_key,
        dagster_event=dagster_event,
    )

    self.handle_new_event(event_record)
    return dagster_event
def report_run_failed(self, pipeline_run):
    """Record a PIPELINE_FAILURE event for a run from outside execution,
    marking the run failed (e.g. when its process died).
    """
    from dagster.core.events import DagsterEvent, DagsterEventType
    from dagster.core.events.log import DagsterEventRecord

    check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)

    message = "This pipeline run has been marked as failed from outside the execution context"

    event_record = DagsterEventRecord(
        message=message,
        user_message=message,
        level=logging.ERROR,
        pipeline_name=pipeline_run.pipeline_name,
        run_id=pipeline_run.run_id,
        error_info=None,
        timestamp=time.time(),
        dagster_event=DagsterEvent(
            event_type_value=DagsterEventType.PIPELINE_FAILURE.value,
            pipeline_name=pipeline_run.pipeline_name,
            message=message,
        ),
    )
    self.handle_new_event(event_record)
# directories
def file_manager_directory(self, run_id):
    """Local directory used by the file manager for this run."""
    return self._local_artifact_storage.file_manager_dir(run_id)

def intermediates_directory(self, run_id):
    """Local directory holding this run's intermediate values."""
    return self._local_artifact_storage.intermediates_dir(run_id)

def schedules_directory(self):
    """Local directory holding schedule artifacts (e.g. logs)."""
    return self._local_artifact_storage.schedules_dir
# Run launcher
def launch_run(self, run_id, external_pipeline):
    '''Launch a pipeline run.

    This method delegates to the ``RunLauncher``, if any, configured on the instance, and will
    call its implementation of ``RunLauncher.launch_run()`` to begin the execution of the
    specified run. Runs should be created in the instance (e.g., by calling
    ``DagsterInstance.create_run()``) *before* this method is called, and
    should be in the ``PipelineRunStatus.NOT_STARTED`` state.

    Args:
        run_id (str): The id of the run the launch.
        external_pipeline (ExternalPipeline): Host representation of the
            pipeline, handed to the launcher.
    '''
    run = self.get_run_by_id(run_id)
    return self._run_launcher.launch_run(self, run, external_pipeline=external_pipeline)
# Scheduler
def reconcile_scheduler_state(self, external_repository):
    """Sync stored schedule state with the repository's schedule defs."""
    return self._scheduler.reconcile_scheduler_state(self, external_repository)

def start_schedule_and_update_storage_state(self, external_schedule):
    """Start a schedule on the scheduler and persist its new state."""
    return self._scheduler.start_schedule_and_update_storage_state(self, external_schedule)

def stop_schedule_and_update_storage_state(self, schedule_origin_id):
    """Stop a schedule on the scheduler and persist its new state."""
    return self._scheduler.stop_schedule_and_update_storage_state(self, schedule_origin_id)

def stop_schedule_and_delete_from_storage(self, schedule_origin_id):
    """Stop a schedule and remove its state from storage entirely."""
    return self._scheduler.stop_schedule_and_delete_from_storage(self, schedule_origin_id)

def running_schedule_count(self, schedule_origin_id):
    # 0 when no scheduler is configured on this instance.
    if self._scheduler:
        return self._scheduler.running_schedule_count(schedule_origin_id)
    return 0
def scheduler_debug_info(self):
    """Collect a debug report cross-checking stored schedule state against
    what the scheduler daemon is actually running.

    Returns:
        SchedulerDebugInfo: Config info, scheduler info, per-schedule YAML
        summaries, and a list of detected inconsistencies.
    """
    from dagster.core.scheduler import SchedulerDebugInfo, ScheduleStatus

    errors = []

    schedules = []
    for schedule_state in self.all_stored_schedule_state():
        # Stored RUNNING but no job on the scheduler.
        if schedule_state.status == ScheduleStatus.RUNNING and not self.running_schedule_count(
            schedule_state.schedule_origin_id
        ):
            errors.append(
                "Schedule {schedule_name} is set to be running, but the scheduler is not "
                "running the schedule.".format(schedule_name=schedule_state.name)
            )
        # Stored STOPPED but the scheduler still runs it.
        elif schedule_state.status == ScheduleStatus.STOPPED and self.running_schedule_count(
            schedule_state.schedule_origin_id
        ):
            errors.append(
                "Schedule {schedule_name} is set to be stopped, but the scheduler is still running "
                "the schedule.".format(schedule_name=schedule_state.name)
            )

        # More than one scheduler job for the same schedule.
        if self.running_schedule_count(schedule_state.schedule_origin_id) > 1:
            errors.append(
                "Duplicate jobs found: More than one job for schedule {schedule_name} are "
                "running on the scheduler.".format(schedule_name=schedule_state.name)
            )

        schedule_info = {
            schedule_state.name: {
                "status": schedule_state.status.value,
                "cron_schedule": schedule_state.cron_schedule,
                "python_path": schedule_state.pipeline_origin.executable_path,
                "repository_pointer": schedule_state.pipeline_origin.get_repo_pointer().describe(),
                "schedule_origin_id": schedule_state.schedule_origin_id,
                "repository_origin_id": schedule_state.repository_origin_id,
            }
        }

        schedules.append(yaml.safe_dump(schedule_info, default_flow_style=False))

    return SchedulerDebugInfo(
        scheduler_config_info=self.info_str_for_component('Scheduler', self.scheduler),
        scheduler_info=self.scheduler.debug_info(),
        schedule_storage=schedules,
        errors=errors,
    )
# Schedule Storage
def create_schedule_tick(self, schedule_tick_data):
    """Record a new tick for a schedule."""
    return self._schedule_storage.create_schedule_tick(schedule_tick_data)

def update_schedule_tick(self, tick):
    """Update a previously recorded schedule tick."""
    return self._schedule_storage.update_schedule_tick(tick)

def get_schedule_ticks(self, schedule_origin_id):
    """All recorded ticks for a schedule."""
    return self._schedule_storage.get_schedule_ticks(schedule_origin_id)

def get_schedule_tick_stats(self, schedule_origin_id):
    """Aggregate tick statistics for a schedule."""
    return self._schedule_storage.get_schedule_tick_stats(schedule_origin_id)

def all_stored_schedule_state(self, repository_origin_id=None):
    """Stored state of all schedules, optionally filtered by repository."""
    return self._schedule_storage.all_stored_schedule_state(repository_origin_id)

def get_schedule_state(self, schedule_origin_id):
    """Stored state for a single schedule."""
    return self._schedule_storage.get_schedule_state(schedule_origin_id)

def add_schedule_state(self, schedule_state):
    """Persist state for a newly-seen schedule."""
    return self._schedule_storage.add_schedule_state(schedule_state)

def update_schedule_state(self, schedule_state):
    """Persist updated state for an existing schedule."""
    return self._schedule_storage.update_schedule_state(schedule_state)

def delete_schedule_state(self, schedule_origin_id):
    """Remove stored state for a schedule."""
    return self._schedule_storage.delete_schedule_state(schedule_origin_id)

def wipe_all_schedules(self):
    """Wipe scheduler jobs (if a scheduler is configured) and all stored
    schedule state.

    NOTE(review): assumes schedule storage is configured -- would raise on
    None (see __init__'s opt_inst_param); confirm callers guarantee this.
    """
    if self._scheduler:
        self._scheduler.wipe(self)

    self._schedule_storage.wipe()

def logs_path_for_schedule(self, schedule_origin_id):
    """Filesystem path where the scheduler writes this schedule's logs."""
    return self._scheduler.get_logs_path(self, schedule_origin_id)
| 1 | 13,759 | print out what we got here in the error | dagster-io-dagster | py |
@@ -43,6 +43,9 @@ See the file COPYING for details.
)); \
} while (false)
+//Stores select query before evaluation
+struct jx *select_expr = NULL;
+
static struct jx *jx_check_errors(struct jx *j);
static struct jx *jx_eval_null(struct jx_operator *op, struct jx *left, struct jx *right) { | 1 | /*
Copyright (C) 2016- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include "jx_eval.h"
#include "debug.h"
#include "jx_function.h"
#include "jx_print.h"
#include <assert.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
// FAILOP(jx_operator *op, struct jx *left, struct jx *right, const char *message)
// left, right, and right and message are evaluated exactly once
// Builds a jx_error describing the failed operation (including the source
// line recorded on the operator) and RETURNS it from the enclosing
// function; left/right are consumed by the temporary operator node.
#define FAILOP(op, left, right, message) do { \
	assert(op); \
	assert(message); \
	struct jx *t = jx_operator(op->type, left, right); \
	char *s = jx_print_string(t); \
	struct jx *e = jx_error(jx_format( \
		"on line %d, %s: %s", \
		op->line, \
		s, \
		message \
	)); \
	jx_delete(t); \
	free(s); \
	return e; \
} while (false)

// FAILARR(struct jx *array, const char *message)
// RETURNS a jx_error for an invalid array reference from the enclosing
// function, tagged with the array's source line.
#define FAILARR(array, message) do { \
	assert(array); \
	assert(message); \
	return jx_error(jx_format( \
		"array reference on line %d: %s", \
		array->line, \
		message \
	)); \
} while (false)

static struct jx *jx_check_errors(struct jx *j);
/*
 * Evaluate a binary operator where both operands are null.
 * Only equality is defined: null==null is true, null!=null is false;
 * any other operator yields an evaluation error.
 */
static struct jx *jx_eval_null(struct jx_operator *op, struct jx *left, struct jx *right) {
	assert(op);

	switch(op->type) {
		case JX_OP_EQ:
			return jx_boolean(1);
		case JX_OP_NE:
			return jx_boolean(0);
		default: FAILOP(op, jx_null(), jx_null(), "unsupported operator on null");
	}
}
/*
 * Evaluate an operator on boolean operands.  A missing operand is
 * treated as false.  Supports equality and the logical operators;
 * for JX_OP_NOT only the right operand is meaningful.
 */
static struct jx *jx_eval_boolean(struct jx_operator *op, struct jx *left, struct jx *right) {
	int a = left ? left->u.boolean_value : 0;
	int b = right ? right->u.boolean_value : 0;
	assert(op);

	switch(op->type) {
		case JX_OP_EQ:
			return jx_boolean(a==b);
		case JX_OP_NE:
			return jx_boolean(a!=b);
		case JX_OP_AND:
			return jx_boolean(a&&b);
		case JX_OP_OR:
			return jx_boolean(a||b);
		case JX_OP_NOT:
			return jx_boolean(!b);
		default: FAILOP(op, jx_copy(left), jx_copy(right), "unsupported operator on boolean");
	}
}
/*
 * Evaluate an operator on integer operands.  A missing operand is
 * treated as 0.  Comparisons yield booleans; arithmetic yields
 * integers with C truncation semantics.  Division and modulus by
 * zero produce an evaluation error rather than undefined behavior.
 * NOTE(review): INTMIN/-1 still overflows per C semantics -- confirm
 * whether jx intends to guard that case.
 */
static struct jx *jx_eval_integer(struct jx_operator *op, struct jx *left, struct jx *right) {
	jx_int_t a = left ? left->u.integer_value : 0;
	jx_int_t b = right ? right->u.integer_value : 0;
	assert(op);

	switch(op->type) {
		case JX_OP_EQ:
			return jx_boolean(a==b);
		case JX_OP_NE:
			return jx_boolean(a!=b);
		case JX_OP_LT:
			return jx_boolean(a<b);
		case JX_OP_LE:
			return jx_boolean(a<=b);
		case JX_OP_GT:
			return jx_boolean(a>b);
		case JX_OP_GE:
			return jx_boolean(a>=b);
		case JX_OP_ADD:
			return jx_integer(a+b);
		case JX_OP_SUB:
			return jx_integer(a-b);
		case JX_OP_MUL:
			return jx_integer(a*b);
		case JX_OP_DIV:
			if(b==0) FAILOP(op, jx_copy(left), jx_copy(right), "division by zero");
			return jx_integer(a/b);
		case JX_OP_MOD:
			if(b==0) FAILOP(op, jx_copy(left), jx_copy(right), "division by zero");
			return jx_integer(a%b);
		default: FAILOP(op, jx_copy(left), jx_copy(right), "unsupported operator on integer");
	}
}
/*
Apply an operator to double operands.  A missing operand is treated
as zero.  Modulus truncates both operands to jx_int_t first; division
or modulus by (exactly) zero is reported as an error.
*/
static struct jx *jx_eval_double(struct jx_operator *op, struct jx *left, struct jx *right) {
	assert(op);
	const double a = left ? left->u.double_value : 0;
	const double b = right ? right->u.double_value : 0;
	switch (op->type) {
		case JX_OP_EQ:  return jx_boolean(a == b);
		case JX_OP_NE:  return jx_boolean(a != b);
		case JX_OP_LT:  return jx_boolean(a < b);
		case JX_OP_LE:  return jx_boolean(a <= b);
		case JX_OP_GT:  return jx_boolean(a > b);
		case JX_OP_GE:  return jx_boolean(a >= b);
		case JX_OP_ADD: return jx_double(a + b);
		case JX_OP_SUB: return jx_double(a - b);
		case JX_OP_MUL: return jx_double(a * b);
		case JX_OP_DIV:
			if (b == 0) FAILOP(op, jx_copy(left), jx_copy(right), "division by zero");
			return jx_double(a / b);
		case JX_OP_MOD:
			if (b == 0) FAILOP(op, jx_copy(left), jx_copy(right), "division by zero");
			/* modulus is defined on the integer parts of the operands */
			return jx_double((jx_int_t)a%(jx_int_t)b);
		default:
			FAILOP(op, jx_copy(left), jx_copy(right), "unsupported operator on double");
	}
}
/*
Apply an operator to string operands.  A missing operand is treated as
the empty string.  Comparisons use strcmp ordering; + concatenates.
*/
static struct jx *jx_eval_string(struct jx_operator *op, struct jx *left, struct jx *right) {
	assert(op);
	const char *a = left ? left->u.string_value : "";
	const char *b = right ? right->u.string_value : "";
	if (op->type == JX_OP_ADD) return jx_format("%s%s",a,b);
	switch (op->type) {
		case JX_OP_EQ: return jx_boolean(strcmp(a, b) == 0);
		case JX_OP_NE: return jx_boolean(strcmp(a, b) != 0);
		case JX_OP_LT: return jx_boolean(strcmp(a, b) < 0);
		case JX_OP_LE: return jx_boolean(strcmp(a, b) <= 0);
		case JX_OP_GT: return jx_boolean(strcmp(a, b) > 0);
		case JX_OP_GE: return jx_boolean(strcmp(a, b) >= 0);
		default:
			FAILOP(op, jx_copy(left), jx_copy(right), "unsupported operator on string");
	}
}
/*
Apply an operator to array operands.  Arrays support (in)equality and
concatenation via +; both operands must be present.  Concatenation is
scanned for embedded error values before being returned.
*/
static struct jx *jx_eval_array(struct jx_operator *op, struct jx *left, struct jx *right) {
	assert(op);
	if (!left || !right) FAILOP(op, jx_copy(left), jx_copy(right), "missing arguments to array operator");
	switch (op->type) {
		case JX_OP_EQ:
			return jx_boolean(jx_equals(left, right));
		case JX_OP_NE:
			return jx_boolean(!jx_equals(left, right));
		case JX_OP_ADD:
			return jx_check_errors(jx_array_concat(jx_copy(left), jx_copy(right), NULL));
		default:
			FAILOP(op, jx_copy(left), jx_copy(right), "unsupported operator on array");
	}
}
/*
Dispatch a call expression func(args).  func must be a symbol naming one
of the built-in functions; args is the (already evaluated) argument
array.  Returns an owned result, or a jx_error for a non-symbol callee
or an unknown function name.
*/
static struct jx *jx_eval_call(struct jx *func, struct jx *args, struct jx *ctx) {
	assert(func);
	assert(args);
	assert(args->type == JX_ARRAY);
	if (!jx_istype(func, JX_SYMBOL)) {
		/* BUG FIX: this error was previously constructed but never
		   returned (leaking it and falling through), and its message
		   read u.symbol_name from a value that is not a symbol. */
		char *s = jx_print_string(func);
		struct jx *err = jx_error(jx_format(
			"on line %d, unknown function: %s",
			func->line,
			s
		));
		free(s);
		return err;
	}
	if (!strcmp(func->u.symbol_name, "range")) {
		return jx_function_range(args);
	} else if (!strcmp(func->u.symbol_name, "format")) {
		return jx_function_format(args);
	} else if (!strcmp(func->u.symbol_name, "join")) {
		return jx_function_join(args);
	} else if (!strcmp(func->u.symbol_name, "ceil")) {
		return jx_function_ceil(args);
	} else if (!strcmp(func->u.symbol_name, "floor")) {
		return jx_function_floor(args);
	} else if (!strcmp(func->u.symbol_name, "basename")) {
		return jx_function_basename(args);
	} else if (!strcmp(func->u.symbol_name, "dirname")) {
		return jx_function_dirname(args);
	} else if (!strcmp(func->u.symbol_name, "listdir")) {
		return jx_function_listdir(args);
	} else if (!strcmp(func->u.symbol_name, "escape")) {
		return jx_function_escape(args);
	} else if (!strcmp(func->u.symbol_name, "template")) {
		/* template is the only builtin that needs the evaluation context */
		return jx_function_template(args, ctx);
	} else if (!strcmp(func->u.symbol_name, "len")) {
		return jx_function_len(args);
	} else {
		return jx_error(jx_format(
			"on line %d, unknown function: %s",
			func->line,
			func->u.symbol_name
		));
	}
}
/*
Evaluate a slice expression array[left:right].  Either bound may be
NULL, meaning the start or end of the array respectively; negative
bounds count from the end of the array.  Returns a new array holding
the selected elements, or a jx_error on a type mismatch.
*/
static struct jx *jx_eval_slice(struct jx *array, struct jx *slice) {
	assert(array);
	assert(slice);
	assert(slice->type == JX_OPERATOR);
	assert(slice->u.oper.type == JX_OP_SLICE);
	struct jx *left = slice->u.oper.left;
	struct jx *right = slice->u.oper.right;
	if (array->type != JX_ARRAY) {
		/* BUG FIX: previously reported right->line, which crashed when
		   the upper bound was omitted (right == NULL); report the line
		   of the slice expression itself instead. */
		return jx_error(jx_format(
			"on line %d, only arrays support slicing",
			slice->line
		));
	}
	if (left && left->type != JX_INTEGER) FAILOP((&slice->u.oper), jx_copy(left), jx_copy(right),
		"slice indices must be integers");
	if (right && right->type != JX_INTEGER) FAILOP((&slice->u.oper), jx_copy(left), jx_copy(right),
		"slice indices must be integers");
	struct jx *result = jx_array(NULL);
	int len = jx_array_length(array);
	/* NOTE: repeated jx_array_index makes this O(n^2); out-of-range
	   positions simply yield no element, as in Python slicing. */
	jx_int_t start = left ? left->u.integer_value : 0;
	jx_int_t end = right ? right->u.integer_value : len;
	if (start < 0) start += len;
	if (end < 0) end += len;
	for (jx_int_t i = start; i < end; ++i) {
		struct jx *j = jx_array_index(array, i);
		if (j) jx_array_append(result, jx_copy(j));
	}
	return result;
}
/*
Handle a lookup operator, which has two valid cases:
1 - left is an object, right is a string, return the named item in the object.
2 - left is an array, right is an integer, return the nth item in the array.
*/
static struct jx * jx_eval_lookup( struct jx *left, struct jx *right )
{
	/* left is dereferenced below, so it must be non-NULL just like right */
	assert(left);
	assert(right);
	if(left->type==JX_OBJECT && right->type==JX_STRING) {
		/* case 1: object[key] returns a copy of the named member */
		struct jx *r = jx_lookup(left,right->u.string_value);
		if(r) {
			return jx_copy(r);
		} else {
			return jx_error(jx_format(
				"lookup on line %d, key not found",
				right->line
			));
		}
	} else if(left->type==JX_ARRAY && right->type==JX_INTEGER) {
		/* case 2: array[n] walks the item list; negative n counts from the end */
		struct jx_item *item = left->u.items;
		int count = right->u.integer_value;
		if (count < 0) {
			count += jx_array_length(left);
			if (count < 0) FAILARR(right, "index out of range");
		}
		while (count > 0) {
			if (!item) FAILARR(right, "index out of range");
			item = item->next;
			count--;
		}
		if (item) {
			return jx_copy(item->value);
		} else {
			FAILARR(right, "index out of range");
		}
	} else {
		char *s = jx_print_string(right);
		struct jx *err = jx_error(jx_format(
			"on line %d, %s: invalid type for lookup",
			right->line,
			s
		));
		free(s);
		/* BUG FIX: err is already an error value; wrapping it in
		   jx_error() a second time produced a nested error object. */
		return err;
	}
}
/*
Type conversion rules:
Generally, operators are not meant to be applied to unequal types.
NULL is the result of an operator on two incompatible expressions.
Exception: integers are promoted to doubles as needed.
Exception: string+x or x+string for atomic types results in converting x to string and concatenating.
Exception: When x and y are incompatible types, x==y returns FALSE and x!=y returns TRUE.
Exception: The lookup operation can be "object[string]" or "array[integer]"
*/
/*
Evaluate one operator node.  Operands are evaluated right-first; error
values short-circuit and become the result.  Owns (and eventually
deletes) both evaluated operands except on the early-return paths for
calls, slices, and lookups, where ownership is handed off.
*/
static struct jx * jx_eval_operator( struct jx_operator *o, struct jx *context )
{
	if(!o) return 0;
	struct jx *left = NULL;
	struct jx *right = NULL;
	struct jx *result = NULL;
	right = jx_eval(o->right,context);
	if (jx_istype(right, JX_ERROR)) {
		result = right;
		right = NULL;
		goto DONE;
	}
	/* For a call, o->left is the (unevaluated) callee expression and
	   right holds the evaluated argument array.  NOTE(review): right is
	   not deleted here — presumably the call machinery takes ownership;
	   confirm against the jx_function_* implementations. */
	if (o->type == JX_OP_CALL) return jx_eval_call(o->left, right, context);
	left = jx_eval(o->left,context);
	if (jx_istype(left, JX_ERROR)) {
		result = left;
		left = NULL;
		goto DONE;
	}
	/* A bare slice is not computed here: it is re-packaged so that the
	   enclosing lookup operator can apply it to its array operand. */
	if (o->type == JX_OP_SLICE) return jx_operator(JX_OP_SLICE, left, right);
	/* Mixed operand types: apply the conversion rules documented above. */
	if((left && right) && (left->type!=right->type) ) {
		if( left->type==JX_INTEGER && right->type==JX_DOUBLE) {
			/* promote the integer operand to double */
			struct jx *n = jx_double(left->u.integer_value);
			jx_delete(left);
			left = n;
		} else if( left->type==JX_DOUBLE && right->type==JX_INTEGER) {
			struct jx *n = jx_double(right->u.integer_value);
			jx_delete(right);
			right = n;
		} else if(o->type==JX_OP_EQ) {
			/* incompatible types are simply unequal */
			jx_delete(left);
			jx_delete(right);
			return jx_boolean(0);
		} else if(o->type==JX_OP_NE) {
			jx_delete(left);
			jx_delete(right);
			return jx_boolean(1);
		} else if(o->type==JX_OP_LOOKUP) {
			/* object[string], array[integer], or array[slice] */
			struct jx *r;
			if (right->type == JX_OPERATOR && right->u.oper.type == JX_OP_SLICE) {
				r = jx_eval_slice(left, right);
			} else {
				r = jx_eval_lookup(left, right);
			}
			jx_delete(left);
			jx_delete(right);
			return r;
		} else if(o->type==JX_OP_ADD && jx_istype(left,JX_STRING) && jx_isatomic(right) ) {
			/* string + atomic: stringify the right operand and concatenate */
			char *str = jx_print_string(right);
			jx_delete(right);
			right = jx_string(str);
			free(str);
			/* fall through */
		} else if(o->type==JX_OP_ADD && jx_istype(right,JX_STRING) && jx_isatomic(left) ) {
			char *str = jx_print_string(left);
			jx_delete(left);
			left = jx_string(str);
			free(str);
			/* fall through */
		} else {
			FAILOP(o, left, right, "mismatched types for operator");
		}
	}
	/* Operands now have matching types: dispatch on the right operand,
	   which is always present (unary operators have no left operand). */
	switch(right->type) {
	case JX_NULL:
		result = jx_eval_null(o, left, right);
		break;
	case JX_BOOLEAN:
		result = jx_eval_boolean(o, left, right);
		break;
	case JX_INTEGER:
		result = jx_eval_integer(o, left, right);
		break;
	case JX_DOUBLE:
		result = jx_eval_double(o, left, right);
		break;
	case JX_STRING:
		result = jx_eval_string(o, left, right);
		break;
	case JX_ARRAY:
		result = jx_eval_array(o, left, right);
		break;
	default: FAILOP(o, left, right, "rvalue does not support operators");
	}
DONE:
	/* jx_delete(NULL) is a no-op, so the nulled-out pointers above are safe */
	jx_delete(left);
	jx_delete(right);
	return result;
}
static struct jx_item *jx_eval_comprehension(struct jx *body, struct jx_comprehension *comp, struct jx *context) {
assert(body);
assert(comp);
struct jx *list = jx_eval(comp->elements, context);
if (jx_istype(list, JX_ERROR)) return jx_item(list, NULL);
if (!jx_istype(list, JX_ARRAY)) {
return jx_item(jx_error(jx_format(
"on line %d: list comprehension takes an array",
comp->line
)), NULL);
}
struct jx_item *result = NULL;
struct jx_item *tail = NULL;
struct jx *j = NULL;
void *i = NULL;
while ((j = jx_iterate_array(list, &i))) {
struct jx *ctx = jx_copy(context);
jx_insert(ctx, jx_string(comp->variable), jx_copy(j));
if (comp->condition) {
struct jx *cond = jx_eval(comp->condition, ctx);
if (jx_istype(cond, JX_ERROR)) {
jx_delete(ctx);
jx_delete(list);
jx_item_delete(result);
return jx_item(cond, NULL);
}
if (!jx_istype(cond, JX_BOOLEAN)) {
jx_delete(ctx);
jx_delete(list);
jx_item_delete(result);
char *s = jx_print_string(cond);
struct jx *err = jx_error(jx_format(
"on line %d, %s: list comprehension condition takes a boolean",
cond->line,
s
));
free(s);
return jx_item(err, NULL);
}
int ok = cond->u.boolean_value;
jx_delete(cond);
if (!ok) {
jx_delete(ctx);
continue;
}
}
if (comp->next) {
struct jx_item *val = jx_eval_comprehension(body, comp->next, ctx);
jx_delete(ctx);
if (result) {
tail->next = val;
} else {
result = tail = val;
}
// this is going to go over the list LOTS of times
// in the various recursive calls
while (tail && tail->next) tail = tail->next;
} else {
struct jx *val = jx_eval(body, ctx);
jx_delete(ctx);
if (!val) {
jx_delete(list);
jx_item_delete(result);
return NULL;
}
if (result) {
tail->next = jx_item(val, NULL);
tail = tail->next;
} else {
result = tail = jx_item(val, NULL);
}
}
}
jx_delete(list);
return result;
}
/*
Evaluate every key and value in a pair chain, returning a new chain.
BUG FIX: the previous version evaluated key, value, and the rest of the
chain as arguments to a single jx_pair() call, leaving their evaluation
order unspecified (C11 6.5.2.2), and recursed once per pair, risking
stack overflow on very long objects.  This version evaluates in a
defined order and iterates.
*/
static struct jx_pair *jx_eval_pair(struct jx_pair *pair, struct jx *context) {
	struct jx_pair *head = NULL;
	struct jx_pair **tail = &head;
	for (; pair; pair = pair->next) {
		struct jx *key = jx_eval(pair->key, context);
		struct jx *value = jx_eval(pair->value, context);
		*tail = jx_pair(key, value, NULL);
		tail = &(*tail)->next;
	}
	return head;
}
/*
Evaluate an item chain, returning a new chain.  Comprehension items are
expanded in place: their (possibly empty) expansion is spliced ahead of
the evaluated remainder of the chain.
*/
static struct jx_item *jx_eval_item(struct jx_item *item, struct jx *context) {
	if (!item) return NULL;
	if (!item->comp) {
		/* plain item: evaluate the value and the rest of the chain */
		struct jx *value = jx_eval(item->value, context);
		return jx_item(value, jx_eval_item(item->next, context));
	}
	struct jx_item *expanded = jx_eval_comprehension(item->value, item->comp, context);
	if (!expanded) {
		/* empty expansion: the remainder of the chain stands alone */
		return jx_eval_item(item->next, context);
	}
	struct jx_item *tail = expanded;
	while (tail->next) tail = tail->next;
	tail->next = jx_eval_item(item->next, context);
	return expanded;
}
/*
Post-process a freshly built array or object: if any element (or any
object key or value) is a JX_ERROR, destroy the container and return a
copy of the first error found instead.  All other values pass through
unchanged.
*/
static struct jx *jx_check_errors(struct jx *j)
{
	struct jx *err = NULL;
	/* defensive: tolerate a failed allocation upstream */
	if (!j) return NULL;
	switch (j->type) {
		case JX_ARRAY:
			for(struct jx_item *i = j->u.items; i; i = i->next) {
				if(jx_istype(i->value, JX_ERROR)) {
					err = jx_copy(i->value);
					jx_delete(j);
					return err;
				}
			}
			return j;
		case JX_OBJECT:
			for(struct jx_pair *p = j->u.pairs; p; p = p->next) {
				/* a key error takes precedence over a value error in the same pair */
				if (jx_istype(p->key, JX_ERROR)) err = jx_copy(p->key);
				if (!err && jx_istype(p->value, JX_ERROR)) err = jx_copy(p->value);
				if (err) {
					jx_delete(j);
					return err;
				}
			}
			return j;
		default:
			return j;
	}
}
/*
Evaluate expression j in the given context (a JX_OBJECT of variable
bindings, or NULL).  Always returns a newly allocated value owned by
the caller; failures are returned as JX_ERROR values rather than NULL
(NULL is only returned for NULL input).
*/
struct jx * jx_eval( struct jx *j, struct jx *context )
{
	struct jx *result = NULL;
	if (!j) return NULL;
	if (context && !jx_istype(context, JX_OBJECT)) {
		return jx_error(jx_string("context must be an object"));
	}
	switch(j->type) {
		case JX_SYMBOL: {
			/* a bound symbol evaluates to the evaluation of its binding;
			   an unbound symbol is an error */
			struct jx *t = jx_lookup(context, j->u.symbol_name);
			if (t) {
				result = jx_eval(t,context);
				break;	/* exits the switch, not just this block */
			} else {
				return jx_error(jx_format(
					"on line %d, %s: undefined symbol",
					j->line,
					j->u.symbol_name
				));
			}
		}
		/* atomic values (including pre-existing errors) evaluate to a copy of themselves */
		case JX_DOUBLE:
		case JX_BOOLEAN:
		case JX_INTEGER:
		case JX_STRING:
		case JX_ERROR:
		case JX_NULL:
			result = jx_copy(j);
			break;
		case JX_ARRAY:
			/* evaluate every item, then collapse any embedded error into the result */
			result = jx_check_errors(jx_array(jx_eval_item(j->u.items, context)));
			break;
		case JX_OBJECT:
			result = jx_check_errors(jx_object(jx_eval_pair(j->u.pairs, context)));
			break;
		case JX_OPERATOR:
			result = jx_eval_operator(&j->u.oper, context);
			break;
	}
	return result;
}
/*
Evaluate j after merging its "define" clause (if any) into the given
context.  Returns an owned result.
BUG FIX: the empty placeholder objects created when j had no define
clause, or when context was NULL, were previously leaked.
*/
struct jx * jx_eval_with_defines( struct jx *j, struct jx *context )
{
	struct jx *tmp_defines = NULL;
	struct jx *tmp_context = NULL;
	// Find the define clause in j, if it exists.
	struct jx *defines = jx_lookup(j,"define");
	if(!defines) defines = tmp_defines = jx_object(0);
	if(!context) context = tmp_context = jx_object(0);
	// Merge the context and defines into mcontext.
	struct jx *mcontext = jx_merge(defines,context,0);
	// Now use that to evaluate j.
	struct jx * result = jx_eval(j,mcontext);
	jx_delete(mcontext);
	// jx_delete(NULL) is a no-op, so these are safe when no temporary was made.
	jx_delete(tmp_defines);
	jx_delete(tmp_context);
	return result;
}
/*vim: set noexpandtab tabstop=4: */
| 1 | 14,874 | Does this need to be global? | cooperative-computing-lab-cctools | c |
@@ -1349,7 +1349,7 @@ public final class TreeMap<K, V> implements SortedMap<K, V>, Serializable {
@Override
public Seq<V> values() {
- return iterator().map(Tuple2::_2).toStream();
+ return map(Tuple2::_2);
}
/* __ __    __  __  __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/
*
* Copyright 2014-2017 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.collection;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import io.vavr.control.Option;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.*;
import java.util.stream.Collector;
/**
* SortedMap implementation, backed by a Red/Black Tree.
*
* @param <K> Key type
* @param <V> Value type
* @author Daniel Dietrich
*/
// DEV-NOTE: use entries.min().get() in favor of iterator().next(), it is faster!
public final class TreeMap<K, V> implements SortedMap<K, V>, Serializable {
private static final long serialVersionUID = 1L;
private final RedBlackTree<Tuple2<K, V>> entries;
    // Sole constructor: wraps the backing red/black tree of (key, value) tuples.
    private TreeMap(RedBlackTree<Tuple2<K, V>> entries) {
        this.entries = entries;
    }
/**
* Returns a {@link Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(Collector)} to obtain a
* {@link TreeMap}.
* <p>
* The natural comparator is used to compare TreeMap keys.
*
* @param <K> The key type
* @param <V> The value type
* @return A {@link TreeMap} Collector.
*/
    public static <K extends Comparable<? super K>, V> Collector<Tuple2<K, V>, ArrayList<Tuple2<K, V>>, TreeMap<K, V>> collector() {
        // Entries are ordered by the natural comparator of their keys.
        return createCollector(EntryComparator.natural());
    }
/**
* Returns a {@link Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(Collector)} to obtain a
* {@link TreeMap}.
*
* @param <K> The key type
* @param <V> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A {@link TreeMap} Collector.
*/
    public static <K, V> Collector<Tuple2<K, V>, ArrayList<Tuple2<K, V>>, TreeMap<K, V>> collector(Comparator<? super K> keyComparator) {
        // Entries are ordered by the supplied key comparator.
        return createCollector(EntryComparator.of(keyComparator));
    }
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link TreeMap}.
* <p>
* The natural comparator is used to compare TreeMap keys.
*
* @param keyMapper The key mapper
* @param <K> The key type
* @param <V> The value type
* @param <T> Initial {@link java.util.stream.Stream} elements type
* @return A {@link TreeMap} Collector.
*/
public static <K extends Comparable<? super K>, V, T extends V> Collector<T, ArrayList<T>, TreeMap<K, V>> collector(
Function<? super T, ? extends K> keyMapper) {
Objects.requireNonNull(keyMapper, "key comparator is null");
Objects.requireNonNull(keyMapper, "keyMapper is null");
return createCollector(EntryComparator.natural(), keyMapper, v -> v);
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link TreeMap}.
* <p>
* The natural comparator is used to compare TreeMap keys.
*
* @param keyMapper The key mapper
* @param valueMapper The value mapper
* @param <K> The key type
* @param <V> The value type
* @param <T> Initial {@link java.util.stream.Stream} elements type
* @return A {@link TreeMap} Collector.
*/
public static <K extends Comparable<? super K>, V, T> Collector<T, ArrayList<T>, TreeMap<K, V>> collector(
Function<? super T, ? extends K> keyMapper,
Function<? super T, ? extends V> valueMapper) {
Objects.requireNonNull(keyMapper, "key comparator is null");
Objects.requireNonNull(keyMapper, "keyMapper is null");
Objects.requireNonNull(valueMapper, "valueMapper is null");
return createCollector(EntryComparator.natural(), keyMapper, valueMapper);
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link TreeMap}.
*
* @param keyMapper The key mapper
* @param <K> The key type
* @param <V> The value type
* @param <T> Initial {@link java.util.stream.Stream} elements type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A {@link TreeMap} Collector.
*/
public static <K, V, T extends V> Collector<T, ArrayList<T>, TreeMap<K, V>> collector(
Comparator<? super K> keyComparator,
Function<? super T, ? extends K> keyMapper) {
Objects.requireNonNull(keyMapper, "key comparator is null");
Objects.requireNonNull(keyMapper, "keyMapper is null");
return createCollector(EntryComparator.of(keyComparator), keyMapper, v -> v);
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link TreeMap}.
*
* @param keyMapper The key mapper
* @param valueMapper The value mapper
* @param <K> The key type
* @param <V> The value type
* @param <T> Initial {@link java.util.stream.Stream} elements type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A {@link TreeMap} Collector.
*/
public static <K, V, T> Collector<T, ArrayList<T>, TreeMap<K, V>> collector(
Comparator<? super K> keyComparator,
Function<? super T, ? extends K> keyMapper, Function<? super T, ? extends V> valueMapper) {
Objects.requireNonNull(keyMapper, "key comparator is null");
Objects.requireNonNull(keyMapper, "keyMapper is null");
Objects.requireNonNull(valueMapper, "valueMapper is null");
return createCollector(EntryComparator.of(keyComparator), keyMapper, valueMapper);
}
/**
* Returns the empty TreeMap. The underlying key comparator is the natural comparator of K.
*
* @param <K> The key type
* @param <V> The value type
* @return A new empty TreeMap.
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> empty() {
        // Backed by an empty red/black tree sorted by the natural key order.
        return new TreeMap<>(RedBlackTree.empty(EntryComparator.natural()));
    }
/**
* Returns the empty TreeMap using the given key comparator.
*
* @param <K> The key type
* @param <V> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new empty TreeMap.
*/
    public static <K, V> TreeMap<K, V> empty(Comparator<? super K> keyComparator) {
        // Backed by an empty red/black tree sorted by the supplied key comparator.
        return new TreeMap<>(RedBlackTree.empty(EntryComparator.of(keyComparator)));
    }
/**
* Narrows a widened {@code TreeMap<? extends K, ? extends V>} to {@code TreeMap<K, V>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
* <p>
* CAUTION: If {@code K} is narrowed, the underlying {@code Comparator} might fail!
*
* @param treeMap A {@code TreeMap}.
* @param <K> Key type
* @param <V> Value type
* @return the given {@code treeMap} instance as narrowed type {@code TreeMap<K, V>}.
*/
    @SuppressWarnings("unchecked")
    public static <K, V> TreeMap<K, V> narrow(TreeMap<? extends K, ? extends V> treeMap) {
        // Safe: the collection is read-only, hence covariant; the cast cannot fail at runtime.
        return (TreeMap<K, V>) treeMap;
    }
/**
* Returns a singleton {@code TreeMap}, i.e. a {@code TreeMap} of one entry.
* The underlying key comparator is the natural comparator of K.
*
* @param <K> The key type
* @param <V> The value type
* @param entry A map entry.
* @return A new TreeMap containing the given entry.
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(Tuple2<? extends K, ? extends V> entry) {
        Objects.requireNonNull(entry, "entry is null");
        // Natural key ordering; see of(Comparator, Tuple2) for a custom order.
        return createFromTuple(EntryComparator.natural(), entry);
    }
/**
* Returns a singleton {@code TreeMap}, i.e. a {@code TreeMap} of one entry using a specific key comparator.
*
* @param <K> The key type
* @param <V> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @param entry A map entry.
* @return A new TreeMap containing the given entry.
*/
    public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, Tuple2<? extends K, ? extends V> entry) {
        Objects.requireNonNull(entry, "entry is null");
        // Orders entries by the supplied key comparator.
        return createFromTuple(EntryComparator.of(keyComparator), entry);
    }
/**
* Returns a {@code TreeMap}, from a source java.util.Map.
*
* @param map A map
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given map
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> ofAll(java.util.Map<? extends K, ? extends V> map) {
        Objects.requireNonNull(map, "map is null");
        // Copies all entries of the given java.util.Map, sorted by natural key order.
        return createFromMap(EntryComparator.natural(), map);
    }
/**
* Returns a {@code TreeMap}, from entries mapped from stream.
*
* @param stream the source stream
* @param keyMapper the key mapper
* @param valueMapper the value mapper
* @param <T> The stream element type
* @param <K> The key type
* @param <V> The value type
* @return A new Map
*/
    public static <T, K extends Comparable<? super K>, V> TreeMap<K, V> ofAll(java.util.stream.Stream<? extends T> stream,
                                                                              Function<? super T, ? extends K> keyMapper,
                                                                              Function<? super T, ? extends V> valueMapper) {
        // Delegates to Maps.ofStream, seeding it with an empty naturally-ordered TreeMap.
        return Maps.ofStream(TreeMap.<K, V> empty(), stream, keyMapper, valueMapper);
    }
/**
* Returns a {@code TreeMap}, from entries mapped from stream.
*
* @param keyComparator The comparator used to sort the entries by their key.
* @param stream the source stream
* @param keyMapper the key mapper
* @param valueMapper the value mapper
* @param <T> The stream element type
* @param <K> The key type
* @param <V> The value type
* @return A new Map
*/
    public static <T, K, V> TreeMap<K, V> ofAll(Comparator<? super K> keyComparator,
                                                java.util.stream.Stream<? extends T> stream,
                                                Function<? super T, ? extends K> keyMapper,
                                                Function<? super T, ? extends V> valueMapper) {
        // Delegates to Maps.ofStream, seeding it with an empty comparator-ordered TreeMap.
        return Maps.ofStream(empty(keyComparator), stream, keyMapper, valueMapper);
    }
/**
* Returns a {@code TreeMap}, from entries mapped from stream.
*
* @param stream the source stream
* @param entryMapper the entry mapper
* @param <T> The stream element type
* @param <K> The key type
* @param <V> The value type
* @return A new Map
*/
    public static <T, K extends Comparable<? super K>, V> TreeMap<K, V> ofAll(java.util.stream.Stream<? extends T> stream,
                                                                              Function<? super T, Tuple2<? extends K, ? extends V>> entryMapper) {
        // Delegates to Maps.ofStream, seeding it with an empty naturally-ordered TreeMap.
        return Maps.ofStream(TreeMap.<K, V> empty(), stream, entryMapper);
    }
/**
* Returns a {@code TreeMap}, from entries mapped from stream.
*
* @param keyComparator The comparator used to sort the entries by their key.
* @param stream the source stream
* @param entryMapper the entry mapper
* @param <T> The stream element type
* @param <K> The key type
* @param <V> The value type
* @return A new Map
*/
    public static <T, K, V> TreeMap<K, V> ofAll(Comparator<? super K> keyComparator,
                                                java.util.stream.Stream<? extends T> stream,
                                                Function<? super T, Tuple2<? extends K, ? extends V>> entryMapper) {
        // Delegates to Maps.ofStream, seeding it with an empty comparator-ordered TreeMap.
        return Maps.ofStream(empty(keyComparator), stream, entryMapper);
    }
/**
* Returns a {@code TreeMap}, from a source java.util.Map.
*
* @param keyComparator The comparator used to sort the entries by their key.
* @param map A map
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given map
*/
    public static <K, V> TreeMap<K, V> ofAll(Comparator<? super K> keyComparator, java.util.Map<? extends K, ? extends V> map) {
        Objects.requireNonNull(map, "map is null");
        // Copies all entries of the given java.util.Map, sorted by the supplied comparator.
        return createFromMap(EntryComparator.of(keyComparator), map);
    }
/**
* Returns a singleton {@code TreeMap}, i.e. a {@code TreeMap} of one element.
*
* @param key A singleton map key.
* @param value A singleton map value.
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entry
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K key, V value) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), key, value);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3, k4, v4);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
    public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8, K k9, V v9) {
        // Delegates to createFromPairs using the natural key ordering.
        return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9);
    }
/**
* Creates a {@code TreeMap} of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param k10 a key for the map
* @param v10 the value for k10
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K extends Comparable<? super K>, V> TreeMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8, K k9, V v9, K k10, V v10) {
return createFromPairs(EntryComparator.natural(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9, k10, v10);
}
/**
 * Returns a singleton {@code TreeMap}, i.e. a {@code TreeMap} of one element.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param key A singleton map key.
 * @param value A singleton map value.
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entry
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K key, V value) {
    // gather the pair first, then build the map with the given key ordering
    final Object[] pair = { key, value };
    return createFromPairs(EntryComparator.of(keyComparator), pair);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2) {
    final Object[] pairs = { k1, v1, k2, v2 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param k4 a key for the map
 * @param v4 the value for k4
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3, k4, v4 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param k4 a key for the map
 * @param v4 the value for k4
 * @param k5 a key for the map
 * @param v5 the value for k5
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3, k4, v4, k5, v5 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param k4 a key for the map
 * @param v4 the value for k4
 * @param k5 a key for the map
 * @param v5 the value for k5
 * @param k6 a key for the map
 * @param v6 the value for k6
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param k4 a key for the map
 * @param v4 the value for k4
 * @param k5 a key for the map
 * @param v5 the value for k5
 * @param k6 a key for the map
 * @param v6 the value for k6
 * @param k7 a key for the map
 * @param v7 the value for k7
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param k4 a key for the map
 * @param v4 the value for k4
 * @param k5 a key for the map
 * @param v5 the value for k5
 * @param k6 a key for the map
 * @param v6 the value for k6
 * @param k7 a key for the map
 * @param v7 the value for k7
 * @param k8 a key for the map
 * @param v8 the value for k8
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param k4 a key for the map
 * @param v4 the value for k4
 * @param k5 a key for the map
 * @param v5 the value for k5
 * @param k6 a key for the map
 * @param v6 the value for k6
 * @param k7 a key for the map
 * @param v7 the value for k7
 * @param k8 a key for the map
 * @param v8 the value for k8
 * @param k9 a key for the map
 * @param v9 the value for k9
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8, K k9, V v9) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Creates a {@code TreeMap} of the given list of key-value pairs.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param k1 a key for the map
 * @param v1 the value for k1
 * @param k2 a key for the map
 * @param v2 the value for k2
 * @param k3 a key for the map
 * @param v3 the value for k3
 * @param k4 a key for the map
 * @param v4 the value for k4
 * @param k5 a key for the map
 * @param v5 the value for k5
 * @param k6 a key for the map
 * @param v6 the value for k6
 * @param k7 a key for the map
 * @param v7 the value for k7
 * @param k8 a key for the map
 * @param v8 the value for k8
 * @param k9 a key for the map
 * @param v9 the value for k9
 * @param k10 a key for the map
 * @param v10 the value for k10
 * @param <K> The key type
 * @param <V> The value type
 * @return A new Map containing the given entries
 */
public static <K, V> TreeMap<K, V> of(Comparator<? super K> keyComparator, K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8, K k9, V v9, K k10, V v10) {
    final Object[] pairs = { k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9, k10, v10 };
    return createFromPairs(EntryComparator.of(keyComparator), pairs);
}
/**
 * Returns a TreeMap containing {@code n} values of a given Function {@code f}
 * over a range of integer values from 0 to {@code n - 1}.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param keyComparator The comparator used to sort the entries by their key
 * @param n The number of elements in the TreeMap
 * @param f The Function computing element values
 * @return A TreeMap consisting of elements {@code f(0),f(1), ..., f(n - 1)}
 * @throws NullPointerException if {@code keyComparator} or {@code f} are null
 */
public static <K, V> TreeMap<K, V> tabulate(Comparator<? super K> keyComparator, int n, Function<? super Integer, ? extends Tuple2<? extends K, ? extends V>> f) {
    Objects.requireNonNull(f, "f is null");
    // keyComparator is null-checked inside EntryComparator.of
    return createTreeMap(EntryComparator.of(keyComparator), Collections.tabulate(n, f));
}
/**
 * Returns a TreeMap containing {@code n} values of a given Function {@code f}
 * over a range of integer values from 0 to {@code n - 1}.
 * The underlying key comparator is the natural comparator of K.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param n The number of elements in the TreeMap
 * @param f The Function computing element values
 * @return A TreeMap consisting of elements {@code f(0),f(1), ..., f(n - 1)}
 * @throws NullPointerException if {@code f} is null
 */
public static <K extends Comparable<? super K>, V> TreeMap<K, V> tabulate(int n, Function<? super Integer, ? extends Tuple2<? extends K, ? extends V>> f) {
    Objects.requireNonNull(f, "f is null");
    return createTreeMap(EntryComparator.natural(), Collections.tabulate(n, f));
}
/**
 * Returns a TreeMap containing {@code n} values supplied by a given Supplier {@code s}.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param keyComparator The comparator used to sort the entries by their key
 * @param n The number of elements in the TreeMap
 * @param s The Supplier computing element values
 * @return A TreeMap of size {@code n}, where each element contains the result supplied by {@code s}.
 * @throws NullPointerException if {@code keyComparator} or {@code s} are null
 */
public static <K, V> TreeMap<K, V> fill(Comparator<? super K> keyComparator, int n, Supplier<? extends Tuple2<? extends K, ? extends V>> s) {
    Objects.requireNonNull(s, "s is null");
    // keyComparator is null-checked inside EntryComparator.of.
    // The former @SuppressWarnings("unchecked") was removed: the body performs no
    // unchecked operation, and the structurally identical tabulate overload carries no
    // such annotation either.
    return createTreeMap(EntryComparator.of(keyComparator), Collections.fill(n, s));
}
/**
 * Returns a TreeMap containing {@code n} values supplied by a given Supplier {@code s}.
 * The underlying key comparator is the natural comparator of K.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param n The number of elements in the TreeMap
 * @param s The Supplier computing element values
 * @return A TreeMap of size {@code n}, where each element contains the result supplied by {@code s}.
 * @throws NullPointerException if {@code s} is null
 */
public static <K extends Comparable<? super K>, V> TreeMap<K, V> fill(int n, Supplier<? extends Tuple2<? extends K, ? extends V>> s) {
    Objects.requireNonNull(s, "s is null");
    // delegates to the generic builder with the natural entry ordering
    return createTreeMap(EntryComparator.natural(), Collections.fill(n, s));
}
/**
 * Creates a {@code TreeMap} of the given entries using the natural key comparator.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param entries Map entries
 * @return A new TreeMap containing the given entries.
 */
@SuppressWarnings("varargs")
@SafeVarargs
public static <K extends Comparable<? super K>, V> TreeMap<K, V> ofEntries(Tuple2<? extends K, ? extends V>... entries) {
    return createFromTuples(EntryComparator.natural(), entries);
}
/**
 * Creates a {@code TreeMap} of the given entries using the given key comparator.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param entries Map entries
 * @return A new TreeMap containing the given entries.
 */
// NOTE(review): the "unchecked" entry below looks unnecessary — the natural-ordering
// overload above compiles with "varargs" alone; confirm and align.
@SuppressWarnings({ "unchecked", "varargs" })
@SafeVarargs
public static <K, V> TreeMap<K, V> ofEntries(Comparator<? super K> keyComparator, Tuple2<? extends K, ? extends V>... entries) {
    return createFromTuples(EntryComparator.of(keyComparator), entries);
}
/**
 * Creates a {@code TreeMap} of the given entries using the natural key comparator.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param entries Map entries
 * @return A new TreeMap containing the given entries.
 */
@SuppressWarnings("varargs")
@SafeVarargs
public static <K extends Comparable<? super K>, V> TreeMap<K, V> ofEntries(java.util.Map.Entry<? extends K, ? extends V>... entries) {
    return createFromMapEntries(EntryComparator.natural(), entries);
}
/**
 * Creates a {@code TreeMap} of the given entries using the given key comparator.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param entries Map entries
 * @return A new TreeMap containing the given entries.
 */
@SuppressWarnings("varargs")
@SafeVarargs
public static <K, V> TreeMap<K, V> ofEntries(Comparator<? super K> keyComparator, java.util.Map.Entry<? extends K, ? extends V>... entries) {
    return createFromMapEntries(EntryComparator.of(keyComparator), entries);
}
/**
 * Creates a {@code TreeMap} of the given entries.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param entries Map entries
 * @return A new TreeMap containing the given entries.
 */
public static <K extends Comparable<? super K>, V> TreeMap<K, V> ofEntries(Iterable<? extends Tuple2<? extends K, ? extends V>> entries) {
    return createTreeMap(EntryComparator.natural(), entries);
}
/**
 * Creates a {@code TreeMap} of the given entries.
 *
 * @param <K> The key type
 * @param <V> The value type
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param entries Map entries
 * @return A new TreeMap containing the given entries.
 */
// NOTE(review): "unchecked" looks unnecessary here as well — the natural-ordering
// Iterable overload above has no annotation; confirm and align.
@SuppressWarnings("unchecked")
public static <K, V> TreeMap<K, V> ofEntries(Comparator<? super K> keyComparator, Iterable<? extends Tuple2<? extends K, ? extends V>> entries) {
    return createTreeMap(EntryComparator.of(keyComparator), entries);
}
// -- TreeMap API
@Override
public <K2, V2> TreeMap<K2, V2> bimap(Function<? super K, ? extends K2> keyMapper, Function<? super V, ? extends V2> valueMapper) {
    // the mapped keys K2 are ordered by their natural comparator
    return bimap(this, EntryComparator.natural(), keyMapper, valueMapper);
}
@Override
public <K2, V2> TreeMap<K2, V2> bimap(Comparator<? super K2> keyComparator,
        Function<? super K, ? extends K2> keyMapper, Function<? super V, ? extends V2> valueMapper) {
    // the mapped keys K2 are ordered by the given comparator
    return bimap(this, EntryComparator.of(keyComparator), keyMapper, valueMapper);
}
// The following Map API methods are implemented once in Maps and are parameterized
// with this::createFromEntries, so each result is again a TreeMap built with this
// map's entry comparator.
@Override
public Tuple2<V, TreeMap<K, V>> computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
    return Maps.computeIfAbsent(this, key, mappingFunction);
}
@Override
public Tuple2<Option<V>, TreeMap<K, V>> computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    return Maps.computeIfPresent(this, key, remappingFunction);
}
@Override
public boolean containsKey(K key) {
    // the entry comparator compares keys only (see EntryComparator), so the value is irrelevant
    return entries.contains(new Tuple2<>(key, /*ignored*/null));
}
@Override
public TreeMap<K, V> distinct() {
    return Maps.distinct(this);
}
@Override
public TreeMap<K, V> distinctBy(Comparator<? super Tuple2<K, V>> comparator) {
    return Maps.distinctBy(this, this::createFromEntries, comparator);
}
@Override
public <U> TreeMap<K, V> distinctBy(Function<? super Tuple2<K, V>, ? extends U> keyExtractor) {
    return Maps.distinctBy(this, this::createFromEntries, keyExtractor);
}
@Override
public TreeMap<K, V> drop(int n) {
    return Maps.drop(this, this::createFromEntries, this::emptyInstance, n);
}
@Override
public TreeMap<K, V> dropRight(int n) {
    return Maps.dropRight(this, this::createFromEntries, this::emptyInstance, n);
}
@Override
public TreeMap<K, V> dropUntil(Predicate<? super Tuple2<K, V>> predicate) {
    return Maps.dropUntil(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> dropWhile(Predicate<? super Tuple2<K, V>> predicate) {
    return Maps.dropWhile(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> filter(BiPredicate<? super K, ? super V> predicate) {
    return Maps.filter(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> filter(Predicate<? super Tuple2<K, V>> predicate) {
    return Maps.filter(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> filterKeys(Predicate<? super K> predicate) {
    return Maps.filterKeys(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> filterValues(Predicate<? super V> predicate) {
    return Maps.filterValues(this, this::createFromEntries, predicate);
}
@Override
public <K2, V2> TreeMap<K2, V2> flatMap(BiFunction<? super K, ? super V, ? extends Iterable<Tuple2<K2, V2>>> mapper) {
    // the resulting keys K2 are ordered by their natural comparator
    return flatMap(this, EntryComparator.natural(), mapper);
}
@Override
public <K2, V2> TreeMap<K2, V2> flatMap(Comparator<? super K2> keyComparator,
        BiFunction<? super K, ? super V, ? extends Iterable<Tuple2<K2, V2>>> mapper) {
    // the resulting keys K2 are ordered by the given comparator
    return flatMap(this, EntryComparator.of(keyComparator), mapper);
}
@Override
public Option<V> get(K key) {
    // key-only lookup: the entry comparator ignores the value component of the probe tuple
    return entries.find(new Tuple2<>(key, null)).map(Tuple2::_2);
}
@Override
public V getOrElse(K key, V defaultValue) {
    // falls back to defaultValue when the key is absent
    return get(key).getOrElse(defaultValue);
}
@Override
public <C> Map<C, TreeMap<K, V>> groupBy(Function<? super Tuple2<K, V>, ? extends C> classifier) {
    return Maps.groupBy(this, this::createFromEntries, classifier);
}
@Override
public Iterator<TreeMap<K, V>> grouped(int size) {
    return Maps.grouped(this, this::createFromEntries, size);
}
@Override
public Tuple2<K, V> head() {
    // the smallest entry (by the entry comparator) is the head of a sorted map
    final Option<Tuple2<K, V>> smallest = entries.min();
    if (smallest.isEmpty()) {
        throw new NoSuchElementException("head of empty TreeMap");
    }
    return smallest.get();
}
@Override
public TreeMap<K, V> init() {
    // guard clause: init of the empty map is undefined
    if (isEmpty()) {
        throw new UnsupportedOperationException("init of empty TreeMap");
    }
    // drop the greatest entry (by the entry comparator)
    final Tuple2<K, V> greatest = entries.max().get();
    return new TreeMap<>(entries.delete(greatest));
}
@Override
public Option<TreeMap<K, V>> initOption() {
    return Maps.initOption(this);
}
/**
 * A {@code TreeMap}'s value is computed synchronously.
 *
 * @return false
 */
@Override
public boolean isAsync() {
    return false;
}
@Override
public boolean isEmpty() {
    return entries.isEmpty();
}
/**
 * A {@code TreeMap}'s value is computed eagerly.
 *
 * @return false
 */
@Override
public boolean isLazy() {
    return false;
}
@Override
public Iterator<Tuple2<K, V>> iterator() {
    // iterates the entries in comparator order
    return entries.iterator();
}
@Override
public SortedSet<K> keySet() {
    // the key set keeps this map's key comparator
    return TreeSet.ofAll(comparator(), iterator().map(Tuple2::_1));
}
@Override
public <K2, V2> TreeMap<K2, V2> map(BiFunction<? super K, ? super V, Tuple2<K2, V2>> mapper) {
    // the mapped keys K2 are ordered by their natural comparator
    return map(this, EntryComparator.natural(), mapper);
}
@Override
public <K2, V2> TreeMap<K2, V2> map(Comparator<? super K2> keyComparator,
        BiFunction<? super K, ? super V, Tuple2<K2, V2>> mapper) {
    Objects.requireNonNull(keyComparator, "keyComparator is null");
    return map(this, EntryComparator.of(keyComparator), mapper);
}
@Override
public <K2> TreeMap<K2, V> mapKeys(Function<? super K, ? extends K2> keyMapper) {
    Objects.requireNonNull(keyMapper, "keyMapper is null");
    // keys change type, so the result uses K2's natural ordering
    return map((k, v) -> Tuple.of(keyMapper.apply(k), v));
}
@Override
public <K2> TreeMap<K2, V> mapKeys(Function<? super K, ? extends K2> keyMapper, BiFunction<? super V, ? super V, ? extends V> valueMerge) {
    // valueMerge resolves collisions when two keys map to the same K2
    final Comparator<K2> comparator = Comparators.naturalComparator();
    return Collections.mapKeys(this, TreeMap.<K2, V> empty(comparator), keyMapper, valueMerge);
}
@Override
public <W> TreeMap<K, W> mapValues(Function<? super V, ? extends W> valueMapper) {
    Objects.requireNonNull(valueMapper, "valueMapper is null");
    // keys are untouched, so this map's comparator is preserved
    return map(comparator(), (k, v) -> Tuple.of(k, valueMapper.apply(v)));
}
@Override
public TreeMap<K, V> merge(Map<? extends K, ? extends V> that) {
    return Maps.merge(this, this::createFromEntries, that);
}
@Override
public <U extends V> TreeMap<K, V> merge(Map<? extends K, U> that,
        BiFunction<? super V, ? super U, ? extends V> collisionResolution) {
    // collisionResolution decides the value when a key exists in both maps
    return Maps.merge(this, this::createFromEntries, that, collisionResolution);
}
/**
 * Returns this {@code TreeMap} if it is nonempty,
 * otherwise {@code TreeMap} created from iterable, using existing comparator.
 *
 * @param other An alternative {@code Traversable}
 * @return this {@code TreeMap} if it is nonempty,
 * otherwise {@code TreeMap} created from iterable, using existing comparator.
 */
@Override
public TreeMap<K, V> orElse(Iterable<? extends Tuple2<K, V>> other) {
    return isEmpty() ? ofEntries(comparator(), other) : this;
}
/**
 * Returns this {@code TreeMap} if it is nonempty,
 * otherwise {@code TreeMap} created from result of evaluating supplier, using existing comparator.
 *
 * @param supplier A supplier of an alternative {@code Traversable}
 * @return this {@code TreeMap} if it is nonempty,
 * otherwise {@code TreeMap} created from result of evaluating supplier, using existing comparator.
 */
@Override
public TreeMap<K, V> orElse(Supplier<? extends Iterable<? extends Tuple2<K, V>>> supplier) {
    // the supplier is only evaluated when this map is empty
    return isEmpty() ? ofEntries(comparator(), supplier.get()) : this;
}
@Override
public Tuple2<TreeMap<K, V>, TreeMap<K, V>> partition(Predicate<? super Tuple2<K, V>> predicate) {
    return Maps.partition(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> peek(Consumer<? super Tuple2<K, V>> action) {
    return Maps.peek(this, action);
}
@Override
public <U extends V> TreeMap<K, V> put(K key, U value, BiFunction<? super V, ? super U, ? extends V> merge) {
    // merge combines the existing and the new value when the key is present
    return Maps.put(this, key, value, merge);
}
@Override
public TreeMap<K, V> put(K key, V value) {
    // returns a new map; this instance is immutable
    return new TreeMap<>(entries.insert(new Tuple2<>(key, value)));
}
@Override
public TreeMap<K, V> put(Tuple2<? extends K, ? extends V> entry) {
    return Maps.put(this, entry);
}
@Override
public <U extends V> TreeMap<K, V> put(Tuple2<? extends K, U> entry,
        BiFunction<? super V, ? super U, ? extends V> merge) {
    return Maps.put(this, entry, merge);
}
@Override
public TreeMap<K, V> remove(K key) {
    // key-only probe: the entry comparator ignores the value component
    final Tuple2<K, V> probe = new Tuple2<>(key, /*ignored*/ null);
    // return the same instance when nothing is removed
    return entries.contains(probe) ? new TreeMap<>(entries.delete(probe)) : this;
}
@Override
public TreeMap<K, V> removeAll(BiPredicate<? super K, ? super V> predicate) {
    // removes every entry whose (key, value) matches the predicate
    return Maps.removeAll(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> removeAll(Iterable<? extends K> keys) {
    // delete each present key from a working copy of the tree
    RedBlackTree<Tuple2<K, V>> remaining = entries;
    for (K key : keys) {
        final Tuple2<K, V> probe = new Tuple2<>(key, /*ignored*/ null);
        if (remaining.contains(probe)) {
            remaining = remaining.delete(probe);
        }
    }
    // return the same instance when nothing was removed
    return (remaining.size() == entries.size()) ? this : new TreeMap<>(remaining);
}
@Override
public TreeMap<K, V> removeKeys(Predicate<? super K> predicate) {
    return Maps.removeKeys(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> removeValues(Predicate<? super V> predicate) {
    return Maps.removeValues(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> replace(Tuple2<K, V> currentElement, Tuple2<K, V> newElement) {
    return Maps.replace(this, currentElement, newElement);
}
@Override
public TreeMap<K, V> replaceAll(Tuple2<K, V> currentElement, Tuple2<K, V> newElement) {
    return Maps.replaceAll(this, currentElement, newElement);
}
@Override
public TreeMap<K, V> replaceValue(K key, V value) {
    return Maps.replaceValue(this, key, value);
}
@Override
public TreeMap<K, V> replace(K key, V oldValue, V newValue) {
    return Maps.replace(this, key, oldValue, newValue);
}
@Override
public TreeMap<K, V> replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
    return Maps.replaceAll(this, function);
}
@Override
public TreeMap<K, V> retainAll(Iterable<? extends Tuple2<K, V>> elements) {
    Objects.requireNonNull(elements, "elements is null");
    // rebuild a tree from scratch, keeping only the given elements that this map contains
    RedBlackTree<Tuple2<K, V>> kept = RedBlackTree.empty(entries.comparator());
    for (Tuple2<K, V> element : elements) {
        if (contains(element)) {
            kept = kept.insert(element);
        }
    }
    return new TreeMap<>(kept);
}
@Override
public TreeMap<K, V> scan(
        Tuple2<K, V> zero,
        BiFunction<? super Tuple2<K, V>, ? super Tuple2<K, V>, ? extends Tuple2<K, V>> operation) {
    return Maps.scan(this, zero, operation, this::createFromEntries);
}
@Override
public int size() {
    return entries.size();
}
@Override
public Iterator<TreeMap<K, V>> slideBy(Function<? super Tuple2<K, V>, ?> classifier) {
    return Maps.slideBy(this, this::createFromEntries, classifier);
}
@Override
public Iterator<TreeMap<K, V>> sliding(int size) {
    return Maps.sliding(this, this::createFromEntries, size);
}
@Override
public Iterator<TreeMap<K, V>> sliding(int size, int step) {
    return Maps.sliding(this, this::createFromEntries, size, step);
}
@Override
public Tuple2<TreeMap<K, V>, TreeMap<K, V>> span(Predicate<? super Tuple2<K, V>> predicate) {
    return Maps.span(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> tail() {
    // guard clause: tail of the empty map is undefined
    if (isEmpty()) {
        throw new UnsupportedOperationException("tail of empty TreeMap");
    }
    // drop the smallest entry (by the entry comparator)
    final Tuple2<K, V> smallest = entries.min().get();
    return new TreeMap<>(entries.delete(smallest));
}
@Override
public Option<TreeMap<K, V>> tailOption() {
    return Maps.tailOption(this);
}
@Override
public TreeMap<K, V> take(int n) {
    return Maps.take(this, this::createFromEntries, n);
}
@Override
public TreeMap<K, V> takeRight(int n) {
    return Maps.takeRight(this, this::createFromEntries, n);
}
@Override
public TreeMap<K, V> takeUntil(Predicate<? super Tuple2<K, V>> predicate) {
    return Maps.takeUntil(this, this::createFromEntries, predicate);
}
@Override
public TreeMap<K, V> takeWhile(Predicate<? super Tuple2<K, V>> predicate) {
    return Maps.takeWhile(this, this::createFromEntries, predicate);
}
@Override
public java.util.TreeMap<K, V> toJavaMap() {
    // the java.util.TreeMap keeps this map's key comparator
    return toJavaMap(() -> new java.util.TreeMap<>(comparator()), t -> t);
}
@Override
public Seq<V> values() {
    // values in key order, as a Stream
    return iterator().map(Tuple2::_2).toStream();
}
// -- Object
@Override
public boolean equals(Object o) {
    return Collections.equals(this, o);
}
@Override
public int hashCode() {
    // order-independent hash, delegated to Collections.hashUnordered
    return Collections.hashUnordered(this);
}
@Override
public String stringPrefix() {
    return "TreeMap";
}
@Override
public String toString() {
    return mkString(stringPrefix() + "(", ", ", ")");
}
// -- private helpers
// Maps each entry of the given map with keyMapper/valueMapper and collects the results
// into a new tree ordered by entryComparator.
private static <K, K2, V, V2> TreeMap<K2, V2> bimap(TreeMap<K, V> map, EntryComparator<K2, V2> entryComparator,
        Function<? super K, ? extends K2> keyMapper, Function<? super V, ? extends V2> valueMapper) {
    Objects.requireNonNull(keyMapper, "keyMapper is null");
    Objects.requireNonNull(valueMapper, "valueMapper is null");
    return createTreeMap(entryComparator, map.entries, entry -> entry.map(keyMapper, valueMapper));
}
// Applies mapper to each (key, value) and flattens the resulting Iterables into a new
// tree ordered by entryComparator.
private static <K, V, K2, V2> TreeMap<K2, V2> flatMap(TreeMap<K, V> map, EntryComparator<K2, V2> entryComparator,
        BiFunction<? super K, ? super V, ? extends Iterable<Tuple2<K2, V2>>> mapper) {
    Objects.requireNonNull(mapper, "mapper is null");
    return createTreeMap(entryComparator, map.entries.iterator().flatMap(entry -> mapper.apply(entry._1, entry._2)));
}
// Maps each entry of the given map to a single new entry and collects the results into
// a new tree ordered by entryComparator.
private static <K, K2, V, V2> TreeMap<K2, V2> map(TreeMap<K, V> map, EntryComparator<K2, V2> entryComparator,
        BiFunction<? super K, ? super V, Tuple2<K2, V2>> mapper) {
    Objects.requireNonNull(mapper, "mapper is null");
    return createTreeMap(entryComparator, map.entries, entry -> entry.map(mapper));
}
// -- internal factory methods
// Builds a Collector that accumulates Tuple2 entries into an ArrayList and finishes by
// creating a TreeMap ordered by entryComparator. The locals are explicitly typed,
// presumably to guide generic inference for Collector.of — TODO confirm before inlining.
private static <K, V> Collector<Tuple2<K, V>, ArrayList<Tuple2<K, V>>, TreeMap<K, V>> createCollector(EntryComparator<K, V> entryComparator) {
    final Supplier<ArrayList<Tuple2<K, V>>> supplier = ArrayList::new;
    final BiConsumer<ArrayList<Tuple2<K, V>>, Tuple2<K, V>> accumulator = ArrayList::add;
    final BinaryOperator<ArrayList<Tuple2<K, V>>> combiner = (left, right) -> {
        left.addAll(right);
        return left;
    };
    final Function<ArrayList<Tuple2<K, V>>, TreeMap<K, V>> finisher = list -> createTreeMap(entryComparator, list);
    return Collector.of(supplier, accumulator, combiner, finisher);
}
// As above, but accumulates arbitrary elements T and derives each entry's key and value
// with keyMapper/valueMapper in the finisher.
private static <K, V, T> Collector<T, ArrayList<T>, TreeMap<K, V>> createCollector(
        EntryComparator<K, V> entryComparator,
        Function<? super T, ? extends K> keyMapper, Function<? super T, ? extends V> valueMapper) {
    final Supplier<ArrayList<T>> supplier = ArrayList::new;
    final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
    final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
        left.addAll(right);
        return left;
    };
    final Function<ArrayList<T>, TreeMap<K, V>> finisher = arr -> createTreeMap(entryComparator, Iterator.ofAll(arr)
            .map(t -> Tuple.of(keyMapper.apply(t), valueMapper.apply(t))));
    return Collector.of(supplier, accumulator, combiner, finisher);
}
// Folds the given entries into an initially empty red-black tree ordered by entryComparator.
@SuppressWarnings("unchecked")
private static <K, V> TreeMap<K, V> createTreeMap(EntryComparator<K, V> entryComparator,
        Iterable<? extends Tuple2<? extends K, ? extends V>> entries) {
    Objects.requireNonNull(entries, "entries is null");
    RedBlackTree<Tuple2<K, V>> result = RedBlackTree.empty(entryComparator);
    for (Tuple2<K, V> tuple : (Iterable<Tuple2<K, V>>) entries) {
        result = result.insert(tuple);
    }
    return new TreeMap<>(result);
}
// Maps each entry with entryMapper and folds the results into a new tree ordered by
// entryComparator.
private static <K, K2, V, V2> TreeMap<K2, V2> createTreeMap(EntryComparator<K2, V2> entryComparator,
        Iterable<Tuple2<K, V>> entries, Function<Tuple2<K, V>, Tuple2<K2, V2>> entryMapper) {
    RedBlackTree<Tuple2<K2, V2>> result = RedBlackTree.empty(entryComparator);
    for (Tuple2<K, V> tuple : entries) {
        result = result.insert(entryMapper.apply(tuple));
    }
    return new TreeMap<>(result);
}
// Converts a java.util.Map into a TreeMap ordered by entryComparator.
@SuppressWarnings("unchecked")
private static <K, V> TreeMap<K, V> createFromMap(EntryComparator<K, V> entryComparator, java.util.Map<? extends K, ? extends V> map) {
    Objects.requireNonNull(map, "map is null");
    RedBlackTree<Tuple2<K, V>> result = RedBlackTree.empty(entryComparator);
    for (java.util.Map.Entry<K, V> mapEntry : ((java.util.Map<K, V>) map).entrySet()) {
        result = result.insert(Tuple.of(mapEntry.getKey(), mapEntry.getValue()));
    }
    return new TreeMap<>(result);
}
// Builds a singleton TreeMap from one entry.
@SuppressWarnings("unchecked")
private static <K, V> TreeMap<K, V> createFromTuple(EntryComparator<K, V> entryComparator, Tuple2<? extends K, ? extends V> entry) {
    Objects.requireNonNull(entry, "entry is null");
    final Tuple2<K, V> tuple = (Tuple2<K, V>) entry;
    return new TreeMap<>(RedBlackTree.of(entryComparator, tuple));
}
// Folds the given tuples into a new tree ordered by entryComparator.
@SuppressWarnings("unchecked")
private static <K, V> TreeMap<K, V> createFromTuples(EntryComparator<K, V> entryComparator, Tuple2<? extends K, ? extends V>... entries) {
    Objects.requireNonNull(entries, "entries is null");
    RedBlackTree<Tuple2<K, V>> result = RedBlackTree.empty(entryComparator);
    for (Tuple2<? extends K, ? extends V> tuple : entries) {
        result = result.insert((Tuple2<K, V>) tuple);
    }
    return new TreeMap<>(result);
}
// Folds the given java.util.Map entries into a new tree ordered by entryComparator.
@SafeVarargs
private static <K, V> TreeMap<K, V> createFromMapEntries(EntryComparator<K, V> entryComparator, java.util.Map.Entry<? extends K, ? extends V>... entries) {
    Objects.requireNonNull(entries, "entries is null");
    RedBlackTree<Tuple2<K, V>> result = RedBlackTree.empty(entryComparator);
    for (java.util.Map.Entry<? extends K, ? extends V> mapEntry : entries) {
        result = result.insert(Tuple.of(mapEntry.getKey(), mapEntry.getValue()));
    }
    return new TreeMap<>(result);
}
// Interprets the flat array as alternating keys and values and folds the pairs into a
// new tree ordered by entryComparator.
@SuppressWarnings("unchecked")
private static <K, V> TreeMap<K, V> createFromPairs(EntryComparator<K, V> entryComparator, Object... pairs) {
    RedBlackTree<Tuple2<K, V>> result = RedBlackTree.empty(entryComparator);
    for (int i = 0; i < pairs.length; i += 2) {
        result = result.insert(Tuple.of((K) pairs[i], (V) pairs[i + 1]));
    }
    return new TreeMap<>(result);
}
private TreeMap<K, V> createFromEntries(Iterable<Tuple2<K, V>> tuples) {
return createTreeMap((EntryComparator<K, V>) entries.comparator(), tuples);
}
private TreeMap<K, V> emptyInstance() {
return isEmpty() ? this : new TreeMap<>(entries.emptyInstance());
}
@Override
public Comparator<K> comparator() {
return ((EntryComparator<K, V>) entries.comparator()).keyComparator();
}
// -- internal types
private interface EntryComparator<K, V> extends Comparator<Tuple2<K, V>>, Serializable {
long serialVersionUID = 1L;
static <K, V> EntryComparator<K, V> of(Comparator<? super K> keyComparator) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
return new Specific<>(keyComparator);
}
static <K, V> EntryComparator<K, V> natural() {
return Natural.instance();
}
Comparator<K> keyComparator();
// -- internal impls
final class Specific<K, V> implements EntryComparator<K, V> {
private static final long serialVersionUID = 1L;
private final Comparator<K> keyComparator;
@SuppressWarnings("unchecked")
Specific(Comparator<? super K> keyComparator) {
this.keyComparator = (Comparator<K>) keyComparator;
}
@Override
public int compare(Tuple2<K, V> e1, Tuple2<K, V> e2) {
return keyComparator.compare(e1._1, e2._1);
}
@Override
public Comparator<K> keyComparator() {
return keyComparator;
}
}
final class Natural<K, V> implements EntryComparator<K, V> {
private static final long serialVersionUID = 1L;
private static final Natural<?, ?> INSTANCE = new Natural<>();
// hidden
private Natural() {
}
@SuppressWarnings("unchecked")
public static <K, V> Natural<K, V> instance() {
return (Natural<K, V>) INSTANCE;
}
@SuppressWarnings("unchecked")
@Override
public int compare(Tuple2<K, V> e1, Tuple2<K, V> e2) {
final K key1 = e1._1;
final K key2 = e2._1;
return ((Comparable<K>) key1).compareTo(key2);
}
@Override
public Comparator<K> keyComparator() {
return Comparators.naturalComparator();
}
/**
* Instance control for object serialization.
*
* @return The singleton instance of NaturalEntryComparator.
* @see java.io.Serializable
*/
private Object readResolve() {
return INSTANCE;
}
}
}
}
| 1 | 12,468 | Yep, could be simplified. Now looks like other *Map.values() impls | vavr-io-vavr | java |
@@ -183,6 +183,15 @@ class ApiContext extends \Imbo\BehatApiExtension\Context\ApiContext
$this->assertJsonObjectContainsKeys('configuration,columns,collection,info');
}
+ /**
+ * @Then /^print last api response$/
+ */
+ public function printLastApiResponse(): void
+ {
+ $this->requireResponse();
+ echo $this->response->getBody();
+ }
+
/**
* {@inheritDoc}
*/ | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
use Assert\Assertion;
use Assert\AssertionFailedException as AssertionFailure;
use Behat\Behat\Hook\Scope\BeforeScenarioScope;
use Behat\Gherkin\Node\TableNode;
use Imbo\BehatApiExtension\Exception\AssertionFailedException;
use Symfony\Component\HttpFoundation\Response;
/**
*/
class ApiContext extends \Imbo\BehatApiExtension\Context\ApiContext
{
private const JSON_CONTENT = 'application/json';
/**
* @var StorageContext
*/
private $storageContext;
/**
* @BeforeScenario
*
* @param BeforeScenarioScope $scope
*/
public function gatherContexts(BeforeScenarioScope $scope): void
{
$environment = $scope->getEnvironment();
$this->storageContext = $environment->getContext('StorageContext');
}
/**
* @param string $key
* @param string $var
*
* @Then remember response param :key as :var
*/
public function rememberResponseParam(string $key, string $var): void
{
$response = $this->getResponseBody();
if (!isset($response->{$key})) {
throw new \RuntimeException(sprintf(
'Key "%s" not found in response "%s"',
$key,
$this->response->getBody()
));
}
$this->storageContext->add($var, $response->{$key});
}
/**
* @param string $keys
*
* @throws AssertionFailedException
*
* @Then the JSON object contains keys :keys
*/
public function assertJsonObjectContainsKeys(string $keys): void
{
$this->requireResponse();
$body = $this->getResponseBody();
$keysCollection = explode(',', $keys);
try {
foreach ($keysCollection as $key) {
Assertion::propertyExists($body, $key);
}
} catch (AssertionFailure $e) {
throw new AssertionFailedException($e->getMessage());
}
}
/**
* @throws AssertionFailedException
*
* @Then not found response is received
*/
public function assertResponseNotFound(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_NOT_FOUND);
$this->assertJsonObjectContainsKeys('code,message');
}
/**
* @throws AssertionFailedException
*
* @Then created response is received
*/
public function assertResponseCreated(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_CREATED);
$this->assertJsonObjectContainsKeys('id');
}
/**
* @throws AssertionFailedException
*
* @Then validation error response is received
*/
public function assertResponseValidationError(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_BAD_REQUEST);
$this->assertJsonObjectContainsKeys('code,message,errors');
}
/**
* @throws AssertionFailedException
*
* @Then empty response is received
*/
public function assertResponseEmpty(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_NO_CONTENT);
}
/**
* @throws AssertionFailedException
*
* @Then access denied response is received
*/
public function assertResponseAccessDenied(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_FORBIDDEN);
}
/**
* @throws AssertionFailedException
*
* @Then unauthorized response is received
*/
public function assertResponseUnauthorized(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_UNAUTHORIZED);
}
/**
* @throws AssertionFailedException
*
* @Then conflict response is received
*/
public function assertResponseConflict(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_CONFLICT);
$this->assertJsonObjectContainsKeys('code,message');
}
/**
* @throws AssertionFailedException
*
* @Then not implemented response is received
*/
public function assertResponseNotImplemented(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_NOT_IMPLEMENTED);
$this->assertJsonObjectContainsKeys('code,message');
}
/**
* @throws AssertionFailedException
*
* @Then grid response is received
*/
public function assertResponseGrid(): void
{
$this->requireResponse();
$this->assertResponseCodeIs(Response::HTTP_OK);
$this->assertJsonObjectContainsKeys('configuration,columns,collection,info');
}
/**
* {@inheritDoc}
*/
public function assertResponseCodeIs($code): void
{
try {
$actual = $this->response->getStatusCode();
$expected = $this->validateResponseCode($code);
$body = $this->response->getBody()->getContents();
$message = sprintf(
'Expected response code "%d", got "%d". Revived "%s"',
$expected,
$actual,
$body
);
Assertion::same($actual, $expected, $message);
} catch (\Exception $e) {
throw new AssertionFailedException($e->getMessage());
}
}
/**
* {@inheritDoc}
*/
public function requestPath($path, $method = null)
{
$path = $this->storageContext->replaceVars($path);
$this->setRequestHeader('Accept', self::JSON_CONTENT);
parent::requestPath($path, $method);
}
/**
* {@inheritDoc}
*/
public function setRequestBody($string)
{
$string = $this->storageContext->replaceVars($string);
return parent::setRequestBody($string);
}
/**
* {@inheritDoc}
*/
public function setRequestFormParams(TableNode $table): void
{
$data = $table->getTable();
foreach ($data as $rowKey => $row) {
foreach ($row as $columnKey => $column) {
$data[$rowKey][$columnKey] = $this->storageContext->replaceVars($column);
}
}
parent::setRequestFormParams(new TableNode($data));
}
/**
* @return array|mixed|stdClass
*/
public function getLastResponseBody()
{
return $this->getResponseBody();
}
/**
*/
public function requestSend(): void
{
$this->sendRequest();
}
/**
* {@inheritDoc}
*/
protected function getResponseBody()
{
$source = (string) $this->response->getBody();
$body = json_decode($source, false);
if (json_last_error() !== JSON_ERROR_NONE) {
throw new InvalidArgumentException(sprintf(
'The response body does not contain valid JSON data. Received "%s"',
$source
));
}
if (!is_array($body) && !($body instanceof stdClass)) {
throw new InvalidArgumentException(sprintf(
'The response body does not contain a valid JSON array / object. Received "%s"',
$source
));
}
return $body;
}
/**
* {@inheritDoc}
*/
protected function getResponseBodyArray()
{
if (!is_array($body = $this->getResponseBody())) {
throw new InvalidArgumentException(sprintf(
'The response body does not contain a valid JSON array. Received "%s"',
$this->response->getBody()
));
}
return $body;
}
}
| 1 | 8,439 | This method will be for debug? | ergonode-backend | php |
@@ -9,9 +9,16 @@ import (
"errors"
)
+// ErrInvalidPassword is returned when the password for decrypting content where
+// private key is stored is not valid.
var ErrInvalidPassword = errors.New("invalid password")
+// Service for managing keystore private keys.
type Service interface {
+ // Key returns private key for specified name that was encrypted with
+ // provided password. If the private key does not exists it creates new one
+ // with name and password, and returns with created set to true.
Key(name, password string) (k *ecdsa.PrivateKey, created bool, err error)
+ // Exists returns true if the key with specified name exists.
Exists(name string) (bool, error)
} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keystore
import (
"crypto/ecdsa"
"errors"
)
var ErrInvalidPassword = errors.New("invalid password")
type Service interface {
Key(name, password string) (k *ecdsa.PrivateKey, created bool, err error)
Exists(name string) (bool, error)
}
| 1 | 12,837 | // Key returns the private key for a specified name that was encrypted with the // provided password. If the private key does not exists it creates a new one // with a name and the password, and returns with `created` set to true. | ethersphere-bee | go |
@@ -45,6 +45,19 @@ class DiagramEntity(Figure):
self.node = node
+class PackageEntity(DiagramEntity):
+ """A diagram object representing a package"""
+
+
+class ClassEntity(DiagramEntity):
+ """A diagram object representing a class"""
+
+ def __init__(self, title, node):
+ super().__init__(title=title, node=node)
+ self.attrs = None
+ self.methods = None
+
+
class ClassDiagram(Figure, FilterMixIn):
"""main class diagram handling"""
| 1 | # Copyright (c) 2006, 2008-2010, 2012-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2014-2018, 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Mark Byrne <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""diagram objects
"""
import astroid
from pylint.checkers.utils import decorated_with_property
from pylint.pyreverse.utils import FilterMixIn, is_interface
class Figure:
"""base class for counter handling"""
class Relationship(Figure):
"""a relation ship from an object in the diagram to another"""
def __init__(self, from_object, to_object, relation_type, name=None):
Figure.__init__(self)
self.from_object = from_object
self.to_object = to_object
self.type = relation_type
self.name = name
class DiagramEntity(Figure):
"""a diagram object, i.e. a label associated to an astroid node"""
def __init__(self, title="No name", node=None):
Figure.__init__(self)
self.title = title
self.node = node
class ClassDiagram(Figure, FilterMixIn):
"""main class diagram handling"""
TYPE = "class"
def __init__(self, title, mode):
FilterMixIn.__init__(self, mode)
Figure.__init__(self)
self.title = title
self.objects = []
self.relationships = {}
self._nodes = {}
self.depends = []
def get_relationships(self, role):
# sorted to get predictable (hence testable) results
return sorted(
self.relationships.get(role, ()),
key=lambda x: (x.from_object.fig_id, x.to_object.fig_id),
)
def add_relationship(self, from_object, to_object, relation_type, name=None):
"""create a relation ship"""
rel = Relationship(from_object, to_object, relation_type, name)
self.relationships.setdefault(relation_type, []).append(rel)
def get_relationship(self, from_object, relation_type):
"""return a relation ship or None"""
for rel in self.relationships.get(relation_type, ()):
if rel.from_object is from_object:
return rel
raise KeyError(relation_type)
def get_attrs(self, node):
"""return visible attributes, possibly with class name"""
attrs = []
properties = [
(n, m)
for n, m in node.items()
if isinstance(m, astroid.FunctionDef) and decorated_with_property(m)
]
for node_name, associated_nodes in (
list(node.instance_attrs_type.items())
+ list(node.locals_type.items())
+ properties
):
if not self.show_attr(node_name):
continue
names = self.class_names(associated_nodes)
if names:
node_name = "{} : {}".format(node_name, ", ".join(names))
attrs.append(node_name)
return sorted(attrs)
def get_methods(self, node):
"""return visible methods"""
methods = [
m
for m in node.values()
if isinstance(m, astroid.FunctionDef)
and not decorated_with_property(m)
and self.show_attr(m.name)
]
return sorted(methods, key=lambda n: n.name)
def add_object(self, title, node):
"""create a diagram object"""
assert node not in self._nodes
ent = DiagramEntity(title, node)
self._nodes[node] = ent
self.objects.append(ent)
def class_names(self, nodes):
"""return class names if needed in diagram"""
names = []
for node in nodes:
if isinstance(node, astroid.Instance):
node = node._proxied
if (
isinstance(node, (astroid.ClassDef, astroid.Name, astroid.Subscript))
and hasattr(node, "name")
and not self.has_node(node)
):
if node.name not in names:
node_name = node.name
names.append(node_name)
return names
def nodes(self):
"""return the list of underlying nodes"""
return self._nodes.keys()
def has_node(self, node):
"""return true if the given node is included in the diagram"""
return node in self._nodes
def object_from_node(self, node):
"""return the diagram object mapped to node"""
return self._nodes[node]
def classes(self):
"""return all class nodes in the diagram"""
return [o for o in self.objects if isinstance(o.node, astroid.ClassDef)]
def classe(self, name):
"""return a class by its name, raise KeyError if not found"""
for klass in self.classes():
if klass.node.name == name:
return klass
raise KeyError(name)
def extract_relationships(self):
"""extract relation ships between nodes in the diagram"""
for obj in self.classes():
node = obj.node
obj.attrs = self.get_attrs(node)
obj.methods = self.get_methods(node)
# shape
if is_interface(node):
obj.shape = "interface"
else:
obj.shape = "class"
# inheritance link
for par_node in node.ancestors(recurs=False):
try:
par_obj = self.object_from_node(par_node)
self.add_relationship(obj, par_obj, "specialization")
except KeyError:
continue
# implements link
for impl_node in node.implements:
try:
impl_obj = self.object_from_node(impl_node)
self.add_relationship(obj, impl_obj, "implements")
except KeyError:
continue
# associations link
for name, values in list(node.instance_attrs_type.items()) + list(
node.locals_type.items()
):
for value in values:
if value is astroid.Uninferable:
continue
if isinstance(value, astroid.Instance):
value = value._proxied
try:
associated_obj = self.object_from_node(value)
self.add_relationship(associated_obj, obj, "association", name)
except KeyError:
continue
class PackageDiagram(ClassDiagram):
"""package diagram handling"""
TYPE = "package"
def modules(self):
"""return all module nodes in the diagram"""
return [o for o in self.objects if isinstance(o.node, astroid.Module)]
def module(self, name):
"""return a module by its name, raise KeyError if not found"""
for mod in self.modules():
if mod.node.name == name:
return mod
raise KeyError(name)
def get_module(self, name, node):
"""return a module by its name, looking also for relative imports;
raise KeyError if not found
"""
for mod in self.modules():
mod_name = mod.node.name
if mod_name == name:
return mod
# search for fullname of relative import modules
package = node.root().name
if mod_name == f"{package}.{name}":
return mod
if mod_name == "{}.{}".format(package.rsplit(".", 1)[0], name):
return mod
raise KeyError(name)
def add_from_depend(self, node, from_module):
"""add dependencies created by from-imports"""
mod_name = node.root().name
obj = self.module(mod_name)
if from_module not in obj.node.depends:
obj.node.depends.append(from_module)
def extract_relationships(self):
"""extract relation ships between nodes in the diagram"""
ClassDiagram.extract_relationships(self)
for obj in self.classes():
# ownership
try:
mod = self.object_from_node(obj.node.root())
self.add_relationship(obj, mod, "ownership")
except KeyError:
continue
for obj in self.modules():
obj.shape = "package"
# dependencies
for dep_name in obj.node.depends:
try:
dep = self.get_module(dep_name, obj.node)
except KeyError:
continue
self.add_relationship(obj, dep, "depends")
| 1 | 14,758 | Adding the type hints revealed that it was necessary to distinguish between a ``PackageEntity`` and a ``ClassEntity``, because the ``ClassEntity`` has additional attributes that were dynamically added in the previous code, which confused ``mypy``. | PyCQA-pylint | py |
@@ -43,7 +43,8 @@ class _MissingPandasLikeDataFrame(object):
blocks = unsupported_property('blocks', deprecated=True)
# Functions
- add = unsupported_function('add')
+ add_prefix = unsupported_function('add_prefix')
+ add_suffix = unsupported_function('add_suffix')
agg = unsupported_function('agg')
aggregate = unsupported_function('aggregate')
align = unsupported_function('align') | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from databricks.koalas.missing import _unsupported_function, _unsupported_property
def unsupported_function(method_name, deprecated=False):
return _unsupported_function(class_name='pd.DataFrame', method_name=method_name,
deprecated=deprecated)
def unsupported_property(property_name, deprecated=False):
return _unsupported_property(class_name='pd.DataFrame', property_name=property_name,
deprecated=deprecated)
class _MissingPandasLikeDataFrame(object):
# Properties
T = unsupported_property('T')
axes = unsupported_property('axes')
ftypes = unsupported_property('ftypes')
iat = unsupported_property('iat')
is_copy = unsupported_property('is_copy')
ix = unsupported_property('ix')
ndim = unsupported_property('ndim')
style = unsupported_property('style')
# Deprecated properties
blocks = unsupported_property('blocks', deprecated=True)
# Functions
add = unsupported_function('add')
agg = unsupported_function('agg')
aggregate = unsupported_function('aggregate')
align = unsupported_function('align')
all = unsupported_function('all')
any = unsupported_function('any')
append = unsupported_function('append')
apply = unsupported_function('apply')
asfreq = unsupported_function('asfreq')
asof = unsupported_function('asof')
at_time = unsupported_function('at_time')
between_time = unsupported_function('between_time')
bfill = unsupported_function('bfill')
bool = unsupported_function('bool')
boxplot = unsupported_function('boxplot')
combine = unsupported_function('combine')
combine_first = unsupported_function('combine_first')
compound = unsupported_function('compound')
corrwith = unsupported_function('corrwith')
cov = unsupported_function('cov')
cummax = unsupported_function('cummax')
cummin = unsupported_function('cummin')
cumprod = unsupported_function('cumprod')
cumsum = unsupported_function('cumsum')
diff = unsupported_function('diff')
div = unsupported_function('div')
divide = unsupported_function('divide')
dot = unsupported_function('dot')
drop_duplicates = unsupported_function('drop_duplicates')
droplevel = unsupported_function('droplevel')
duplicated = unsupported_function('duplicated')
eq = unsupported_function('eq')
equals = unsupported_function('equals')
eval = unsupported_function('eval')
ewm = unsupported_function('ewm')
expanding = unsupported_function('expanding')
ffill = unsupported_function('ffill')
filter = unsupported_function('filter')
first = unsupported_function('first')
first_valid_index = unsupported_function('first_valid_index')
floordiv = unsupported_function('floordiv')
ge = unsupported_function('ge')
get_dtype_counts = unsupported_function('get_dtype_counts')
get_values = unsupported_function('get_values')
gt = unsupported_function('gt')
hist = unsupported_function('hist')
idxmax = unsupported_function('idxmax')
idxmin = unsupported_function('idxmin')
infer_objects = unsupported_function('infer_objects')
info = unsupported_function('info')
insert = unsupported_function('insert')
interpolate = unsupported_function('interpolate')
items = unsupported_function('items')
iterrows = unsupported_function('iterrows')
itertuples = unsupported_function('itertuples')
join = unsupported_function('join')
keys = unsupported_function('keys')
last = unsupported_function('last')
last_valid_index = unsupported_function('last_valid_index')
le = unsupported_function('le')
lookup = unsupported_function('lookup')
lt = unsupported_function('lt')
mad = unsupported_function('mad')
mask = unsupported_function('mask')
median = unsupported_function('median')
melt = unsupported_function('melt')
memory_usage = unsupported_function('memory_usage')
mod = unsupported_function('mod')
mode = unsupported_function('mode')
mul = unsupported_function('mul')
multiply = unsupported_function('multiply')
ne = unsupported_function('ne')
pct_change = unsupported_function('pct_change')
pivot = unsupported_function('pivot')
pivot_table = unsupported_function('pivot_table')
pop = unsupported_function('pop')
pow = unsupported_function('pow')
prod = unsupported_function('prod')
product = unsupported_function('product')
quantile = unsupported_function('quantile')
query = unsupported_function('query')
radd = unsupported_function('radd')
rank = unsupported_function('rank')
rdiv = unsupported_function('rdiv')
reindex = unsupported_function('reindex')
reindex_axis = unsupported_function('reindex_axis')
reindex_like = unsupported_function('reindex_like')
rename = unsupported_function('rename')
rename_axis = unsupported_function('rename_axis')
reorder_levels = unsupported_function('reorder_levels')
replace = unsupported_function('replace')
resample = unsupported_function('resample')
rfloordiv = unsupported_function('rfloordiv')
rmod = unsupported_function('rmod')
rmul = unsupported_function('rmul')
rolling = unsupported_function('rolling')
round = unsupported_function('round')
rpow = unsupported_function('rpow')
rsub = unsupported_function('rsub')
rtruediv = unsupported_function('rtruediv')
select_dtypes = unsupported_function('select_dtypes')
sem = unsupported_function('sem')
set_axis = unsupported_function('set_axis')
shift = unsupported_function('shift')
slice_shift = unsupported_function('slice_shift')
squeeze = unsupported_function('squeeze')
stack = unsupported_function('stack')
sub = unsupported_function('sub')
subtract = unsupported_function('subtract')
swapaxes = unsupported_function('swapaxes')
swaplevel = unsupported_function('swaplevel')
tail = unsupported_function('tail')
take = unsupported_function('take')
to_dense = unsupported_function('to_dense')
to_feather = unsupported_function('to_feather')
to_gbq = unsupported_function('to_gbq')
to_hdf = unsupported_function('to_hdf')
to_msgpack = unsupported_function('to_msgpack')
to_parquet = unsupported_function('to_parquet')
to_period = unsupported_function('to_period')
to_pickle = unsupported_function('to_pickle')
to_sparse = unsupported_function('to_sparse')
to_sql = unsupported_function('to_sql')
to_stata = unsupported_function('to_stata')
to_timestamp = unsupported_function('to_timestamp')
to_xarray = unsupported_function('to_xarray')
transform = unsupported_function('transform')
transpose = unsupported_function('transpose')
truediv = unsupported_function('truediv')
truncate = unsupported_function('truncate')
tshift = unsupported_function('tshift')
tz_convert = unsupported_function('tz_convert')
tz_localize = unsupported_function('tz_localize')
unstack = unsupported_function('unstack')
update = unsupported_function('update')
where = unsupported_function('where')
xs = unsupported_function('xs')
# Deprecated functions
as_blocks = unsupported_function('as_blocks', deprecated=True)
as_matrix = unsupported_function('as_matrix', deprecated=True)
clip_lower = unsupported_function('clip_lower', deprecated=True)
clip_upper = unsupported_function('clip_upper', deprecated=True)
convert_objects = unsupported_function('convert_objects', deprecated=True)
get_ftype_counts = unsupported_function('get_ftype_counts', deprecated=True)
get_value = unsupported_function('get_value', deprecated=True)
select = unsupported_function('select', deprecated=True)
set_value = unsupported_function('set_value', deprecated=True)
to_panel = unsupported_function('to_panel', deprecated=True)
| 1 | 9,905 | These two functions should be available now. | databricks-koalas | py |
@@ -651,10 +651,17 @@ type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *hash.MultiHasher // currently accumulating hashes
+ fd *os.File // file object reference
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
+ // Check if file has the same size and modTime
+ fi, err := file.fd.Stat()
+ if file.o.size != fi.Size() || file.o.modTime != fi.ModTime() {
+ return 0, errors.New("can't copy - source file is being updated")
+ }
+
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error | 1 | // Package local provides a filesystem interface
package local
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/appengine/log"
)
// Command line flags controlling the behaviour of the local backend.
var (
	followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
	skipSymlinks   = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
	noUTFNorm      = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
)
// Constants
//
// devUnset is the sentinel stored in Fs.dev until the real device
// number has been read via readDevice in NewFs.
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
// Register this backend with the fs registry at startup so that
// "local" is available as a remote type.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "local",
		Description: "Local Disk",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "nounc",
			Help:     "Disable UNC (long path names) conversion on Windows",
			Optional: true,
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Disables long file names",
			}},
		}},
	})
}
// Fs represents a local filesystem rooted at root
type Fs struct {
	name        string              // the name of the remote
	root        string              // The root directory (OS path), cleaned by cleanPath
	features    *fs.Features        // optional features
	dev         uint64              // device number of root node (devUnset until read)
	precisionOk sync.Once           // guards the one-time detection of precision
	precision   time.Duration       // precision of local filesystem mod times
	wmu         sync.Mutex          // used for locking access to 'warned'.
	warned      map[string]struct{} // names we have already warned about (bad UTF-8)
	nounc       bool                // Skip UNC conversion on Windows
	// do os.Lstat or os.Stat - os.Stat if --copy-links is set
	lstat    func(name string) (os.FileInfo, error)
	dirNames *mapper // directory name mapping
}
// Object represents a local filesystem object
type Object struct {
	fs     *Fs    // The Fs this object is part of
	remote string // The remote path - properly UTF-8 encoded - for rclone
	path   string // The local path - may not be properly UTF-8 encoded - for OS
	size   int64  // file metadata - always present
	mode    os.FileMode          // file mode bits
	modTime time.Time            // modification time
	hashes  map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path
//
// If root points at a regular file the returned Fs is rooted at the
// file's parent directory and the error is fs.ErrorIsFile - the Fs is
// still usable in that case.
func NewFs(name, root string) (fs.Fs, error) {
	var err error
	if *noUTFNorm {
		// NOTE(review): "log" here resolves to google.golang.org/appengine/log,
		// which looks like an accidental import - rclone logging elsewhere goes
		// through the fs package. Confirm before changing; swapping the call
		// would leave the appengine import unused.
		log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
	}
	nounc := config.FileGet(name, "nounc")
	f := &Fs{
		name: name,
		warned: make(map[string]struct{}),
		nounc: nounc == "true",
		dev: devUnset,
		lstat: os.Lstat,
		dirNames: newMapper(),
	}
	f.root = f.cleanPath(root)
	f.features = (&fs.Features{
		CaseInsensitive: f.caseInsensitive(),
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	// --copy-links makes us resolve symlinks, so stat instead of lstat
	if *followSymlinks {
		f.lstat = os.Stat
	}
	// Check to see if this points to a file
	fi, err := f.lstat(f.root)
	if err == nil {
		// remember the device of the root so List can skip mount points
		f.dev = readDevice(fi)
	}
	if err == nil && fi.Mode().IsRegular() {
		// It is a file, so use the parent as the root
		f.root, _ = getDirFile(f.root)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
// Name returns the name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
// Root returns the root of the remote (as passed into NewFs), after
// being cleaned by cleanPath.
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a string, implementing fmt.Stringer.
func (f *Fs) String() string {
	return fmt.Sprintf("Local file system at %s", f.root)
}
// Features returns the optional features of this Fs, as set up in NewFs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
// caseInsensitive reports whether the remote is case insensitive.
//
// FIXME this is not entirely accurate: case sensitive filesystems
// exist on darwin and case insensitive ones on linux. Checking for
// real would mean creating a file in the remote, which probably isn't
// desirable, so go by OS instead.
func (f *Fs) caseInsensitive() bool {
	switch runtime.GOOS {
	case "windows", "darwin":
		return true
	default:
		return false
	}
}
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
	osPath := dstPath
	if osPath == "" {
		// derive the OS path from the remote name
		osPath = f.cleanPath(filepath.Join(f.root, remote))
	}
	o := &Object{
		fs:     f,
		remote: f.cleanRemote(remote),
		path:   osPath,
	}
	return o
}
// newObjectWithInfo returns an Object for the path, using info for the
// metadata if non-nil, otherwise stat-ing the file itself.
//
// Returns fs.ErrorObjectNotFound / fs.ErrorPermissionDenied for the
// corresponding OS errors, and fs.ErrorNotAFile if remote is a directory.
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
	o := f.newObject(remote, dstPath)
	if info != nil {
		o.setMetadata(info)
	} else if err := o.lstat(); err != nil {
		// translate OS level errors into rclone's sentinel errors
		switch {
		case os.IsNotExist(err):
			return nil, fs.ErrorObjectNotFound
		case os.IsPermission(err):
			return nil, fs.ErrorPermissionDenied
		default:
			return nil, err
		}
	}
	if o.mode.IsDir() {
		return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
	}
	return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. If remote names a
// directory it returns fs.ErrorNotAFile (via newObjectWithInfo).
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, "", nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	// translate the remote directory name back to its on-disk form
	dir = f.dirNames.Load(dir)
	fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
	remote := f.cleanRemote(dir)
	_, err = os.Stat(fsDirPath)
	if err != nil {
		return nil, fs.ErrorDirNotFound
	}
	fd, err := os.Open(fsDirPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open directory %q", dir)
	}
	// propagate a Close failure through the named return value err,
	// but never mask an earlier error with it
	defer func() {
		cerr := fd.Close()
		if cerr != nil && err == nil {
			err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
		}
	}()
	// read the directory in batches of 1024 entries to bound memory use
	for {
		// NB this err (declared with :=) shadows the named return err
		fis, err := fd.Readdir(1024)
		if err == io.EOF && len(fis) == 0 {
			break
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to read directory %q", dir)
		}
		for _, fi := range fis {
			name := fi.Name()
			mode := fi.Mode()
			newRemote := path.Join(remote, name)
			newPath := filepath.Join(fsDirPath, name)
			// Follow symlinks if required
			// NOTE(review): a dangling symlink makes os.Stat fail and aborts
			// the whole listing here - confirm whether skipping with a warning
			// would be preferable.
			if *followSymlinks && (mode&os.ModeSymlink) != 0 {
				fi, err = os.Stat(newPath)
				if err != nil {
					return nil, err
				}
				mode = fi.Mode()
			}
			if fi.IsDir() {
				// Ignore directories which are symlinks. These are junction points under windows which
				// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
				// Also skip directories on a different device (mount points).
				if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
					d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
					entries = append(entries, d)
				}
			} else {
				fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
				if err != nil {
					return nil, err
				}
				if fso.Storable() {
					entries = append(entries, fso)
				}
			}
		}
	}
	return entries, nil
}
// cleanRemote makes string a valid UTF-8 string for remote strings.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
name = string([]rune(name))
}
name = filepath.ToSlash(name)
return name
}
// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}
func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}
// Lookup a directory name to make a local name (reverses
// cleanDirName)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}
// Cleans a directory name recording if it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
}
// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote, "")
err := o.Update(in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(root)
if err != nil {
return err
}
f.dev = readDevice(fi)
}
return nil
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(dir string) error {
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
}
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
return f.precision
}
// Read the precision
func (f *Fs) readPrecision() (precision time.Duration) {
// Default precision of 1s
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
return time.Second
}
path := fd.Name()
// fmt.Println("Created temp file", path)
err = fd.Close()
if err != nil {
return time.Second
}
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
_ = os.Remove(path) // ignore error
}()
// Find the minimum duration we can detect
for duration := time.Duration(1); duration < time.Second; duration *= 10 {
// Current time with delta
t := time.Unix(time.Now().Unix(), int64(duration))
err := os.Chtimes(path, t, t)
if err != nil {
// fmt.Println("Failed to Chtimes", err)
break
}
// Read the actual time back
fi, err := os.Stat(path)
if err != nil {
// fmt.Println("Failed to Stat", err)
break
}
// If it matches - have found the precision
// fmt.Println("compare", fi.ModTime(), t)
if fi.ModTime() == t {
// fmt.Println("Precision detected as", duration)
return duration
}
}
return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
fi, err := f.lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary Object under construction
dstObj := f.newObject(remote, "")
// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.mode.IsRegular() {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return nil, err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return nil, err
} else if err != nil {
// not quite clear, but probably trying to move a file across file system
// boundaries. Copying might still work.
fs.Errorf(src, "Can't move: %v: trying copy", err)
return nil, fs.ErrorCantMove
}
// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
// Check if destination exists
_, err := os.Lstat(dstPath)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}
// Create parent of destination
dstParentPath, _ := getDirFile(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
}
// Do the move
return os.Rename(srcPath, dstPath)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
}
if !o.modTime.Equal(oldtime) || oldsize != o.size {
o.hashes = nil
}
if o.hashes == nil {
o.hashes = make(map[hash.Type]string)
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
o.hashes, err = hash.Stream(in)
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
}
return o.hashes[r], nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
return err
}
// Re-read metadata
return o.lstat()
}
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and show non storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
// On windows a file with os.ModeSymlink represents a file with reparse points
if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
fs.Debugf(o, "Clearing symlink bit to allow a file with reparse points to be copied")
mode &^= os.ModeSymlink
}
if mode&os.ModeSymlink != 0 {
if !*skipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Logf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debugf(o, "Skipping directory")
return false
}
return true
}
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *hash.MultiHasher // currently accumulating hashes
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the hashes
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.hashes = file.hash.Sums()
}
}
return err
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
hashes = x.Hashes
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
fd, err := os.Open(o.path)
if err != nil {
return
}
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
if offset != 0 {
// seek the object
_, err = fd.Seek(offset, 0)
// don't attempt to make checksums
return wrappedFd, err
}
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
// Update the md5sum as we go along
in = &localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
}
return in, nil
}
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir, _ := getDirFile(o.path)
return os.MkdirAll(dir, 0777)
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
hashes = x.Hashes
}
}
err := o.mkdirAll()
if err != nil {
return err
}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Calculate the hash of the object we are reading as we go along
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
closeErr := out.Close()
if err == nil {
err = closeErr
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
// All successful so update the hashes
o.hashes = hash.Sums()
// Set the mtime
err = o.SetModTime(src.ModTime())
if err != nil {
return err
}
// ReRead info now that we have finished
return o.lstat()
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
}
}
// Stat a Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
o.setMetadata(info)
}
return err
}
// Remove an object
func (o *Object) Remove() error {
return os.Remove(o.path)
}
// Return the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
i := strings.LastIndex(s, string(os.PathSeparator))
dir, file := s[:i], s[i+1:]
if dir == "" {
dir = string(os.PathSeparator)
}
return dir, file
}
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On windows it makes the path UNC also and replaces any characters
// Windows can't deal with with their replacements.
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
if !f.nounc {
// Convert to UNC
s = uncPath(s)
}
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
}
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return s
}
// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Object = &Object{}
)
| 1 | 6,791 | `fi` is what the result of Stat is called elsewhere in this file not `finfo` | rclone-rclone | go |
@@ -62,15 +62,6 @@ RSpec.describe Blacklight::OpenStructWithHashAccess do
end
end
- describe "#replace" do
- subject { described_class.new a: 1 }
-
- it "can use #replace to reorder the hash" do
- subject.replace b: 1
- expect(subject.b).to eq 1
- end
- end
-
describe "#sort_by" do
subject { described_class.new c: 3, b: 1, a: 2 }
| 1 | # frozen_string_literal: true
RSpec.describe Blacklight::OpenStructWithHashAccess do
it "provides hash-like accessors for OpenStruct data" do
a = described_class.new foo: :bar, baz: 1
expect(a[:foo]).to eq :bar
expect(a[:baz]).to eq 1
expect(a[:asdf]).to be_nil
end
it "provides hash-like writers for OpenStruct data" do
a = described_class.new foo: :bar, baz: 1
a[:asdf] = 'qwerty'
expect(a.asdf).to eq 'qwerty'
end
it "treats symbols and strings interchangeably in hash access" do
h = described_class.new
h["string"] = "value"
expect(h[:string]).to eq "value"
expect(h.string).to eq "value"
h[:symbol] = "value"
expect(h["symbol"]).to eq "value"
expect(h.symbol).to eq "value"
end
describe "internal hash table" do
before do
@h = described_class.new
@h[:a] = 1
@h[:b] = 2
end
it "exposes the internal hash table" do
expect(@h.to_h).to be_a_kind_of(Hash)
expect(@h.to_h[:a]).to eq 1
end
it "exposes keys" do
expect(@h.keys).to include(:a, :b)
end
end
describe "#key?" do
subject do
h = described_class.new
h[:a] = 1
h[:b] = 2
h
end
it "is true if the key exists" do
expect(subject.key?(:a)).to eq true
end
it "is false if the key does not exist" do
expect(subject.key?(:c)).to eq false
end
end
describe "#replace" do
subject { described_class.new a: 1 }
it "can use #replace to reorder the hash" do
subject.replace b: 1
expect(subject.b).to eq 1
end
end
describe "#sort_by" do
subject { described_class.new c: 3, b: 1, a: 2 }
it "sorts the underlying hash" do
sorted = subject.sort_by { |_k, v| v }
expect(sorted.keys).to match_array [:b, :a, :c]
end
end
describe "#sort_by!" do
subject { described_class.new c: 3, b: 1, a: 2 }
it "sorts the underlying hash" do
subject.sort_by! { |_k, v| v }
expect(subject.keys).to match_array [:b, :a, :c]
end
end
describe "#merge" do
before do
@h = described_class.new
@h[:a] = 1
@h[:b] = 2
end
it "merges the object with a hash" do
expect(@h.merge(a: 'a')[:a]).to eq 'a'
end
it "merges the object with another struct" do
expect(@h.merge(described_class.new(a: 'a'))[:a]).to eq 'a'
end
end
describe "#merge!" do
before do
@h = described_class.new
@h[:a] = 1
@h[:b] = 2
end
it "merges the object with a hash" do
@h[:a] = 'a'
expect(@h[:a]).to eq 'a'
end
it "merges the object with another struct" do
@h.merge!(described_class.new(a: 'a'))
expect(@h[:a]).to eq 'a'
end
end
describe "#to_json" do
subject { described_class.new a: 1, b: 2 }
it "serializes as json" do
expect(subject.to_json).to eq({ a: 1, b: 2 }.to_json)
end
end
describe "#deep_dup" do
subject { described_class.new a: 1, b: { c: 1 } }
it "duplicates nested hashes" do
copy = subject.deep_dup
copy.a = 2
copy.b[:c] = 2
expect(subject.a).to eq 1
expect(subject.b[:c]).to eq 1
expect(copy.a).to eq 2
expect(copy.b[:c]).to eq 2
end
end
end
| 1 | 8,807 | I'm confused; are we just dropping these methods without deprecation? | projectblacklight-blacklight | rb |
@@ -16,12 +16,18 @@ void AddEdgesProcessor::process(const cpp2::AddEdgesRequest& req) {
auto spaceId = req.get_space_id();
auto version =
std::numeric_limits<int64_t>::max() - time::WallClock::fastNowInMicroSec();
+ // Switch version to big-endian, make sure the key is in ordered.
+ version = folly::Endian::big(version);
+
callingNum_ = req.parts.size();
CHECK_NOTNULL(kvstore_);
std::for_each(req.parts.begin(), req.parts.end(), [&](auto& partEdges){
auto partId = partEdges.first;
std::vector<kvstore::KV> data;
std::for_each(partEdges.second.begin(), partEdges.second.end(), [&](auto& edge){
+ VLOG(4) << "PartitionID: " << partId << ", VertexID: " << edge.key.src
+ << ", EdgeType: " << edge.key.edge_type << ", EdgeRanking: " << edge.key.ranking
+ << ", VertexID: " << edge.key.dst << ", EdgeVersion: " << version;
auto key = NebulaKeyUtils::edgeKey(partId, edge.key.src, edge.key.edge_type,
edge.key.ranking, edge.key.dst, version);
data.emplace_back(std::move(key), std::move(edge.get_props())); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "storage/AddEdgesProcessor.h"
#include "base/NebulaKeyUtils.h"
#include <algorithm>
#include <limits>
#include "time/WallClock.h"
namespace nebula {
namespace storage {
void AddEdgesProcessor::process(const cpp2::AddEdgesRequest& req) {
auto spaceId = req.get_space_id();
auto version =
std::numeric_limits<int64_t>::max() - time::WallClock::fastNowInMicroSec();
callingNum_ = req.parts.size();
CHECK_NOTNULL(kvstore_);
std::for_each(req.parts.begin(), req.parts.end(), [&](auto& partEdges){
auto partId = partEdges.first;
std::vector<kvstore::KV> data;
std::for_each(partEdges.second.begin(), partEdges.second.end(), [&](auto& edge){
auto key = NebulaKeyUtils::edgeKey(partId, edge.key.src, edge.key.edge_type,
edge.key.ranking, edge.key.dst, version);
data.emplace_back(std::move(key), std::move(edge.get_props()));
});
doPut(spaceId, partId, std::move(data));
});
}
} // namespace storage
} // namespace nebula
| 1 | 21,977 | can we use PC's time to version in Distributed Systems? | vesoft-inc-nebula | cpp |
@@ -100,9 +100,11 @@
<% cart.comments.each do |c| %>
<div class='comment-item'>
<div class='row'>
- <p class='comment-sender col-sm-6 col-xs-12'>
- <strong>[email protected]</strong>
- </p>
+ <% unless c.user.nil? %>
+ <p class='comment-sender col-sm-6 col-xs-12'>
+ <strong><%= c.user_full_name %></strong>
+ </p>
+ <% end %>
<p class='comment-date col-sm-6 col-xs-12'>
<%= date_with_tooltip(c.created_at) %>
</p> | 1 | <div class="inset">
<div class="row">
<div class="col-md-12 col-xs-12">
<h1 class="communicart_header">
<%= cart.name %>
</h1>
<div class="communicart_description">
<p>
Purchase Request: <strong>#<%= cart.id %></strong>
</p>
<p>
Requested by:
<strong><%= cart.requester.full_name %></strong>
</p>
<% if cart.gsa_advantage? %>
<p>
GSA Advantage:
<strong>Cart #<%= cart.external_id %></strong>
</p>
<% end %>
</div>
</div>
<%= render partial: "carts/approval_status" %>
</div>
<div class="row">
<%= render partial: 'shared/cart_properties', locals: {cart: cart} %>
</div>
<% if cart.cart_items.any? %>
<div class="row">
<div class="col-md-12">
<%= render partial: 'carts/cart_items', locals: {cart: cart} %>
</div>
</div>
<% end %>
</div>
<%- if cart.flow == 'parallel' %>
<%- if cart.approvals.approved.any? %>
<div class="approval-status-container">
<div id="approval-status">
<h3>Request approved by</h3>
<ul>
<%- cart.approvals.approved.each do |approval| %>
<li class='icon-approved'>
<%= approval.user_email_address %>
<span class='timestamp'>on <%= l approval.updated_at %></span>
</li>
<%- end %>
</ul>
</div>
</div>
<%- end %>
<%- if cart.approvals.pending.any? %>
<div class="approval-status-container">
<div id="approval-status">
<h3>Waiting for approval from</h3>
<ul class="left">
<%- cart.approvals.pending.each do |approval| %>
<li class='icon-pending'>
<%= approval.user_email_address %>
</li>
<%- end %>
</ul>
<ul class="right">
<%- cart.approvals.approved.each do |approval| %>
<li class='icon-approved'>
<%= approval.user_email_address %>
</li>
<%- end %>
</ul>
</div>
</div>
<%- end %>
<%- end %>
<div class="cart-comments-container">
<div id="cart-comments">
<h3>Comments on this purchase request</h3>
<%- if @show_comments %>
<%= form_for [cart, Comment.new] do |f| %>
<%= f.text_area :comment_text, rows: 5 %>
<div class='row text-area-info-container'>
<div class='col-xs-7 col-sm-6 text-area-info-web'>
<p>
These comments will be sent to your requester through email
</p>
</div>
<p class='col-xs-5 col-sm-6 text-area-button'>
<%= submit_tag "Send note", id: :add_a_comment %>
</p>
</div>
<%- end %>
<% if cart.comments.any? %>
<% cart.comments.each do |c| %>
<div class='comment-item'>
<div class='row'>
<p class='comment-sender col-sm-6 col-xs-12'>
<strong>[email protected]</strong>
</p>
<p class='comment-date col-sm-6 col-xs-12'>
<%= date_with_tooltip(c.created_at) %>
</p>
</div>
<div class='row'>
<p class='comment-text col-sm-6 col-xs-12'>
<% unless c.user.nil? %>
<em>
<%= "#{c.user_full_name}: " %>
</em>
<% end %>
<%= c.comment_text %>
</p>
</div>
</div>
<% end %>
<% else %>
<p class='empty-list-label'>
No comments have been added yet
</p>
<% end %>
<%- end %>
</div>
</div>
| 1 | 12,399 | is this "unless" actually needed? | 18F-C2 | rb |
@@ -94,6 +94,10 @@ final class Stemmer {
}
List<CharsRef> list = new ArrayList<>();
+ if (length == 0) {
+ return list;
+ }
+
RootProcessor processor =
(stem, formID, stemException) -> {
list.add(newStem(stem, stemException)); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.hunspell;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.fst.FST;
/**
* Stemmer uses the affix rules declared in the Dictionary to generate one or more stems for a word.
* It conforms to the algorithm in the original hunspell algorithm, including recursive suffix
* stripping.
*/
final class Stemmer {
private final Dictionary dictionary;
private final StringBuilder segment = new StringBuilder();
// used for normalization
private final StringBuilder scratchSegment = new StringBuilder();
private char[] scratchBuffer = new char[32];
// it's '1' if we have no stem exceptions, otherwise every other form
// is really an ID pointing to the exception table
private final int formStep;
/**
* Constructs a new Stemmer which will use the provided Dictionary to create its stems.
*
* @param dictionary Dictionary that will be used to create the stems
*/
public Stemmer(Dictionary dictionary) {
this.dictionary = dictionary;
prefixReader = dictionary.prefixes == null ? null : dictionary.prefixes.getBytesReader();
suffixReader = dictionary.suffixes == null ? null : dictionary.suffixes.getBytesReader();
for (int level = 0; level < 3; level++) {
if (dictionary.prefixes != null) {
prefixArcs[level] = new FST.Arc<>();
}
if (dictionary.suffixes != null) {
suffixArcs[level] = new FST.Arc<>();
}
}
formStep = dictionary.formStep();
}
/**
* Find the stem(s) of the provided word.
*
* @param word Word to find the stems for
* @return List of stems for the word
*/
public List<CharsRef> stem(String word) {
return stem(word.toCharArray(), word.length());
}
/**
* Find the stem(s) of the provided word
*
* @param word Word to find the stems for
* @return List of stems for the word
*/
public List<CharsRef> stem(char[] word, int length) {
if (dictionary.mayNeedInputCleaning()) {
scratchSegment.setLength(0);
scratchSegment.append(word, 0, length);
if (dictionary.needsInputCleaning(scratchSegment)) {
CharSequence cleaned = dictionary.cleanInput(scratchSegment, segment);
scratchBuffer = ArrayUtil.grow(scratchBuffer, cleaned.length());
length = segment.length();
segment.getChars(0, length, scratchBuffer, 0);
word = scratchBuffer;
}
}
List<CharsRef> list = new ArrayList<>();
RootProcessor processor =
(stem, formID, stemException) -> {
list.add(newStem(stem, stemException));
return true;
};
if (!doStem(word, 0, length, WordContext.SIMPLE_WORD, processor)) {
return list;
}
WordCase wordCase = caseOf(word, length);
if (wordCase == WordCase.UPPER || wordCase == WordCase.TITLE) {
CaseVariationProcessor variationProcessor =
(variant, varLength, originalCase) ->
doStem(variant, 0, varLength, WordContext.SIMPLE_WORD, processor);
varyCase(word, length, wordCase, variationProcessor);
}
return list;
}
interface CaseVariationProcessor {
boolean process(char[] word, int length, WordCase originalCase);
}
boolean varyCase(char[] word, int length, WordCase wordCase, CaseVariationProcessor processor) {
if (wordCase == WordCase.UPPER) {
caseFoldTitle(word, length);
char[] aposCase = capitalizeAfterApostrophe(titleBuffer, length);
if (aposCase != null && !processor.process(aposCase, length, wordCase)) {
return false;
}
if (!processor.process(titleBuffer, length, wordCase)) {
return false;
}
if (dictionary.checkSharpS && !varySharpS(titleBuffer, length, processor)) {
return false;
}
}
if (dictionary.isDotICaseChangeDisallowed(word)) {
return true;
}
caseFoldLower(wordCase == WordCase.UPPER ? titleBuffer : word, length);
if (!processor.process(lowerBuffer, length, wordCase)) {
return false;
}
if (wordCase == WordCase.UPPER
&& dictionary.checkSharpS
&& !varySharpS(lowerBuffer, length, processor)) {
return false;
}
return true;
}
// temporary buffers for case variants
private char[] lowerBuffer = new char[8];
private char[] titleBuffer = new char[8];
/** returns EXACT_CASE,TITLE_CASE, or UPPER_CASE type for the word */
WordCase caseOf(char[] word, int length) {
if (dictionary.ignoreCase || length == 0 || Character.isLowerCase(word[0])) {
return WordCase.MIXED;
}
return WordCase.caseOf(word, length);
}
/** folds titlecase variant of word to titleBuffer */
private void caseFoldTitle(char[] word, int length) {
titleBuffer = ArrayUtil.grow(titleBuffer, length);
System.arraycopy(word, 0, titleBuffer, 0, length);
for (int i = 1; i < length; i++) {
titleBuffer[i] = dictionary.caseFold(titleBuffer[i]);
}
}
/** folds lowercase variant of word (title cased) to lowerBuffer */
private void caseFoldLower(char[] word, int length) {
lowerBuffer = ArrayUtil.grow(lowerBuffer, length);
System.arraycopy(word, 0, lowerBuffer, 0, length);
lowerBuffer[0] = dictionary.caseFold(lowerBuffer[0]);
}
// Special prefix handling for Catalan, French, Italian:
// prefixes separated by apostrophe (SANT'ELIA -> Sant'+Elia).
private static char[] capitalizeAfterApostrophe(char[] word, int length) {
for (int i = 1; i < length - 1; i++) {
if (word[i] == '\'') {
char next = word[i + 1];
char upper = Character.toUpperCase(next);
if (upper != next) {
char[] copy = ArrayUtil.copyOfSubArray(word, 0, length);
copy[i + 1] = Character.toUpperCase(upper);
return copy;
}
}
}
return null;
}
  // For German CHECKSHARPS: enumerates variants of the word where each "ss" pair
  // is optionally replaced by "ß", in every combination (bounded at depth 5 to
  // avoid exponential blowup), passing each variant except the original word to
  // the processor. Returns false if the processor asks to stop.
  private boolean varySharpS(char[] word, int length, CaseVariationProcessor processor) {
    Stream<String> result =
        new Object() {
          // index of the next "ss" pair at or after 'start', or -1 if none
          int findSS(int start) {
            for (int i = start; i < length - 1; i++) {
              if (word[i] == 's' && word[i + 1] == 's') {
                return i;
              }
            }
            return -1;
          }

          // recursively builds all ss/ß combinations for the tail starting at 'start';
          // returns null when the tail contains no further "ss"
          Stream<String> replaceSS(int start, int depth) {
            if (depth > 5) { // cut off too large enumeration
              return Stream.of(new String(word, start, length - start));
            }

            int ss = findSS(start);
            if (ss < 0) {
              return null;
            } else {
              String prefix = new String(word, start, ss - start);
              Stream<String> tails = replaceSS(ss + 2, depth + 1);
              if (tails == null) {
                tails = Stream.of(new String(word, ss + 2, length - ss - 2));
              }
              return tails.flatMap(s -> Stream.of(prefix + "ss" + s, prefix + "ß" + s));
            }
          }
        }.replaceSS(0, 0);
    if (result == null) return true;

    String src = new String(word, 0, length);
    for (String s : result.collect(Collectors.toList())) {
      // skip the unmodified word: it is processed by the regular code path
      if (!s.equals(src) && !processor.process(s.toCharArray(), s.length(), null)) {
        return false;
      }
    }
    return true;
  }
  // Reports every acceptable dictionary root for the word to the processor:
  // first direct dictionary hits (filtered by NEEDAFFIX, COMPOUNDFORBID and the
  // compound-position flags), then roots reachable via affix stripping (stem()).
  // Returns false if the processor asked to stop or a COMPOUNDFORBID entry was
  // hit in a compound-begin/middle position.
  boolean doStem(
      char[] word, int offset, int length, WordContext context, RootProcessor processor) {
    IntsRef forms = dictionary.lookupWord(word, offset, length);
    if (forms != null) {
      for (int i = 0; i < forms.length; i += formStep) {
        int entryId = forms.ints[forms.offset + i];
        // we can't add this form, it's a pseudostem requiring an affix
        if (dictionary.hasFlag(entryId, dictionary.needaffix)) {
          continue;
        }
        if ((context == WordContext.COMPOUND_BEGIN || context == WordContext.COMPOUND_MIDDLE)
            && dictionary.hasFlag(entryId, dictionary.compoundForbid)) {
          return false;
        }
        if (!isRootCompatibleWithContext(context, -1, entryId)) {
          continue;
        }
        if (!callProcessor(word, offset, length, processor, forms, i)) {
          return false;
        }
      }
    }
    // -1/FLAG_UNSET: no affix stripped yet; recursion depth 0, prefixes allowed
    return stem(
        word, offset, length, context, -1, Dictionary.FLAG_UNSET, -1, 0, true, false, processor);
  }
/**
* Find the unique stem(s) of the provided word
*
* @param word Word to find the stems for
* @return List of stems for the word
*/
public List<CharsRef> uniqueStems(char[] word, int length) {
List<CharsRef> stems = stem(word, length);
if (stems.size() < 2) {
return stems;
}
CharArraySet terms = new CharArraySet(8, dictionary.ignoreCase);
List<CharsRef> deduped = new ArrayList<>();
for (CharsRef s : stems) {
if (!terms.contains(s)) {
deduped.add(s);
terms.add(s);
}
}
return deduped;
}
  /** Callback invoked once for every dictionary root found for a word. */
  interface RootProcessor {
    /**
     * @param stem the text of the found dictionary entry
     * @param formID internal id of the dictionary entry, e.g. to be used in {@link
     *     Dictionary#hasFlag(int, char)}
     * @param morphDataId the id of the custom morphological data (0 if none), to be used with
     *     {@link Dictionary#morphData}
     * @return whether the processing should be continued
     */
    boolean processRoot(CharsRef stem, int formID, int morphDataId);
  }
private String stemException(int morphDataId) {
if (morphDataId > 0) {
String data = dictionary.morphData.get(morphDataId);
int start = data.startsWith("st:") ? 0 : data.indexOf(" st:");
if (start >= 0) {
int nextSpace = data.indexOf(' ', start + 3);
return data.substring(start + 3, nextSpace < 0 ? data.length() : nextSpace);
}
}
return null;
}
  // Builds the CharsRef to report for a found root: uses the entry's "st:" stem
  // exception when present, and applies the dictionary's OCONV output conversion
  // if one is defined (reusing scratchSegment as a working buffer).
  private CharsRef newStem(CharsRef stem, int morphDataId) {
    String exception = stemException(morphDataId);

    if (dictionary.oconv != null) {
      scratchSegment.setLength(0);
      if (exception != null) {
        scratchSegment.append(exception);
      } else {
        scratchSegment.append(stem.chars, stem.offset, stem.length);
      }
      dictionary.oconv.applyMappings(scratchSegment);
      // copy out of the shared scratch buffer so the returned ref stays stable
      char[] cleaned = new char[scratchSegment.length()];
      scratchSegment.getChars(0, cleaned.length, cleaned, 0);
      return new CharsRef(cleaned, 0, cleaned.length);
    } else {
      if (exception != null) {
        return new CharsRef(exception);
      } else {
        return stem;
      }
    }
  }
  // some state for traversing FSTs: shared byte readers plus one pre-allocated
  // arc per possible recursion depth of stem() (at most 3 levels)
  private final FST.BytesReader prefixReader;
  private final FST.BytesReader suffixReader;

  @SuppressWarnings({"unchecked", "rawtypes"})
  private final FST.Arc<IntsRef>[] prefixArcs = new FST.Arc[3];

  @SuppressWarnings({"unchecked", "rawtypes"})
  private final FST.Arc<IntsRef>[] suffixArcs = new FST.Arc[3];
  /**
   * Generates a list of stems for the provided word
   *
   * @param word Word to generate the stems for
   * @param previous previous affix that was removed (so we dont remove same one twice)
   * @param prevFlag Flag from a previous stemming step that need to be cross-checked with any
   *     affixes in this recursive step
   * @param prefixId ID of the most inner removed prefix, so that when removing a suffix, it's also
   *     checked against the word
   * @param recursionDepth current recursiondepth
   * @param doPrefix true if we should remove prefixes
   * @param previousWasPrefix true if the previous removal was a prefix: if we are removing a
   *     suffix, and it has no continuation requirements, it's ok. but two prefixes
   *     (COMPLEXPREFIXES) or two suffixes must have continuation requirements to recurse.
   * @return whether the processing should be continued
   */
  private boolean stem(
      char[] word,
      int offset,
      int length,
      WordContext context,
      int previous,
      char prevFlag,
      int prefixId,
      int recursionDepth,
      boolean doPrefix,
      boolean previousWasPrefix,
      RootProcessor processor) {
    if (doPrefix && dictionary.prefixes != null) {
      FST<IntsRef> fst = dictionary.prefixes;
      FST.Arc<IntsRef> arc = prefixArcs[recursionDepth];
      fst.getFirstArc(arc);
      IntsRef output = fst.outputs.getNoOutput();
      // FULLSTRIP lets an affix consume the entire word, hence the +1
      int limit = dictionary.fullStrip ? length + 1 : length;
      for (int i = 0; i < limit; i++) {
        if (i > 0) {
          // advance the prefix FST by the next word character; stop once no
          // registered prefix can match anymore
          output = Dictionary.nextArc(fst, arc, prefixReader, output, word[offset + i - 1]);
          if (output == null) {
            break;
          }
        }
        if (!arc.isFinal()) {
          continue;
        }
        IntsRef prefixes = fst.outputs.add(output, arc.nextFinalOutput());

        for (int j = 0; j < prefixes.length; j++) {
          int prefix = prefixes.ints[prefixes.offset + j];
          if (prefix == previous) {
            continue;
          }

          if (isAffixCompatible(prefix, prevFlag, recursionDepth, true, false, context)) {
            char[] strippedWord = stripAffix(word, offset, length, i, prefix, true);
            if (strippedWord == null) {
              continue;
            }

            // stripAffix returns the original array when no strip text had to be re-added
            boolean pureAffix = strippedWord == word;
            if (!applyAffix(
                strippedWord,
                pureAffix ? offset + i : 0,
                pureAffix ? length - i : strippedWord.length,
                context,
                prefix,
                previous,
                -1,
                recursionDepth,
                true,
                processor)) {
              return false;
            }
          }
        }
      }
    }

    if (dictionary.suffixes != null) {
      FST<IntsRef> fst = dictionary.suffixes;
      FST.Arc<IntsRef> arc = suffixArcs[recursionDepth];
      fst.getFirstArc(arc);
      IntsRef output = fst.outputs.getNoOutput();
      // the suffix FST is traversed from the end of the word backwards
      int limit = dictionary.fullStrip ? 0 : 1;
      for (int i = length; i >= limit; i--) {
        if (i < length) {
          output = Dictionary.nextArc(fst, arc, suffixReader, output, word[offset + i]);
          if (output == null) {
            break;
          }
        }
        if (!arc.isFinal()) {
          continue;
        }
        IntsRef suffixes = fst.outputs.add(output, arc.nextFinalOutput());

        for (int j = 0; j < suffixes.length; j++) {
          int suffix = suffixes.ints[suffixes.offset + j];
          if (suffix == previous) {
            continue;
          }

          if (isAffixCompatible(
              suffix, prevFlag, recursionDepth, false, previousWasPrefix, context)) {
            char[] strippedWord = stripAffix(word, offset, length, length - i, suffix, false);
            if (strippedWord == null) {
              continue;
            }

            boolean pureAffix = strippedWord == word;
            if (!applyAffix(
                strippedWord,
                pureAffix ? offset : 0,
                pureAffix ? i : strippedWord.length,
                context,
                suffix,
                previous,
                prefixId,
                recursionDepth,
                false,
                processor)) {
              return false;
            }
          }
        }
      }
    }

    return true;
  }
  /**
   * @return null if affix conditions isn't met; a reference to the same char[] if the affix has no
   *     strip data and can thus be simply removed, or a new char[] containing the word affix
   *     removal
   */
  private char[] stripAffix(
      char[] word, int offset, int length, int affixLen, int affix, boolean isPrefix) {
    int deAffixedLen = length - affixLen;

    // the affix's STRIP text is stored as a slice of the shared stripData array
    int stripOrd = dictionary.affixData(affix, Dictionary.AFFIX_STRIP_ORD);
    int stripStart = dictionary.stripOffsets[stripOrd];
    int stripEnd = dictionary.stripOffsets[stripOrd + 1];
    int stripLen = stripEnd - stripStart;

    char[] stripData = dictionary.stripData;
    int condition = dictionary.getAffixCondition(affix);
    if (condition != 0) {
      // the affix condition pattern must accept the de-affixed stem
      int deAffixedOffset = isPrefix ? offset + affixLen : offset;
      if (!dictionary.patterns.get(condition).acceptsStem(word, deAffixedOffset, deAffixedLen)) {
        return null;
      }
    }

    // no strip text: the caller can slice the original array instead of copying
    if (stripLen == 0) return word;

    // rebuild the word: strip text replaces the removed affix at the proper end
    char[] strippedWord = new char[stripLen + deAffixedLen];
    System.arraycopy(
        word,
        offset + (isPrefix ? affixLen : 0),
        strippedWord,
        isPrefix ? stripLen : 0,
        deAffixedLen);
    System.arraycopy(stripData, stripStart, strippedWord, isPrefix ? 0 : deAffixedLen, stripLen);
    return strippedWord;
  }
  // Checks whether the candidate affix may be stripped at this point: compound
  // restrictions (COMPOUNDFORBID / COMPOUNDPERMIT / ONLYINCOMPOUND) and, at
  // recursion depth > 0, the cross-product continuation rules.
  private boolean isAffixCompatible(
      int affix,
      char prevFlag,
      int recursionDepth,
      boolean isPrefix,
      boolean previousWasPrefix,
      WordContext context) {
    int append = dictionary.affixData(affix, Dictionary.AFFIX_APPEND);

    if (context.isCompound()) {
      // suffixes carrying COMPOUNDFORBID may never appear inside a compound
      if (!isPrefix && dictionary.hasFlag(append, dictionary.compoundForbid)) {
        return false;
      }
      // inner compound positions require COMPOUNDPERMIT unless the position
      // allows this affix kind without a special permit
      if (!context.isAffixAllowedWithoutSpecialPermit(isPrefix)
          && !dictionary.hasFlag(append, dictionary.compoundPermit)) {
        return false;
      }
      if (context == WordContext.COMPOUND_END
          && !isPrefix
          && !previousWasPrefix
          && dictionary.hasFlag(append, dictionary.onlyincompound)) {
        return false;
      }
    } else if (dictionary.hasFlag(append, dictionary.onlyincompound)) {
      return false;
    }

    if (recursionDepth == 0) {
      return true;
    }

    if (dictionary.isCrossProduct(affix)) {
      // cross check incoming continuation class (flag of previous affix) against list.
      return previousWasPrefix || dictionary.hasFlag(append, prevFlag);
    }

    return false;
  }
  /**
   * Applies the affix rule to the given word, producing a list of stems if any are found
   *
   * @param strippedWord Char array containing the word with the affix removed and the strip added
   * @param offset where the word actually starts in the array
   * @param length the length of the stripped word
   * @param affix HunspellAffix representing the affix rule itself
   * @param prefixId when we already stripped a prefix, we can't simply recurse and check the
   *     suffix, unless both are compatible so we must check dictionary form against both to add it
   *     as a stem!
   * @param recursionDepth current recursion depth
   * @param prefix true if we are removing a prefix (false if it's a suffix)
   * @return whether the processing should be continued
   */
  private boolean applyAffix(
      char[] strippedWord,
      int offset,
      int length,
      WordContext context,
      int affix,
      int previousAffix,
      int prefixId,
      int recursionDepth,
      boolean prefix,
      RootProcessor processor) {
    char flag = dictionary.affixData(affix, Dictionary.AFFIX_FLAG);

    // skip the dictionary lookup entirely when circumfix/NEEDAFFIX rules demand
    // yet another affix before the stripped form may count as a root
    boolean skipLookup = needsAnotherAffix(affix, previousAffix, !prefix, prefixId);
    IntsRef forms = skipLookup ? null : dictionary.lookupWord(strippedWord, offset, length);
    if (forms != null) {
      for (int i = 0; i < forms.length; i += formStep) {
        int entryId = forms.ints[forms.offset + i];
        // the root must carry the affix's flag (directly or appended by a prior affix)
        if (dictionary.hasFlag(entryId, flag) || isFlagAppendedByAffix(prefixId, flag)) {
          // confusing: in this one exception, we already chained the first prefix against the
          // second,
          // so it doesnt need to be checked against the word
          boolean chainedPrefix = dictionary.complexPrefixes && recursionDepth == 1 && prefix;
          if (!chainedPrefix && prefixId >= 0) {
            char prefixFlag = dictionary.affixData(prefixId, Dictionary.AFFIX_FLAG);
            if (!dictionary.hasFlag(entryId, prefixFlag)
                && !isFlagAppendedByAffix(affix, prefixFlag)) {
              continue;
            }
          }

          if (!isRootCompatibleWithContext(context, affix, entryId)) {
            continue;
          }

          if (!callProcessor(strippedWord, offset, length, processor, forms, i)) {
            return false;
          }
        }
      }
    }

    // cross-product affixes may combine with one more affix (two levels max)
    if (dictionary.isCrossProduct(affix) && recursionDepth <= 1) {
      boolean doPrefix;
      if (recursionDepth == 0) {
        if (prefix) {
          prefixId = affix;
          doPrefix = dictionary.complexPrefixes && dictionary.isSecondStagePrefix(flag);
          // we took away the first prefix.
          // COMPLEXPREFIXES = true: combine with a second prefix and another suffix
          // COMPLEXPREFIXES = false: combine with a suffix
        } else if (!dictionary.complexPrefixes && dictionary.isSecondStageSuffix(flag)) {
          doPrefix = false;
          // we took away a suffix.
          // COMPLEXPREFIXES = true: we don't recurse! only one suffix allowed
          // COMPLEXPREFIXES = false: combine with another suffix
        } else {
          return true;
        }
      } else {
        doPrefix = false;
        if (prefix && dictionary.complexPrefixes) {
          prefixId = affix;
          // we took away the second prefix: go look for another suffix
        } else if (prefix || dictionary.complexPrefixes || !dictionary.isSecondStageSuffix(flag)) {
          return true;
        }
        // we took away a prefix, then a suffix: go look for another suffix
      }

      return stem(
          strippedWord,
          offset,
          length,
          context,
          affix,
          flag,
          prefixId,
          recursionDepth + 1,
          doPrefix,
          prefix,
          processor);
    }

    return true;
  }
private boolean isRootCompatibleWithContext(WordContext context, int lastAffix, int entryId) {
if (!context.isCompound() && dictionary.hasFlag(entryId, dictionary.onlyincompound)) {
return false;
}
if (context.isCompound() && context != WordContext.COMPOUND_RULE_END) {
char cFlag = context.requiredFlag(dictionary);
return dictionary.hasFlag(entryId, cFlag)
|| isFlagAppendedByAffix(lastAffix, cFlag)
|| dictionary.hasFlag(entryId, dictionary.compoundFlag)
|| isFlagAppendedByAffix(lastAffix, dictionary.compoundFlag);
}
return true;
}
private boolean callProcessor(
char[] word, int offset, int length, RootProcessor processor, IntsRef forms, int i) {
CharsRef stem = new CharsRef(word, offset, length);
int morphDataId = dictionary.hasCustomMorphData ? forms.ints[forms.offset + i + 1] : 0;
return processor.processRoot(stem, forms.ints[forms.offset + i], morphDataId);
}
  // Decides whether the stripped form must NOT be looked up as a standalone
  // root yet: either the CIRCUMFIX markers of the removed prefix and suffix
  // disagree, or the affix itself appends NEEDAFFIX and no other qualifying
  // affix has been applied.
  private boolean needsAnotherAffix(int affix, int previousAffix, boolean isSuffix, int prefixId) {
    char circumfix = dictionary.circumfix;
    // if circumfix was previously set by a prefix, we must check this suffix,
    // to ensure it has it, and vice versa
    if (isSuffix
        && isFlagAppendedByAffix(prefixId, circumfix) != isFlagAppendedByAffix(affix, circumfix)) {
      return true;
    }
    if (isFlagAppendedByAffix(affix, dictionary.needaffix)) {
      return !isSuffix
          || previousAffix < 0
          || isFlagAppendedByAffix(previousAffix, dictionary.needaffix);
    }
    return false;
  }
private boolean isFlagAppendedByAffix(int affixId, char flag) {
if (affixId < 0 || flag == Dictionary.FLAG_UNSET) return false;
int appendId = dictionary.affixData(affixId, Dictionary.AFFIX_APPEND);
return dictionary.hasFlag(appendId, flag);
}
}
| 1 | 40,765 | We don't accept empty words for lookup anymore, again | apache-lucene-solr | java |
@@ -0,0 +1,18 @@
+#!/usr/bin/env node
+
+const fs = require('fs')
+const yarnOut = fs.readFileSync(0, {encoding: 'utf8'})
+
+const [installTimeString] = /(?<=^Done in )\d+\.\d+(?=s\.$)/m.exec(yarnOut)
+const installTime = Number(installTimeString)
+
+console.log(`Install time: ${installTime}s`)
+
+if (installTime < 30) {
+ console.log("We're below 30 secs. That's awesome!")
+} else if (installTime < 50) {
+ console.log("We're below 50 secs. That's fine!")
+} else {
+ console.log("We're above 50 secs. That's not great!")
+ process.exit(1)
+} | 1 | 1 | 9,145 | We'll have to account for CI installations being faster than local ones. Do y'all think we should leave it at < 30 green / < 50 orange | >= 50 red or lower our thresholds? | blitz-js-blitz | js |
|
@@ -101,6 +101,7 @@ class Command(object):
EXECUTE_ASYNC_SCRIPT = "executeAsyncScript"
SET_SCRIPT_TIMEOUT = "setScriptTimeout"
SET_TIMEOUTS = "setTimeouts"
+ W3C_MINIMIZE_WINDOW = "w3cMinimizeWindow"
MAXIMIZE_WINDOW = "windowMaximize"
W3C_MAXIMIZE_WINDOW = "w3cMaximizeWindow"
GET_LOG = "getLog" | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class Command(object):
    """
    Defines constants for the standard WebDriver commands.

    While these constants have no meaning in and of themselves, they are
    used to marshal commands through a service that implements WebDriver's
    remote wire protocol:

        https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
    """

    # Keep in sync with org.openqa.selenium.remote.DriverCommand

    # Session management
    STATUS = "status"
    NEW_SESSION = "newSession"
    GET_ALL_SESSIONS = "getAllSessions"
    DELETE_SESSION = "deleteSession"
    CLOSE = "close"
    QUIT = "quit"

    # Navigation
    GET = "get"
    GO_BACK = "goBack"
    GO_FORWARD = "goForward"
    REFRESH = "refresh"

    # Cookies
    ADD_COOKIE = "addCookie"
    GET_COOKIE = "getCookie"
    GET_ALL_COOKIES = "getCookies"
    DELETE_COOKIE = "deleteCookie"
    DELETE_ALL_COOKIES = "deleteAllCookies"

    # Element location and interaction
    FIND_ELEMENT = "findElement"
    FIND_ELEMENTS = "findElements"
    FIND_CHILD_ELEMENT = "findChildElement"
    FIND_CHILD_ELEMENTS = "findChildElements"
    CLEAR_ELEMENT = "clearElement"
    CLICK_ELEMENT = "clickElement"
    SEND_KEYS_TO_ELEMENT = "sendKeysToElement"
    SEND_KEYS_TO_ACTIVE_ELEMENT = "sendKeysToActiveElement"
    SUBMIT_ELEMENT = "submitElement"
    UPLOAD_FILE = "uploadFile"

    # Window handles, size and position
    # (W3C_-prefixed variants target the W3C WebDriver spec endpoints; the
    # unprefixed ones target the legacy JSON Wire Protocol)
    GET_CURRENT_WINDOW_HANDLE = "getCurrentWindowHandle"
    W3C_GET_CURRENT_WINDOW_HANDLE = "w3cGetCurrentWindowHandle"
    GET_WINDOW_HANDLES = "getWindowHandles"
    W3C_GET_WINDOW_HANDLES = "w3cGetWindowHandles"
    GET_WINDOW_SIZE = "getWindowSize"
    W3C_GET_WINDOW_SIZE = "w3cGetWindowSize"
    W3C_GET_WINDOW_POSITION = "w3cGetWindowPosition"
    GET_WINDOW_POSITION = "getWindowPosition"
    SET_WINDOW_SIZE = "setWindowSize"
    W3C_SET_WINDOW_SIZE = "w3cSetWindowSize"
    SET_WINDOW_RECT = "setWindowRect"
    GET_WINDOW_RECT = "getWindowRect"
    SET_WINDOW_POSITION = "setWindowPosition"
    W3C_SET_WINDOW_POSITION = "w3cSetWindowPosition"
    SWITCH_TO_WINDOW = "switchToWindow"
    SWITCH_TO_FRAME = "switchToFrame"
    SWITCH_TO_PARENT_FRAME = "switchToParentFrame"

    # Document and element state
    GET_ACTIVE_ELEMENT = "getActiveElement"
    W3C_GET_ACTIVE_ELEMENT = "w3cGetActiveElement"
    GET_CURRENT_URL = "getCurrentUrl"
    GET_PAGE_SOURCE = "getPageSource"
    GET_TITLE = "getTitle"
    EXECUTE_SCRIPT = "executeScript"
    W3C_EXECUTE_SCRIPT = "w3cExecuteScript"
    W3C_EXECUTE_SCRIPT_ASYNC = "w3cExecuteScriptAsync"
    GET_ELEMENT_TEXT = "getElementText"
    GET_ELEMENT_VALUE = "getElementValue"
    GET_ELEMENT_TAG_NAME = "getElementTagName"
    SET_ELEMENT_SELECTED = "setElementSelected"
    IS_ELEMENT_SELECTED = "isElementSelected"
    IS_ELEMENT_ENABLED = "isElementEnabled"
    IS_ELEMENT_DISPLAYED = "isElementDisplayed"
    GET_ELEMENT_LOCATION = "getElementLocation"
    GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW = "getElementLocationOnceScrolledIntoView"
    GET_ELEMENT_SIZE = "getElementSize"
    GET_ELEMENT_RECT = "getElementRect"
    GET_ELEMENT_ATTRIBUTE = "getElementAttribute"
    GET_ELEMENT_PROPERTY = "getElementProperty"
    GET_ELEMENT_VALUE_OF_CSS_PROPERTY = "getElementValueOfCssProperty"
    ELEMENT_EQUALS = "elementEquals"

    # Screenshots, timeouts, window state and logs
    SCREENSHOT = "screenshot"
    ELEMENT_SCREENSHOT = "elementScreenshot"
    IMPLICIT_WAIT = "implicitlyWait"
    EXECUTE_ASYNC_SCRIPT = "executeAsyncScript"
    SET_SCRIPT_TIMEOUT = "setScriptTimeout"
    SET_TIMEOUTS = "setTimeouts"
    MAXIMIZE_WINDOW = "windowMaximize"
    W3C_MAXIMIZE_WINDOW = "w3cMaximizeWindow"
    GET_LOG = "getLog"
    GET_AVAILABLE_LOG_TYPES = "getAvailableLogTypes"

    # Alerts
    DISMISS_ALERT = "dismissAlert"
    W3C_DISMISS_ALERT = "w3cDismissAlert"
    ACCEPT_ALERT = "acceptAlert"
    W3C_ACCEPT_ALERT = "w3cAcceptAlert"
    SET_ALERT_VALUE = "setAlertValue"
    W3C_SET_ALERT_VALUE = "w3cSetAlertValue"
    GET_ALERT_TEXT = "getAlertText"
    W3C_GET_ALERT_TEXT = "w3cGetAlertText"
    SET_ALERT_CREDENTIALS = "setAlertCredentials"

    # Advanced user interactions
    W3C_ACTIONS = "actions"
    W3C_CLEAR_ACTIONS = "clearActionState"
    CLICK = "mouseClick"
    DOUBLE_CLICK = "mouseDoubleClick"
    MOUSE_DOWN = "mouseButtonDown"
    MOUSE_UP = "mouseButtonUp"
    MOVE_TO = "mouseMoveTo"

    # Screen Orientation
    SET_SCREEN_ORIENTATION = "setScreenOrientation"
    GET_SCREEN_ORIENTATION = "getScreenOrientation"

    # Touch Actions
    SINGLE_TAP = "touchSingleTap"
    TOUCH_DOWN = "touchDown"
    TOUCH_UP = "touchUp"
    TOUCH_MOVE = "touchMove"
    TOUCH_SCROLL = "touchScroll"
    DOUBLE_TAP = "touchDoubleTap"
    LONG_PRESS = "touchLongPress"
    FLICK = "touchFlick"

    # HTML 5
    EXECUTE_SQL = "executeSql"

    GET_LOCATION = "getLocation"
    SET_LOCATION = "setLocation"

    GET_APP_CACHE = "getAppCache"
    GET_APP_CACHE_STATUS = "getAppCacheStatus"
    CLEAR_APP_CACHE = "clearAppCache"

    GET_LOCAL_STORAGE_ITEM = "getLocalStorageItem"
    REMOVE_LOCAL_STORAGE_ITEM = "removeLocalStorageItem"
    GET_LOCAL_STORAGE_KEYS = "getLocalStorageKeys"
    SET_LOCAL_STORAGE_ITEM = "setLocalStorageItem"
    CLEAR_LOCAL_STORAGE = "clearLocalStorage"
    GET_LOCAL_STORAGE_SIZE = "getLocalStorageSize"

    GET_SESSION_STORAGE_ITEM = "getSessionStorageItem"
    REMOVE_SESSION_STORAGE_ITEM = "removeSessionStorageItem"
    GET_SESSION_STORAGE_KEYS = "getSessionStorageKeys"
    SET_SESSION_STORAGE_ITEM = "setSessionStorageItem"
    CLEAR_SESSION_STORAGE = "clearSessionStorage"
    GET_SESSION_STORAGE_SIZE = "getSessionStorageSize"

    # Mobile
    GET_NETWORK_CONNECTION = "getNetworkConnection"
    SET_NETWORK_CONNECTION = "setNetworkConnection"
    CURRENT_CONTEXT_HANDLE = "getCurrentContextHandle"
    CONTEXT_HANDLES = "getContextHandles"
    SWITCH_TO_CONTEXT = "switchToContext"
| 1 | 14,686 | No need for this to be `W3C_` since there is no JWP equivalent | SeleniumHQ-selenium | rb |
@@ -1,5 +1,12 @@
module RSpec
module Core
+ if defined?(::Random)
+ RandomNumberGenerator = ::Random
+ else
+ require 'rspec/core/backport_random'
+ RandomNumberGenerator = RSpec::Core::Backports::Random
+ end
+
# @private
module Ordering
# @private | 1 | module RSpec
module Core
# @private
module Ordering
# @private
# The default global ordering (defined order).
class Identity
def order(items)
items
end
end
# @private
# Orders items randomly.
class Random
def initialize(configuration)
@configuration = configuration
@used = false
end
def used?
@used
end
def order(items)
@used = true
Kernel.srand @configuration.seed
ordering = items.shuffle
Kernel.srand # reset random generation
ordering
end
end
# @private
# Orders items based on a custom block.
class Custom
def initialize(callable)
@callable = callable
end
def order(list)
@callable.call(list)
end
end
# @private
# Stores the different ordering strategies.
class Registry
def initialize(configuration)
@configuration = configuration
@strategies = {}
register(:random, Random.new(configuration))
identity = Identity.new
register(:defined, identity)
# The default global ordering is --defined.
register(:global, identity)
end
def fetch(name, &fallback)
@strategies.fetch(name, &fallback)
end
def register(sym, strategy)
@strategies[sym] = strategy
end
def used_random_seed?
@strategies[:random].used?
end
end
# @private
# Manages ordering configuration.
#
# @note This is not intended to be used externally. Use
# the APIs provided by `RSpec::Core::Configuration` instead.
class ConfigurationManager
attr_reader :seed, :ordering_registry
def initialize
@ordering_registry = Registry.new(self)
@seed = srand % 0xFFFF
@seed_forced = false
@order_forced = false
end
def seed_used?
ordering_registry.used_random_seed?
end
def seed=(seed)
return if @seed_forced
register_ordering(:global, ordering_registry.fetch(:random))
@seed = seed.to_i
end
def order=(type)
order, seed = type.to_s.split(':')
@seed = seed = seed.to_i if seed
ordering_name = if order.include?('rand')
:random
elsif order == 'defined'
:defined
end
register_ordering(:global, ordering_registry.fetch(ordering_name)) if ordering_name
end
def force(hash)
if hash.has_key?(:seed)
self.seed = hash[:seed]
@seed_forced = true
@order_forced = true
elsif hash.has_key?(:order)
self.order = hash[:order]
@order_forced = true
end
end
def register_ordering(name, strategy = Custom.new(Proc.new { |l| yield l }))
return if @order_forced && name == :global
ordering_registry.register(name, strategy)
end
end
end
end
end
| 1 | 10,779 | As far as I can tell, there's nothing that creates an instance of `Random` or that calls `rand` or `seed`. Am I missing it? If not, let's remove the `Random` class since we don't really need it and we can move the definitions of `shuffle` into `RSpec::Core::Ordering`. One less type :). | rspec-rspec-core | rb |
@@ -480,3 +480,13 @@ func (s *Server) NumSubscriptions() uint32 {
stats := s.sl.Stats()
return stats.NumSubs
}
+
+// Addr will return the net.Addr object for the current listener.
+func (s *Server) Addr() net.Addr {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.listener == nil {
+ return nil
+ }
+ return s.listener.Addr()
+} | 1 | // Copyright 2012-2014 Apcera Inc. All rights reserved.
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"os/signal"
"strconv"
"sync"
"time"
// Allow dynamic profiling.
_ "net/http/pprof"
"github.com/apcera/gnatsd/sublist"
)
// Info is the information sent to clients to help them understand information
// about this server.
type Info struct {
	ID           string `json:"server_id"`     // unique server identifier
	Version      string `json:"version"`       // gnatsd version string
	Host         string `json:"host"`          // listen host
	Port         int    `json:"port"`          // client listen port
	AuthRequired bool   `json:"auth_required"` // whether clients must authenticate
	SslRequired  bool   `json:"ssl_required"`  // whether clients must use SSL
	MaxPayload   int    `json:"max_payload"`   // maximum accepted message payload size
}
// Server is our main struct.
type Server struct {
	mu            sync.Mutex         // guards all mutable state below
	info          Info               // info advertised to connecting clients
	infoJSON      []byte             // cached INFO protocol line for clients
	sl            *sublist.Sublist   // subscription matching structure
	gcid          uint64             // client id counter
	opts          *Options           // server options, set at construction
	trace         bool               // trace logging enabled
	debug         bool               // debug logging enabled
	running       bool               // true between New() and Shutdown()
	listener      net.Listener       // client accept listener
	clients       map[uint64]*client // connected clients by id
	routes        map[uint64]*client // route connections by id
	remotes       map[string]*client // route connections by remote server id
	done          chan bool          // accept loops signal completion here
	start         time.Time          // server start time
	http          net.Listener       // HTTP monitoring listener, if enabled
	stats                            // embedded message/byte counters
	routeListener net.Listener       // route accept listener, if clustering
	grid          uint64             // route id counter
	routeInfo     Info               // info advertised to connecting routes
	routeInfoJSON []byte             // cached INFO protocol line for routes
	rcQuit        chan bool          // closed on shutdown to stop route reconnects
}
// stats holds message and byte accounting counters, embedded in Server.
type stats struct {
	inMsgs   int64 // messages received
	outMsgs  int64 // messages delivered
	inBytes  int64 // bytes received
	outBytes int64 // bytes delivered
}
// New will setup a new server struct after parsing the options.
// It initializes logging, client/route maps and the cached INFO line,
// installs signal handlers, and marks the server as running. It does
// not start listening; call Start for that.
func New(opts *Options) *Server {
	processOptions(opts)
	info := Info{
		ID:           genID(),
		Version:      VERSION,
		Host:         opts.Host,
		Port:         opts.Port,
		AuthRequired: false,
		SslRequired:  false,
		MaxPayload:   MAX_PAYLOAD_SIZE,
	}
	// Check for Auth items
	if opts.Username != "" || opts.Authorization != "" {
		info.AuthRequired = true
	}
	s := &Server{
		info:  info,
		sl:    sublist.New(),
		opts:  opts,
		debug: opts.Debug,
		trace: opts.Trace,
		done:  make(chan bool, 1),
		start: time.Now(),
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	// Setup logging with flags
	s.LogInit()

	// For tracking clients
	s.clients = make(map[uint64]*client)

	// For tracking routes and their remote ids
	s.routes = make(map[uint64]*client)
	s.remotes = make(map[string]*client)

	// Used to kick out all of the route
	// connect Go routines.
	s.rcQuit = make(chan bool)

	// Generate the info json
	b, err := json.Marshal(s.info)
	if err != nil {
		Fatalf("Error marshalling INFO JSON: %+v\n", err)
	}
	s.infoJSON = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))

	s.handleSignals()

	Logf("Starting gnatsd version %s", VERSION)

	s.running = true

	return s
}
// PrintAndDie is exported for access in other packages.
// It writes the message (plus a newline) to stderr and exits with status 1.
func PrintAndDie(msg string) {
	fmt.Fprintln(os.Stderr, msg)
	os.Exit(1)
}
// PrintServerAndExit will print our version and exit.
func PrintServerAndExit() {
	fmt.Println("gnatsd version " + VERSION)
	os.Exit(0)
}
// Signal Handling
// handleSignals installs an interrupt (SIGINT) handler that logs and exits
// the process. It is a no-op when opts.NoSigs is set (useful for tests).
func (s *Server) handleSignals() {
	if s.opts.NoSigs {
		return
	}
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		for sig := range c {
			Debugf("Trapped Signal; %v", sig)
			// FIXME, trip running?
			Log("Server Exiting..")
			os.Exit(0)
		}
	}()
}
// Protected check on running state
func (s *Server) isRunning() bool {
	s.mu.Lock()
	running := s.running
	s.mu.Unlock()
	return running
}
// logPid writes the current process id to the configured pidfile,
// exiting the process if the file cannot be written.
func (s *Server) logPid() {
	pidStr := strconv.Itoa(os.Getpid())
	err := ioutil.WriteFile(s.opts.PidFile, []byte(pidStr), 0660)
	if err != nil {
		// No trailing newline here: PrintAndDie already appends one,
		// and the previous "\n" produced a doubled newline on stderr.
		PrintAndDie(fmt.Sprintf("Could not write pidfile: %v", err))
	}
}
// Start up the server, this will block.
// Start via a Go routine if needed.
// It optionally writes the pidfile and starts the HTTP monitor, route
// listener and profiler (per options) before entering the client AcceptLoop.
func (s *Server) Start() {

	// Log the pid to a file
	if s.opts.PidFile != _EMPTY_ {
		s.logPid()
	}

	// Start up the http server if needed.
	if s.opts.HTTPPort != 0 {
		s.StartHTTPMonitoring()
	}

	// Start up routing as well if needed.
	if s.opts.ClusterPort != 0 {
		s.StartRouting()
	}

	// Pprof http endpoint for the profiler.
	if s.opts.ProfPort != 0 {
		s.StartProfiler()
	}

	// Wait for clients.
	s.AcceptLoop()
}
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
// It is safe to call multiple times: only the first call does any work.
// The method closes every listener (counting how many accept loops must
// report back on s.done), releases route-connect goroutines, closes all
// client connections, and finally waits for the accept loops to exit.
func (s *Server) Shutdown() {
	s.mu.Lock()

	// Prevent issues with multiple calls.
	if !s.running {
		s.mu.Unlock()
		return
	}

	s.running = false

	// Copy off the clients (so connections can be closed outside the lock)
	clients := make(map[uint64]*client)
	for i, c := range s.clients {
		clients[i] = c
	}

	// Number of done channel responses we expect.
	doneExpected := 0

	// Kick client AcceptLoop()
	if s.listener != nil {
		doneExpected++
		s.listener.Close()
		s.listener = nil
	}

	// Kick route AcceptLoop()
	if s.routeListener != nil {
		doneExpected++
		s.routeListener.Close()
		s.routeListener = nil
	}

	// Kick HTTP monitoring if its running
	if s.http != nil {
		doneExpected++
		s.http.Close()
		s.http = nil
	}

	// Release the solicited routes connect go routines.
	close(s.rcQuit)

	s.mu.Unlock()

	// Close client connections
	for _, c := range clients {
		c.closeConnection()
	}

	// Block until the accept loops exit
	for doneExpected > 0 {
		<-s.done
		doneExpected--
	}
}
// AcceptLoop is exported for easier testing.
// It binds the client listen socket and accepts connections until the server
// shuts down, backing off exponentially on temporary accept errors.
func (s *Server) AcceptLoop() {
	hp := fmt.Sprintf("%s:%d", s.opts.Host, s.opts.Port)
	Logf("Listening for client connections on %s", hp)
	l, e := net.Listen("tcp", hp)
	if e != nil {
		Fatalf("Error listening on port: %d - %v", s.opts.Port, e)
		return
	}

	Logf("gnatsd is ready")

	// Setup state that can enable shutdown
	s.mu.Lock()
	s.listener = l
	s.mu.Unlock()

	tmpDelay := ACCEPT_MIN_SLEEP

	for s.isRunning() {
		conn, err := l.Accept()
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				// Fix: this message contains printf verbs (%v, %d), so it must
				// go through the formatted Debugf variant; the plain Debug call
				// would not interpolate the arguments.
				Debugf("Temporary Client Accept Error(%v), sleeping %dms",
					ne, tmpDelay/time.Millisecond)
				time.Sleep(tmpDelay)
				tmpDelay *= 2
				if tmpDelay > ACCEPT_MAX_SLEEP {
					tmpDelay = ACCEPT_MAX_SLEEP
				}
			} else if s.isRunning() {
				Logf("Accept error: %v", err)
			}
			continue
		}
		tmpDelay = ACCEPT_MIN_SLEEP
		s.createClient(conn)
	}
	Log("Server Exiting..")
	s.done <- true
}
// StartProfiler is called to enable dynamic profiling.
// It serves the net/http/pprof endpoints on the configured profiling port.
func (s *Server) StartProfiler() {
	Logf("Starting profiling on http port %d", s.opts.ProfPort)
	hostPort := fmt.Sprintf("%s:%d", s.opts.Host, s.opts.ProfPort)
	go func() {
		Log(http.ListenAndServe(hostPort, nil))
	}()
}
// StartHTTPMonitoring will enable the HTTP monitoring port.
// It exposes the /varz and /connz endpoints and serves them on a background
// goroutine; the listener is stored on the server so Shutdown can close it.
func (s *Server) StartHTTPMonitoring() {
	Logf("Starting http monitor on port %d", s.opts.HTTPPort)

	hp := fmt.Sprintf("%s:%d", s.opts.Host, s.opts.HTTPPort)

	l, err := net.Listen("tcp", hp)
	if err != nil {
		Fatalf("Can't listen to the monitor port: %v", err)
	}

	mux := http.NewServeMux()

	// Varz
	mux.HandleFunc("/varz", s.HandleVarz)

	// Connz
	mux.HandleFunc("/connz", s.HandleConnz)

	srv := &http.Server{
		Addr:           hp,
		Handler:        mux,
		ReadTimeout:    2 * time.Second,
		WriteTimeout:   2 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}

	s.http = l

	go func() {
		srv.Serve(s.http)
		srv.Handler = nil
		s.done <- true
	}()
}
// createClient wraps an accepted connection in a client, sends the INFO line,
// arms the auth timeout when authentication is required, and registers the
// client with the server.
func (s *Server) createClient(conn net.Conn) *client {
	c := &client{srv: s, nc: conn, opts: defaultOpts}

	// Grab lock
	c.mu.Lock()

	// Initialize
	c.initClient()

	Debug("Client connection created", clientConnStr(c.nc), c.cid)

	// Send our information.
	s.sendInfo(c)

	// Check for Auth
	if s.info.AuthRequired {
		ttl := secondsToDuration(s.opts.AuthTimeout)
		c.setAuthTimer(ttl)
	}

	// Unlock to register
	c.mu.Unlock()

	// Register with the server.
	s.mu.Lock()
	s.clients[c.cid] = c
	s.mu.Unlock()

	return c
}
// Assume the lock is held upon entry.
// Writes the appropriate cached INFO line for the connection type.
func (s *Server) sendInfo(c *client) {
	if c.typ == CLIENT {
		c.nc.Write(s.infoJSON)
	} else if c.typ == ROUTER {
		c.nc.Write(s.routeInfoJSON)
	}
}
// checkClientAuth validates a client's credentials when auth is required.
func (s *Server) checkClientAuth(c *client) bool {
	if !s.info.AuthRequired {
		return true
	}
	// We require auth here, check the client
	// Authorization tokens trump username/password
	if s.opts.Authorization != "" {
		return s.opts.Authorization == c.opts.Authorization
	}
	return s.opts.Username == c.opts.Username && s.opts.Password == c.opts.Password
}
func (s *Server) checkRouterAuth(c *client) bool {
if !s.routeInfo.AuthRequired {
return true
}
if s.opts.ClusterUsername != c.opts.Username ||
s.opts.ClusterPassword != c.opts.Password {
return false
}
return true
}
// Check auth and return boolean indicating if client is ok
func (s *Server) checkAuth(c *client) bool {
switch c.typ {
case CLIENT:
return s.checkClientAuth(c)
case ROUTER:
return s.checkRouterAuth(c)
default:
return false
}
}
// Remove a client or route from our internal accounting.
func (s *Server) removeClient(c *client) {
c.mu.Lock()
cid := c.cid
typ := c.typ
c.mu.Unlock()
s.mu.Lock()
switch typ {
case CLIENT:
delete(s.clients, cid)
case ROUTER:
delete(s.routes, cid)
if c.route != nil {
rc, ok := s.remotes[c.route.remoteID]
// Only delete it if it is us..
if ok && c == rc {
delete(s.remotes, c.route.remoteID)
}
}
}
s.mu.Unlock()
}
/////////////////////////////////////////////////////////////////
// These are some helpers for accounting in functional tests.
/////////////////////////////////////////////////////////////////
// NumRoutes will report the number of registered routes.
func (s *Server) NumRoutes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.routes)
}
// NumRemotes will report number of registered remotes.
func (s *Server) NumRemotes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.remotes)
}
// NumClients will report the number of registered clients.
func (s *Server) NumClients() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.clients)
}
// NumSubscriptions will report how many subscriptions are active.
func (s *Server) NumSubscriptions() uint32 {
s.mu.Lock()
defer s.mu.Unlock()
stats := s.sl.Stats()
return stats.NumSubs
}
| 1 | 5,971 | Is this ever actually used? | nats-io-nats-server | go |
@@ -158,7 +158,7 @@ func (w *Waiter) receiptFromTipSet(ctx context.Context, msgCid cid.Cid, ts conse
if err != nil {
return nil, err
}
- res, err := consensus.ProcessTipSet(ctx, ts, st, vm.NewStorageMap(w.bs))
+ res, err := consensus.NewDefaultProcessor().ProcessTipSet(ctx, st, vm.NewStorageMap(w.bs), ts)
if err != nil {
return nil, err
} | 1 | package msgapi
import (
"context"
"fmt"
"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
"gx/ipfs/QmRXf2uUSdGSunRJsM9wXSUNVwLUGCY3So5fAs7h2CBJVf/go-hamt-ipld"
bstore "gx/ipfs/QmS2aqUZLJp8kF1ihE5rvDGE5LvmKDPnx32w9Z1BW9xLV5/go-ipfs-blockstore"
"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"
logging "gx/ipfs/QmcuXC5cxs79ro2cUuHs4HQ2bkDLJUYokwL8aivcX6HW3C/go-log"
"github.com/filecoin-project/go-filecoin/actor/builtin"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm"
)
var log = logging.Logger("messageimpl")
// Waiter waits for a message to appear on chain.
type Waiter struct {
chainReader chain.ReadStore
cst *hamt.CborIpldStore
bs bstore.Blockstore
}
// NewWaiter returns a new Waiter.
func NewWaiter(chainStore chain.ReadStore, bs bstore.Blockstore, cst *hamt.CborIpldStore) *Waiter {
return &Waiter{
chainReader: chainStore,
cst: cst,
bs: bs,
}
}
// Wait invokes the callback when a message with the given cid appears on chain.
// It will find the message in both the case that it is already on chain and
// the case that it appears in a newly mined block. An error is returned if one is
// encountered. It is possible for both an error to be returned and the callback
// to be invoked, eg if an error was encountered trying to find the block
// in the block history but it suddenly appears in a newly mined block. Unless
// the context is canceled this method will block forever if the message never
// appears on chain.
//
// Note: this method does too much -- the callback should just receive the tipset
// containing the message and the caller should pull the receipt out of the block
// if in fact that's what it wants to do, using something like receiptFromTipset.
// Something like receiptFromTipset is necessary because not every message in
// a block will have a receipt in the tipset: it might be a duplicate message.
//
// TODO: This implementation will become prohibitively expensive since it
// traverses the entire chain. We should use an index instead.
// https://github.com/filecoin-project/go-filecoin/issues/1518
func (w *Waiter) Wait(ctx context.Context, msgCid cid.Cid, cb func(*types.Block, *types.SignedMessage, *types.MessageReceipt) error) error {
ctx = log.Start(ctx, "Waiter.Wait")
defer log.Finish(ctx)
log.Info("Calling Waiter.Wait")
// Ch will contain a stream of blocks to check for message (or errors).
// Blocks are either in new heaviest tipsets, or next oldest historical blocks.
ch := make(chan (interface{}))
// New blocks
newHeadCh := w.chainReader.HeadEvents().Sub(chain.NewHeadTopic)
defer w.chainReader.HeadEvents().Unsub(newHeadCh, chain.NewHeadTopic)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Historical blocks
historyCh := w.chainReader.BlockHistory(ctx)
// Merge historical and new block Channels.
go func() {
for raw := range newHeadCh {
ch <- raw
}
}()
go func() {
for raw := range historyCh {
ch <- raw
}
}()
for {
select {
case <-ctx.Done():
return ctx.Err()
case raw, more := <-ch:
if !more {
return errors.New("wait input channel closed without finding message")
}
switch raw.(type) {
case error:
e := raw.(error)
log.Errorf("Waiter.Wait: %s", e)
return e
case consensus.TipSet:
ts := raw.(consensus.TipSet)
for _, blk := range ts {
for _, msg := range blk.Messages {
c, err := msg.Cid()
if err != nil {
log.Errorf("Waiter.Wait: %s", err)
return err
}
if c.Equals(msgCid) {
recpt, err := w.receiptFromTipSet(ctx, msgCid, ts)
if err != nil {
return errors.Wrap(err, "error retrieving receipt from tipset")
}
return cb(blk, msg, recpt)
}
}
}
default:
return fmt.Errorf("Unexpected type in channel: %T", raw)
}
}
}
}
// receiptFromTipSet finds the receipt for the message with msgCid in the
// input tipset. This can differ from the message's receipt as stored in its
// parent block in the case that the message is in conflict with another
// message of the tipset.
func (w *Waiter) receiptFromTipSet(ctx context.Context, msgCid cid.Cid, ts consensus.TipSet) (*types.MessageReceipt, error) {
// Receipts always match block if tipset has only 1 member.
var rcpt *types.MessageReceipt
blks := ts.ToSlice()
if len(ts) == 1 {
b := blks[0]
// TODO: this should return an error if a receipt doesn't exist.
// Right now doing so breaks tests because our test helpers
// don't correctly apply messages when making test chains.
j, err := msgIndexOfTipSet(msgCid, ts, types.SortedCidSet{})
if err != nil {
return nil, err
}
if j < len(b.MessageReceipts) {
rcpt = b.MessageReceipts[j]
}
return rcpt, nil
}
// Apply all the tipset's messages to determine the correct receipts.
ids, err := ts.Parents()
if err != nil {
return nil, err
}
tsas, err := w.chainReader.GetTipSetAndState(ctx, ids.String())
if err != nil {
return nil, err
}
st, err := state.LoadStateTree(ctx, w.cst, tsas.TipSetStateRoot, builtin.Actors)
if err != nil {
return nil, err
}
res, err := consensus.ProcessTipSet(ctx, ts, st, vm.NewStorageMap(w.bs))
if err != nil {
return nil, err
}
// If this is a failing conflict message there is no application receipt.
if res.Failures.Has(msgCid) {
return nil, nil
}
j, err := msgIndexOfTipSet(msgCid, ts, res.Failures)
if err != nil {
return nil, err
}
// TODO: out of bounds receipt index should return an error.
if j < len(res.Results) {
rcpt = res.Results[j].Receipt
}
return rcpt, nil
}
// msgIndexOfTipSet returns the order in which msgCid appears in the canonical
// message ordering of the given tipset, or an error if it is not in the
// tipset.
// TODO: find a better home for this method
func msgIndexOfTipSet(msgCid cid.Cid, ts consensus.TipSet, fails types.SortedCidSet) (int, error) {
blks := ts.ToSlice()
types.SortBlocks(blks)
var duplicates types.SortedCidSet
var msgCnt int
for _, b := range blks {
for _, msg := range b.Messages {
c, err := msg.Cid()
if err != nil {
return -1, err
}
if fails.Has(c) {
continue
}
if duplicates.Has(c) {
continue
}
(&duplicates).Add(c)
if c.Equals(msgCid) {
return msgCnt, nil
}
msgCnt++
}
}
return -1, fmt.Errorf("message cid %s not in tipset", msgCid.String())
}
| 1 | 15,897 | this section of the codebase should be noted as a candidate for caching, and as a place where multiple tipsets is making things extra tricky | filecoin-project-venus | go |
@@ -1749,6 +1749,10 @@ translate_from_synchall_to_dispatch(thread_record_t *tr, thread_synch_state_t sy
arch_mcontext_reset_stolen_reg(dcontext, mc);
}
});
+ IF_AARCHXX({
+ set_stolen_reg_val(mc, (reg_t)os_get_dr_tls_base(dcontext));
+ IF_ARM(ASSERT_NOT_TESTED());
+ });
/* We send all threads, regardless of whether was in DR or not, to
* re-interp from translated cxt, to avoid having to handle stale
* local state problems if we simply resumed. | 1 | /* **********************************************************
* Copyright (c) 2012-2020 Google, Inc. All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* thread.c - thread synchronization
*/
#include "globals.h"
#include "synch.h"
#include "instrument.h" /* is_in_client_lib() */
#include "hotpatch.h" /* hotp_only_in_tramp() */
#include "fragment.h" /* get_at_syscall() */
#include "fcache.h" /* in_fcache() */
#include "translate.h"
#include "native_exec.h"
extern vm_area_vector_t *fcache_unit_areas; /* from fcache.c */
/* Detach state flags: started_detach is set before the synchall phase of
 * detach begins; doing_detach is set only after synchall has completed.
 */
static bool started_detach = false; /* set before synchall */
bool doing_detach = false;          /* set after synchall */

/* Forward declaration; defined later in this file. */
static void
synch_thread_yield(void);
/* Thread-local synchronization data, allocated by synch_thread_init() and
 * stored in dcontext->synch_field. Tracks this thread's willingness to be
 * synched with (synch_perm), how many other threads want to synch with it
 * (pending_synch_count), and any context another thread has asked it to
 * install on itself (set_mcontext / set_context).
 */
typedef struct _thread_synch_data_t {
    /* the following three fields are used to synchronize for detach, suspend
     * thread, terminate thread, terminate process */
    /* synch_lock and pending_synch_count act as a semaphore */
    /* for check_wait_at_safe_spot() must use a spin_mutex_t */
    spin_mutex_t *synch_lock;
    /* we allow pending_synch_count to be read without holding the synch_lock
     * so all updates should be ATOMIC as well as holding the lock */
    int pending_synch_count;
    /* The synch permission this thread currently grants (THREAD_SYNCH_NONE by
     * default). To guarantee that the thread really has this permission you
     * need to hold the synch_lock when you read this value. If the target
     * thread is suspended, use a trylock, as it could have been suspended
     * while holding synch_lock (i#2805).
     */
    thread_synch_permission_t synch_perm;
    /* Only valid while holding all_threads_synch_lock and thread_initexit_lock. Set
     * to whether synch_with_all_threads was successful in synching this thread.
     */
    bool synch_with_success;
    /* Case 10101: allows threads waiting_at_safe_spot() to set their own
     * contexts. This use sometimes requires a full os-specific context, which
     * we hide behind a generic pointer and a size.
     */
    priv_mcontext_t *set_mcontext;
    void *set_context;
    size_t set_context_size;
#ifdef X64
    /* PR 263338: we have to pad for alignment */
    byte *set_context_alloc;
#endif
} thread_synch_data_t;

/* This lock prevents more than one thread from being in the synch_with_all_
 * threads method body at the same time (which would lead to deadlock as they
 * tried to synchronize with each other)
 */
DECLARE_CXTSWPROT_VAR(mutex_t all_threads_synch_lock,
                      INIT_LOCK_FREE(all_threads_synch_lock));
/* Frees a context allocated for the set_mcontext/set_context mechanism.
 * Pass either mc alone, or cxt together with cxt_size (and, for X64, the
 * original allocation pointer in cxt_alloc; the context was padded for
 * alignment per PR 263338).
 */
static void
free_setcontext(priv_mcontext_t *mc, void *cxt, size_t cxt_size _IF_X64(byte *cxt_alloc))
{
    if (mc != NULL) {
        /* The two context forms are mutually exclusive. */
        ASSERT(cxt == NULL);
        global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER));
        return;
    }
    if (cxt == NULL)
        return; /* nothing was allocated */
    ASSERT(cxt_size > 0);
    /* On X64 free the original (unaligned) allocation, not the aligned ptr. */
    global_heap_free(IF_X64_ELSE(cxt_alloc, cxt), cxt_size HEAPACCT(ACCT_OTHER));
}
/* Releases any pending set-context request stored in tsd and clears the
 * corresponding fields so the request cannot be consumed again.
 */
static void
synch_thread_free_setcontext(thread_synch_data_t *tsd)
{
    priv_mcontext_t *mc = tsd->set_mcontext;
    void *cxt = tsd->set_context;
    tsd->set_mcontext = NULL;
    tsd->set_context = NULL;
    free_setcontext(mc, cxt, tsd->set_context_size _IF_X64(tsd->set_context_alloc));
}
/* Process-wide initialization for the synch module. Currently a no-op:
 * all_threads_synch_lock is statically initialized above.
 */
void
synch_init(void)
{
}
/* Process-wide cleanup for the synch module: frees all_threads_synch_lock.
 * uninit_thread_count is expected to have drained to zero by now.
 */
void
synch_exit(void)
{
    ASSERT(uninit_thread_count == 0);
    DELETE_LOCK(all_threads_synch_lock);
}
/* Per-thread initialization: allocates this thread's thread_synch_data_t,
 * installs it in dcontext->synch_field, and creates its synch_lock.
 */
void
synch_thread_init(dcontext_t *dcontext)
{
    thread_synch_data_t *tsd = (thread_synch_data_t *)heap_alloc(
        dcontext, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER));
    dcontext->synch_field = (void *)tsd;
    tsd->pending_synch_count = 0;
    /* Default: grant no synch permission until the thread parks itself. */
    tsd->synch_perm = THREAD_SYNCH_NONE;
    tsd->synch_with_success = false;
    tsd->set_mcontext = NULL;
    tsd->set_context = NULL;
    /* the synch_lock is in unprotected memory so that check_wait_at_safe_spot
     * can call the EXITING_DR hook before releasing it */
    tsd->synch_lock = HEAP_TYPE_ALLOC(dcontext, spin_mutex_t, ACCT_OTHER, UNPROTECTED);
    ASSIGN_INIT_SPINMUTEX_FREE(*tsd->synch_lock, synch_lock);
}
/* Per-thread cleanup: frees the thread_synch_data_t installed by
 * synch_thread_init() (the tsd struct itself only in debug builds, which do
 * full heap cleanup).
 */
void
synch_thread_exit(dcontext_t *dcontext)
{
    thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
    /* Could be waiting at safe spot when we detach or exit */
    synch_thread_free_setcontext(tsd);
    DELETE_SPINMUTEX(*tsd->synch_lock);
    /* Note that we do need to free this in non-debug builds since, despite
     * appearances, UNPROTECTED_LOCAL is actually allocated on a global
     * heap. */
    HEAP_TYPE_FREE(dcontext, tsd->synch_lock, spin_mutex_t, ACCT_OTHER, UNPROTECTED);
#ifdef DEBUG
    /* for non-debug we do fast exit path and don't free local heap */
    /* clean up tsd fields here */
    heap_free(dcontext, tsd, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER));
#endif
}
/* Check for a no-xfer permission. Currently used only for case 6821,
 * where we need to distinguish three groups: unsafe (wait for safe
 * point), safe and translatable, and safe but not translatable.
 * Returns false (conservative) if the synch_lock cannot be acquired.
 */
bool
thread_synch_state_no_xfer(dcontext_t *dcontext)
{
    thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
    /* We use a trylock in case the thread is suspended holding synch_lock (i#2805). */
    if (spinmutex_trylock(tsd->synch_lock)) {
        bool res = (tsd->synch_perm == THREAD_SYNCH_NO_LOCKS_NO_XFER ||
                    tsd->synch_perm == THREAD_SYNCH_VALID_MCONTEXT_NO_XFER);
        spinmutex_unlock(tsd->synch_lock);
        return res;
    }
    /* Lock unavailable: report no no-xfer permission. */
    return false;
}
/* Returns whether this thread's current synch_perm satisfies desired_perm.
 * A desired_perm below THREAD_SYNCH_NO_LOCKS (i.e., THREAD_SYNCH_NONE) is
 * trivially satisfied without touching any lock.
 */
bool
thread_synch_check_state(dcontext_t *dcontext, thread_synch_permission_t desired_perm)
{
    thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
    /* We support calling this routine from our signal handler when it has interrupted
     * DR and might be holding tsd->synch_lock or other locks.
     * We first check synch_perm w/o a lock and if it's not at least
     * THREAD_SYNCH_NO_LOCKS we do not attempt to grab synch_lock (we'd hit rank order
     * violations). If that check passes, the only problematic lock is if we already
     * hold synch_lock, so we use test and trylocks there.
     */
    if (desired_perm < THREAD_SYNCH_NO_LOCKS) {
        ASSERT(desired_perm == THREAD_SYNCH_NONE);
        return true;
    }
    if (!THREAD_SYNCH_SAFE(tsd->synch_perm, desired_perm))
        return false;
    /* barrier to keep the 1st check above on this side of the lock below */
#ifdef WINDOWS
    MemoryBarrier();
#else
    /* Compiler-only barrier: empty asm with a "memory" clobber. */
    __asm__ __volatile__("" : : : "memory");
#endif
    /* We use a trylock in case the thread is suspended holding synch_lock (i#2805).
     * We start with testlock to avoid recursive lock assertions.
     */
    if (!spinmutex_testlock(tsd->synch_lock) && spinmutex_trylock(tsd->synch_lock)) {
        bool res = THREAD_SYNCH_SAFE(tsd->synch_perm, desired_perm);
        spinmutex_unlock(tsd->synch_lock);
        return res;
    }
    /* Couldn't safely acquire the lock: conservatively report unsafe. */
    return false;
}
/* Reports whether synch_with_all_threads succeeded in synching this thread.
 * Only meaningful while holding both all_threads_synch_lock and
 * thread_initexit_lock. Must not be called when THREAD_SYNCH_*_AND_CLEANED
 * was requested: on success the thread-local memory read here is freed!
 */
bool
thread_synch_successful(thread_record_t *tr)
{
    ASSERT(tr != NULL && tr->dcontext != NULL);
    ASSERT_OWN_MUTEX(true, &all_threads_synch_lock);
    ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
    thread_synch_data_t *tsd = (thread_synch_data_t *)tr->dcontext->synch_field;
    return tsd->synch_with_success;
}
#ifdef UNIX
/* i#2659: the kernel is now doing auto-restart so we have to check for the
 * pc being at the syscall.
 * Returns true if pc is at do_syscall's return point (or, when check_vsyscall,
 * at the vsyscall sysenter return point), including the auto-restart case
 * where the kernel has moved pc back onto the syscall instruction itself:
 * that is detected via get_at_syscall() plus a pc + syscall-length match.
 */
static bool
is_after_or_restarted_do_syscall(dcontext_t *dcontext, app_pc pc, bool check_vsyscall)
{
    if (is_after_do_syscall_addr(dcontext, pc))
        return true;
    if (check_vsyscall && pc == vsyscall_sysenter_return_pc)
        return true;
    if (!get_at_syscall(dcontext)) /* rule out having just reached the syscall */
        return false;
    /* pc may have been reset onto the syscall instr for restart: re-check the
     * same targets one syscall-instruction-length further on.
     */
    int syslen = syscall_instr_length(dr_get_isa_mode(dcontext));
    if (is_after_do_syscall_addr(dcontext, pc + syslen))
        return true;
    if (check_vsyscall && pc + syslen == vsyscall_sysenter_return_pc)
        return true;
    return false;
}
#endif
/* Returns whether pc (with app stack pointer esp) is at the syscall point of
 * DR's do_syscall gencode, handling the different syscall methods
 * (int / sysenter) and, on Windows, the sygate compatibility modes that
 * leave an extra return address on the stack.
 */
bool
is_at_do_syscall(dcontext_t *dcontext, app_pc pc, byte *esp)
{
    app_pc buf[2];
    /* Read the top two stack slots: the sygate/sysenter checks below compare
     * them against the do_syscall return point.
     */
    bool res = d_r_safe_read(esp, sizeof(buf), buf);
    if (!res) {
        ASSERT(res); /* we expect the stack to always be readable */
        return false;
    }
    if (does_syscall_ret_to_callsite()) {
#ifdef WINDOWS
        if (get_syscall_method() == SYSCALL_METHOD_INT && DYNAMO_OPTION(sygate_int)) {
            return (pc == after_do_syscall_addr(dcontext) &&
                    buf[0] == after_do_syscall_code(dcontext));
        } else {
            return pc == after_do_syscall_code(dcontext);
        }
#else
        return is_after_or_restarted_do_syscall(dcontext, pc, false /*!vsys*/);
#endif
    } else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
#ifdef WINDOWS
        if (pc == vsyscall_after_syscall) {
            /* With sygate_sysenter the return address sits one slot deeper. */
            if (DYNAMO_OPTION(sygate_sysenter))
                return buf[1] == after_do_syscall_code(dcontext);
            else
                return buf[0] == after_do_syscall_code(dcontext);
        } else {
            /* not at a system call, could still have tos match after_do_syscall
             * either by chance or because we leak that value on the apps stack
             * (a non transparency) */
            ASSERT_CURIOSITY(buf[0] != after_do_syscall_code(dcontext));
            return false;
        }
#else
        /* Even when the main syscall method is sysenter, we also have a
         * do_int_syscall and do_clone_syscall that use int, so check both.
         * Note that we don't modify the stack, so once we do sysenter syscalls
         * inlined in the cache (PR 288101) we'll need some mechanism to
         * distinguish those: but for now if a sysenter instruction is used it
         * has to be do_syscall since DR's own syscalls are ints.
         */
        return is_after_or_restarted_do_syscall(dcontext, pc, true /*vsys*/);
#endif
    }
    /* we can reach here w/ a fault prior to 1st syscall on Linux */
    IF_WINDOWS(ASSERT_NOT_REACHED());
    return false;
}
/* Helper function for at_safe_spot(). Returns whether a thread running
 * natively at pc with stack esp has a valid native (application) state,
 * i.e. is not executing inside DR code or on a DR-owned stack.
 * Note state for client-owned threads isn't considered valid since it may be
 * holding client locks and doesn't correspond to an actual app state. Caller
 * should handle client-owned threads appropriately.
 */
static bool
is_native_thread_state_valid(dcontext_t *dcontext, app_pc pc, byte *esp)
{
    /* ref case 3675, the assumption is that if we aren't executing
     * out of dr memory and our stack isn't in dr memory (to disambiguate
     * pc in kernel32, ntdll etc.) then the app has a valid native context.
     * However, we can't call is_dynamo_address() as it (and its children)
     * grab too many different locks, all of which we would have to check
     * here in the same manner as fcache_unit_areas.lock in at_safe_spot(). So
     * instead we just check the pc for the dr dll, interception code, and
     * do_syscall regions and check the stack against the thread's dr stack
     * and the d_r_initstack, all of which we can do without grabbing any locks.
     * That should be sufficient at this point, FIXME try to use something
     * like is_dynamo_address() to make this more maintainable */
    /* For sysenter system calls we also have to check the top of the stack
     * for the after_do_syscall_address to catch the do_syscall @ syscall
     * itself case. */
    ASSERT(esp != NULL);
    ASSERT(is_thread_currently_native(dcontext->thread_record));
#ifdef WINDOWS
    if (pc == (app_pc)thread_attach_takeover) {
        /* We are trying to take over this thread but it has not yet been
         * scheduled. It was native, and can't hold any DR locks.
         */
        return true;
    }
#endif
    return (!is_in_dynamo_dll(pc) &&
            IF_WINDOWS(!is_part_of_interception(pc) &&)(
                !in_generated_routine(dcontext, pc) ||
                /* we allow native thread to be at do_syscall - for int syscalls the pc
                 * (syscall return point) will be in do_syscall (so in generated routine)
                 * xref case 9333 */
                is_at_do_syscall(dcontext, pc, esp)) &&
            !is_on_initstack(esp) && !is_on_dstack(dcontext, esp) &&
            IF_CLIENT_INTERFACE(!is_in_client_lib(pc) &&)
            /* xref PR 200067 & 222812 on client-owned native threads */
            IF_CLIENT_INTERFACE(!IS_CLIENT_THREAD(dcontext) &&)
#ifdef HOT_PATCHING_INTERFACE
            /* Shouldn't be in the middle of executing a hotp_only patch. The
             * check for being in hotp_dll is DR_WHERE_HOTPATCH because the patch can
             * change esp.
             */
            (dcontext->whereami != DR_WHERE_HOTPATCH &&
             /* dynamo dll check has been done */
             !hotp_only_in_tramp(pc)) &&
#endif
            true /* no effect, simplifies ifdef handling with && above */
    );
}
/* Translates the context mcontext for the given thread trec. If
 * restore_memory is true, also restores any memory values that were
 * shifted (primarily due to clients). If restore_memory is true, the
 * caller should always relocate the translated thread, as it may not
 * execute properly if left at its current location (it could be in the
 * middle of client code in the cache).
 * If recreate_app_state() is called, f will be passed through to it.
 *
 * Like any instance where a thread_record_t is used by a thread other than its
 * owner, the caller must hold the thread_initexit_lock to ensure that it
 * remains valid.
 * Requires thread trec is at_safe_spot().
 * Returns true iff mcontext now holds a valid application context.
 */
bool
translate_mcontext(thread_record_t *trec, priv_mcontext_t *mcontext, bool restore_memory,
                   fragment_t *f)
{
    thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field;
    bool res;
    recreate_success_t success;
    bool native_translate = false;
    ASSERT(tsd->pending_synch_count >= 0);
    /* check if native thread */
    if (is_thread_currently_native(trec)) {
        /* running natively, no need to translate unless at do_syscall for an
         * intercepted-via-trampoline syscall which we allow now for case 9333 */
#ifdef CLIENT_INTERFACE
        if (IS_CLIENT_THREAD(trec->dcontext)) {
            /* don't need to translate anything */
            LOG(THREAD_GET, LOG_SYNCH, 1,
                "translate context, thread " TIDFMT " is client "
                "thread, no translation needed\n",
                trec->id);
            return true;
        }
#endif
        if (is_native_thread_state_valid(trec->dcontext, (app_pc)mcontext->pc,
                                         (byte *)mcontext->xsp)) {
#ifdef WINDOWS
            if ((app_pc)mcontext->pc == (app_pc)thread_attach_takeover) {
                /* Thread is parked at the takeover point and has not yet been
                 * scheduled: hand off to the takeover translation.
                 */
                LOG(THREAD_GET, LOG_SYNCH, 1,
                    "translate context, thread " TIDFMT " at "
                    "takeover point\n",
                    trec->id);
                thread_attach_translate(trec->dcontext, mcontext, restore_memory);
                return true;
            }
#endif
            if (is_at_do_syscall(trec->dcontext, (app_pc)mcontext->pc,
                                 (byte *)mcontext->xsp)) {
                LOG(THREAD_GET, LOG_SYNCH, 1,
                    "translate context, thread " TIDFMT " running "
                    "natively, at do_syscall so translation needed\n",
                    trec->id);
                native_translate = true;
            } else {
                LOG(THREAD_GET, LOG_SYNCH, 1,
                    "translate context, thread " TIDFMT " running "
                    "natively, no translation needed\n",
                    trec->id);
                return true;
            }
        } else {
            /* now that do_syscall is a safe spot for native threads we shouldn't get
             * here for get context on self, FIXME - is however possible to get here
             * via get_context on unsuspended thread (result of which is technically
             * undefined according to MS), see get_context post sys comments
             * (should prob. synch there in which case can assert here) */
            ASSERT(trec->id != d_r_get_thread_id());
            ASSERT_CURIOSITY(false &&
                             "translate failure, likely get context on "
                             "unsuspended native thread");
            /* we'll just try to translate and hope for the best */
            native_translate = true;
        }
    }
    if (!native_translate) {
        /* check if waiting at a good spot: if so we can simply copy out the
         * thread's saved mcontext instead of recreating state.
         */
        spinmutex_lock(tsd->synch_lock);
        res = THREAD_SYNCH_SAFE(tsd->synch_perm, THREAD_SYNCH_VALID_MCONTEXT);
        spinmutex_unlock(tsd->synch_lock);
        if (res) {
            LOG(THREAD_GET, LOG_SYNCH, 1,
                "translate context, thread " TIDFMT " waiting at "
                "valid mcontext point, copying over\n",
                trec->id);
            DOLOG(2, LOG_SYNCH, {
                LOG(THREAD_GET, LOG_SYNCH, 2, "Thread State\n");
                dump_mcontext(get_mcontext(trec->dcontext), THREAD_GET, DUMP_NOT_XML);
            });
            *mcontext = *get_mcontext(trec->dcontext);
#ifdef CLIENT_INTERFACE
            /* Let clients with translation hooks restore any non-fcache
             * state they changed.
             */
            if (dr_xl8_hook_exists()) {
                if (!instrument_restore_nonfcache_state(trec->dcontext, true, mcontext))
                    return false;
            }
#endif
            return true;
        }
    }
    /* In case 4148 we see a thread calling NtGetContextThread on itself, which
     * is undefined according to MS but it does get the syscall address, so it's
     * fine with us. For other threads the app shouldn't be asking about them
     * unless they're suspended, and the same goes for us.
     */
    ASSERT_CURIOSITY(trec->dcontext->whereami == DR_WHERE_FCACHE ||
                     trec->dcontext->whereami == DR_WHERE_SIGNAL_HANDLER ||
                     native_translate || trec->id == d_r_get_thread_id());
    LOG(THREAD_GET, LOG_SYNCH, 2,
        "translate context, thread " TIDFMT " at pc_recreatable spot translating\n",
        trec->id);
    success = recreate_app_state(trec->dcontext, mcontext, restore_memory, f);
    if (success != RECREATE_SUCCESS_STATE) {
        /* should never happen right?
         * actually it does when deciding whether can deliver a signal
         * immediately (PR 213040).
         */
        LOG(THREAD_GET, LOG_SYNCH, 1,
            "translate context, thread " TIDFMT " unable to translate context at pc"
            " = " PFX "\n",
            trec->id, mcontext->pc);
        SYSLOG_INTERNAL_WARNING_ONCE("failed to translate");
        return false;
    }
    return true;
}
/* Returns whether trec's thread has parked itself at a synch point with a
 * synch_perm satisfying desired_state. Never spins on the target's
 * synch_lock, since the (possibly suspended) target could own it (i#2805);
 * if the lock cannot be acquired we conservatively report false.
 */
static bool
waiting_at_safe_spot(thread_record_t *trec, thread_synch_state_t desired_state)
{
    thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field;
    ASSERT(tsd->pending_synch_count >= 0);
    /* The lock is only needed to read synch_perm consistently. */
    if (!spinmutex_trylock(tsd->synch_lock)) {
        LOG(THREAD_GET, LOG_SYNCH, 2,
            "at_safe_spot unable to get locks to test if thread " TIDFMT " is waiting "
            "at safe spot\n",
            trec->id);
        return false;
    }
    thread_synch_permission_t perm = tsd->synch_perm;
    bool granted = THREAD_SYNCH_SAFE(perm, desired_state);
    spinmutex_unlock(tsd->synch_lock);
    if (!granted)
        return false;
    LOG(THREAD_GET, LOG_SYNCH, 2,
        "thread " TIDFMT " waiting at safe spot (synch_perm=%d)\n", trec->id, perm);
    return true;
}
#ifdef CLIENT_SIDELINE
/* Returns whether a client-owned thread may be suspended for desired_state.
 * Cleanup/termination requests are always honored; otherwise the client's
 * suspendable flag decides.
 */
static bool
should_suspend_client_thread(dcontext_t *dcontext, thread_synch_state_t desired_state)
{
    ASSERT(IS_CLIENT_THREAD(dcontext));
    /* Being marked un-suspendable does not exempt a thread from
     * cleaning/terminating.
     */
    if (THREAD_SYNCH_IS_CLEANED(desired_state))
        return true;
    return dcontext->client_data->suspendable;
}
#endif
/* Checks whether the thread trec is at a spot suitable for the requested
 * desired_state.
 * Requires that trec thread is suspended */
/* Note that since trec is potentially suspended at an arbitrary point,
 * this function (and any function it calls) cannot call mutex_lock as
 * trec thread may hold a lock. It is ok for at_safe_spot to return false if
 * it can't obtain a lock on the first try. FIXME : in the long term we may
 * want to go to a locking model that stores the thread id of the owner in
 * which case we can check for this situation directly
 */
bool
at_safe_spot(thread_record_t *trec, priv_mcontext_t *mc,
             thread_synch_state_t desired_state)
{
    bool safe = false;
    /* Fast path: the thread voluntarily parked itself at a synch point. */
    if (waiting_at_safe_spot(trec, desired_state))
        return true;
#ifdef ARM
    /* A pc inside a Thumb IT block is never considered safe. */
    if (TESTANY(EFLAGS_IT, mc->cpsr)) {
        LOG(THREAD_GET, LOG_SYNCH, 2,
            "thread " TIDFMT " not at safe spot (pc=" PFX " in an IT block) for %d\n",
            trec->id, mc->pc, desired_state);
        return false;
    }
#endif
    /* check if suspended at good spot */
    /* FIXME: right now don't distinguish between suspend and term privileges
     * even though suspend is stronger requirement, are the checks below
     * sufficient */
    /* FIXME : check with respect to flush, should be ok */
    /* test fcache_unit_areas.lock (from fcache.c) before calling recreate_app_state
     * since it calls in_fcache() which uses the lock (if we are in_fcache()
     * assume other locks are not a problem (so is_dynamo_address is fine)) */
    /* Right now the only dr code that ends up in the cache is our DLL main
     * (which we'll reduce/get rid of with libc independence), our takeover
     * from preinject return stack, and the callback.c interception code.
     * FIXME : test for just these and ASSERT(!is_dynamo_address) otherwise */
    if (is_thread_currently_native(trec)) {
        /* thread is running native, verify is not in dr code */
#ifdef CLIENT_INTERFACE
        /* We treat client-owned threads (such as a client nudge thread) as native and
         * consider them safe if they are in the client_lib. Since they might own client
         * locks that could block application threads from progressing, we synchronize
         * with them last. FIXME - xref PR 231301 - since we can't disambiguate
         * client->ntdll/gencode which is safe from client->dr->ntdll/gencode which isn't
         * we disallow both. This could hurt synchronization efficiency if the client
         * owned thread spent most of its execution time calling out of its lib to ntdll
         * routines or generated code. */
        if (IS_CLIENT_THREAD(trec->dcontext)) {
            safe = (trec->dcontext->client_data->client_thread_safe_for_synch ||
                    is_in_client_lib(mc->pc)) &&
                /* Do not cleanup/terminate a thread holding a client lock (PR 558463) */
                /* Actually, don't consider a thread holding a client lock to be safe
                 * at all (PR 609569): client should use
                 * dr_client_thread_set_suspendable(false) if its thread spends a lot
                 * of time holding locks.
                 */
                (!should_suspend_client_thread(trec->dcontext, desired_state) ||
                 trec->dcontext->client_data->mutex_count == 0);
        }
#endif
        if (is_native_thread_state_valid(trec->dcontext, mc->pc, (byte *)mc->xsp)) {
            safe = true;
            /* We should always be able to translate a valid native state, but be
             * sure to check before thread_attach_exit().
             * NOTE(review): this is inside ASSERT and so presumably debug-only;
             * the call only queries (restore_memory=false), so that looks
             * intentional — confirm.
             */
            ASSERT(translate_mcontext(trec, mc, false /*just querying*/, NULL));
#ifdef WINDOWS
            if (mc->pc == (app_pc)thread_attach_takeover &&
                THREAD_SYNCH_IS_CLEANED(desired_state)) {
                /* The takeover data will be freed at process exit, but we might
                 * clean up a thread mid-run, so make sure we free the data.
                 */
                thread_attach_exit(trec->dcontext, mc);
            }
#endif
        }
#ifdef CLIENT_INTERFACE
    } else if (desired_state == THREAD_SYNCH_TERMINATED_AND_CLEANED &&
               trec->dcontext->whereami == DR_WHERE_FCACHE &&
               trec->dcontext->client_data->at_safe_to_terminate_syscall) {
        /* i#1420: At safe to terminate syscall like dr_sleep in a clean call.
         * XXX: A thread in dr_sleep might not be safe to terminate for some
         * corner cases: for example, a client may hold a lock and then go sleep,
         * terminating it may mess the client up for not releasing the lock.
         * We limit this to the thread being in fcache (i.e., from a clean call)
         * to rule out some corner cases.
         */
        safe = true;
#endif
    } else if ((!WRITE_LOCK_HELD(&fcache_unit_areas->lock) &&
                /* even though we only need the read lock, if our target holds it
                 * and a 3rd thread requests the write lock, we'll hang if we
                 * ask for the read lock (case 7493)
                 */
                !READ_LOCK_HELD(&fcache_unit_areas->lock)) &&
               recreate_app_state(trec->dcontext, mc, false /*just query*/, NULL) ==
                   RECREATE_SUCCESS_STATE &&
               /* It's ok to call is_dynamo_address even though it grabs many
                * locks because recreate_app_state succeeded.
                */
               !is_dynamo_address(mc->pc)) {
        safe = true;
    }
    if (safe) {
        ASSERT(trec->dcontext->whereami == DR_WHERE_FCACHE ||
               trec->dcontext->whereami == DR_WHERE_SIGNAL_HANDLER ||
               is_thread_currently_native(trec));
        LOG(THREAD_GET, LOG_SYNCH, 2,
            "thread " TIDFMT " suspended at safe spot pc=" PFX "\n", trec->id, mc->pc);
        return true;
    }
    LOG(THREAD_GET, LOG_SYNCH, 2,
        "thread " TIDFMT " not at safe spot (pc=" PFX ") for %d\n", trec->id, mc->pc,
        desired_state);
    return false;
}
/* Cheap query for whether some other thread is trying to synch with us, so the
 * caller can decide to invoke check_wait_at_safe_spot() without first paying
 * for a (potentially expensive) context translation.
 */
bool
should_wait_at_safe_spot(dcontext_t *dcontext)
{
    thread_synch_data_t *synch_data = (thread_synch_data_t *)dcontext->synch_field;
    return synch_data->pending_synch_count != 0;
}
/* Use with care!  Callers should normally go through check_wait_at_safe_spot()
 * instead of setting the synch permission directly.
 */
void
set_synch_state(dcontext_t *dcontext, thread_synch_permission_t state)
{
    thread_synch_data_t *synch_data = (thread_synch_data_t *)dcontext->synch_field;
    if (state >= THREAD_SYNCH_NO_LOCKS)
        ASSERT_OWN_NO_LOCKS();
    /* Wart in the settings here (i#2805): the caller may be claiming
     * THREAD_SYNCH_NO_LOCKS, yet we acquire a lock right here.  If this thread
     * is suspended between taking the lock and later moving synch_perm from
     * THREAD_SYNCH_NO_LOCKS back to THREAD_SYNCH_NONE, that can cause problems.
     * Everyone who might query in such a state therefore uses a trylock and
     * treats synch_perm as THREAD_SYNCH_NONE when the lock cannot be acquired.
     */
    spinmutex_lock(synch_data->synch_lock);
    synch_data->synch_perm = state;
    spinmutex_unlock(synch_data->synch_lock);
}
/* checks to see if any threads are waiting to synch with this one and waits
 * if they are
 * cur_state - a given permission define from above that describes the current
 * state of the caller
 * NOTE - Requires the caller is !could_be_linking (i.e. not in an
 * enter_couldbelinking state)
 */
void
check_wait_at_safe_spot(dcontext_t *dcontext, thread_synch_permission_t cur_state)
{
    thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
    app_pc pc;
    /* Scratch buffer big enough for either form of context (CONTEXT or
     * priv_mcontext_t) that a synching thread may have stashed in tsd for us. */
    byte cxt[MAX(CONTEXT_HEAP_SIZE_OPAQUE, sizeof(priv_mcontext_t))];
    bool set_context = false;
    bool set_mcontext = false;
    /* Fast path: nobody is waiting for us, or we have no permission to grant. */
    if (tsd->pending_synch_count == 0 || cur_state == THREAD_SYNCH_NONE)
        return;
    ASSERT(tsd->pending_synch_count >= 0);
    pc = get_mcontext(dcontext)->pc;
    LOG(THREAD, LOG_SYNCH, 2, "waiting for synch with state %d (pc " PFX ")\n", cur_state,
        pc);
    if (cur_state == THREAD_SYNCH_VALID_MCONTEXT) {
        ASSERT(!is_dynamo_address(pc));
        /* for detach must set this here and now */
        IF_WINDOWS(IF_CLIENT_INTERFACE(set_last_error(dcontext->app_errno)));
    }
    spinmutex_lock(tsd->synch_lock);
    /* Publish our permission so the synching thread's safe-spot check sees it. */
    tsd->synch_perm = cur_state;
    /* Since can be killed, suspended, etc. must call the exit dr hook. But, to
     * avoid races, we must do so before giving up the synch_lock. This is why
     * that lock has to be in unprotected memory. FIXME - for single thread in
     * dr this will lead to rank order violation between dr exclusivity lock
     * and the synch_lock with no easy workaround (real deadlocks possible).
     * Luckily we'll prob. never use that option. */
    if (INTERNAL_OPTION(single_thread_in_DR)) {
        ASSERT_NOT_IMPLEMENTED(false);
    }
    EXITING_DR();
    /* Ref case 5074, for us/app to successfully SetThreadContext at
     * this synch point, this thread can NOT be at a system call. So, for
     * case 10101, we instead have threads that are waiting_at_safe_spot()
     * set their own contexts, allowing us to make system calls here.
     * We don't yet handle the detach case, so it still requires no system
     * calls, including the act of releasing the synch_lock
     * which is why that lock has to be a user mode spin yield lock.
     * FIXME: we could change tsd->synch_lock back to a regular lock
     * once we have detach handling system calls here.
     */
    spinmutex_unlock(tsd->synch_lock);
    /* Spin until every pending synch completes or a syncher resets our perm. */
    while (tsd->pending_synch_count > 0 && tsd->synch_perm != THREAD_SYNCH_NONE) {
        STATS_INC_DC(dcontext, synch_loops_wait_safe);
#ifdef WINDOWS
        if (started_detach) {
            /* We spin for any non-detach synchs encountered during detach
             * since we have no flag telling us this synch is for detach. */
            /* Ref case 5074, can NOT use os_thread_yield here. This must be a user
             * mode spin loop. */
            SPINLOCK_PAUSE();
        } else {
#endif
            /* FIXME case 10100: replace this sleep/yield with a wait_for_event() */
            synch_thread_yield();
#ifdef WINDOWS
        }
#endif
    }
    /* Regain the synch_lock before ENTERING_DR to avoid races with getting
     * suspended/killed in the middle of ENTERING_DR (before synch_perm is
     * reset to NONE). */
    /* Ref case 5074, for detach we still can NOT use os_thread_yield here (no system
     * calls) so don't allow the spinmutex_lock to yield while grabbing the lock. */
    spinmutex_lock_no_yield(tsd->synch_lock);
    ENTERING_DR();
    tsd->synch_perm = THREAD_SYNCH_NONE;
    /* A synching thread may have handed us a context to install (case 10101);
     * copy it locally so we can release the lock before switching to it. */
    if (tsd->set_mcontext != NULL || tsd->set_context != NULL) {
        IF_WINDOWS(ASSERT(!started_detach));
        /* Make a local copy */
        ASSERT(sizeof(cxt) >= sizeof(priv_mcontext_t));
        if (tsd->set_mcontext != NULL) {
            set_mcontext = true;
            memcpy(cxt, tsd->set_mcontext, sizeof(*tsd->set_mcontext));
        } else {
            set_context = true;
            memcpy(cxt, tsd->set_context, tsd->set_context_size);
        }
        synch_thread_free_setcontext(tsd); /* sets to NULL for us */
    }
    spinmutex_unlock(tsd->synch_lock);
    LOG(THREAD, LOG_SYNCH, 2, "done waiting for synch with state %d (pc " PFX ")\n",
        cur_state, pc);
    if (set_mcontext || set_context) {
        /* FIXME: see comment in dispatch.c check_wait_at_safe_spot() call
         * about problems with KSTART(fcache_* differences bet the target
         * being at the synch point vs in the cache.
         */
        /* Install the handed-off context; these calls do not return. */
        if (set_mcontext)
            thread_set_self_mcontext((priv_mcontext_t *)cxt);
        else
            thread_set_self_context((void *)cxt);
        ASSERT_NOT_REACHED();
    }
}
/* Bumps this thread's pending synch count by amt under the synch lock
 * (amt may be negative to undo an earlier increment). */
void
adjust_wait_at_safe_spot(dcontext_t *dcontext, int amt)
{
    thread_synch_data_t *synch_data = (thread_synch_data_t *)dcontext->synch_field;
    ASSERT(synch_data->pending_synch_count >= 0);
    spinmutex_lock(synch_data->synch_lock);
    ATOMIC_ADD(int, synch_data->pending_synch_count, amt);
    spinmutex_unlock(synch_data->synch_lock);
}
/* Case 10101: Safely sets the context for a target thread that may be waiting at a
 * safe spot, in which case we do not want to directly do a setcontext as the return
 * from the yield or wait system call will mess up the state (case 5074).
 * Assumes that cxt was allocated on the global heap, and frees it, rather than
 * making its own copy (as an optimization).
 * Does not work on the executing thread.
 * Caller must hold thread_initexit_lock.
 * If used on behalf of the app, it's up to the caller to check for privileges.
 */
bool
set_synched_thread_context(thread_record_t *trec,
                           /* pass either mc or both cxt and cxt_size */
                           priv_mcontext_t *mc, void *cxt, size_t cxt_size,
                           thread_synch_state_t desired_state _IF_X64(byte *cxt_alloc)
                               _IF_WINDOWS(NTSTATUS *status /*OUT*/))
{
    bool res = true;
    ASSERT(trec != NULL && trec->dcontext != NULL);
    ASSERT(trec->dcontext != get_thread_private_dcontext());
    ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
#ifdef WINDOWS
    if (status != NULL)
        *status = STATUS_SUCCESS;
#endif
    if (waiting_at_safe_spot(trec, desired_state)) {
        /* case 10101: to allow system calls in check_wait_at_safe_spot() for
         * performance reasons we have the waiting thread perform its own setcontext.
         */
        /* Hand the context off via the target's synch data; ownership of the
         * allocation transfers to the target (freed in
         * synch_thread_free_setcontext() / check_wait_at_safe_spot()). */
        thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field;
        spinmutex_lock(tsd->synch_lock);
        if (tsd->set_mcontext != NULL || tsd->set_context != NULL) {
            /* Two synchs in a row while still waiting; 2nd takes precedence */
            STATS_INC(wait_multiple_setcxt);
            synch_thread_free_setcontext(tsd);
        }
#ifdef WINDOWS
        LOG(THREAD_GET, LOG_SYNCH, 2,
            "set_synched_thread_context %d to pc " PFX " via %s\n", trec->id,
            (mc != NULL) ? mc->pc : (app_pc)((CONTEXT *)cxt)->CXT_XIP,
            (mc != NULL) ? "mc" : "CONTEXT");
#else
        ASSERT_NOT_IMPLEMENTED(mc != NULL); /* XXX: need sigcontext or sig_full_cxt_t */
#endif
        if (mc != NULL)
            tsd->set_mcontext = mc;
        else {
            ASSERT(cxt != NULL && cxt_size > 0);
            tsd->set_context = cxt;
            tsd->set_context_size = cxt_size;
        }
        IF_X64(tsd->set_context_alloc = cxt_alloc);
        ASSERT(THREAD_SYNCH_SAFE(tsd->synch_perm, desired_state));
        ASSERT(tsd->pending_synch_count >= 0);
        /* Don't need to change pending_synch_count or anything; when thread is
         * resumed it will properly reset everything itself */
        spinmutex_unlock(tsd->synch_lock);
    } else {
        /* Target is truly suspended (not spinning at a safe spot): set its
         * context directly via the OS. */
        if (mc != NULL) {
            res = thread_set_mcontext(trec, mc);
        } else {
#ifdef WINDOWS
            /* sort of ugly: but NtSetContextThread handling needs the status */
            if (status != NULL) {
                *status = nt_set_context(trec->handle, (CONTEXT *)cxt);
                res = NT_SUCCESS(*status);
            } else
                res = thread_set_context(trec->handle, (CONTEXT *)cxt);
#else
            /* currently there are no callers who don't pass mc: presumably
             * PR 212090 will change that */
            ASSERT_NOT_IMPLEMENTED(false);
#endif
        }
        /* We did not hand off ownership, so free the caller's allocation here. */
        free_setcontext(mc, cxt, cxt_size _IF_X64(cxt_alloc));
    }
    return res;
}
/* This is used to limit the maximum number of times synch_with_thread or
* synch_with_all_threads spin yield loops while waiting on an exiting thread.
* We assert if we ever break out of the loop because of this limit. FIXME make
* sure this limit is large enough that if it does ever trigger it's because
* of some kind of deadlock situation. Breaking out of the synchronization loop
* early is a correctness issue. Right now the limits are large but arbitrary.
* FIXME : once we are confident about thread synch get rid of these max loop checks.
* N.B.: the THREAD_SYNCH_SMALL_LOOP_MAX flag causes us to divide these by 10.
*/
#define SYNCH_ALL_THREADS_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_all_threads_max_loops))
#define SYNCH_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_thread_max_loops))
/* Amt of time in ms to wait for threads to get to a safe spot per a loop,
* see comments in synch_with_yield() on value. Our default value is 5ms which,
* depending on the tick resolution could end up being as long as 10 ms. */
#define SYNCH_WITH_WAIT_MS ((int)DYNAMO_OPTION(synch_with_sleep_time))
/* for use by synch_with_* routines to wait for thread(s) */
static void
synch_thread_yield()
{
/* xref 9400, 9488 - os_thread_yield() works ok on an UP machine, but on an MP machine
* yield might not actually do anything (in which case we burn through to the max
* loop counts pretty quick). We actually do want to wait a reasonable amt of time
* since the target thread might be doing some long latency dr operation (like
* dumping 500kb of registry into a forensics file) so we have the option to sleep
* instead. */
uint num_procs = get_num_processors();
ASSERT(num_procs != 0);
if ((num_procs == 1 && DYNAMO_OPTION(synch_thread_sleep_UP)) ||
(num_procs > 1 && DYNAMO_OPTION(synch_thread_sleep_MP))) {
os_thread_sleep(SYNCH_WITH_WAIT_MS);
} else {
os_thread_yield();
}
}
/* returns a thread_synch_result_t value
 * id - the thread you want to synch with
 * block - whether or not should spin until synch is successful
 * hold_initexit_lock - whether or not the caller holds the thread_initexit_lock
 * caller_state - a given permission define from above that describes the
 *                current state of the caller (note that holding the initexit
 *                lock is ok with respect to NO_LOCK
 * desired_state - a requested state define from above that describes the
 *                 desired synchronization
 * flags - options from THREAD_SYNCH_ bitmask values
 * NOTE - if you hold the initexit_lock and block with greater than NONE for
 * caller state, then initexit_lock may be released and re-acquired
 * NOTE - if any of the nt_ routines fails, it is assumed the thread no longer
 * exists and returns true
 * NOTE - if called directly (i.e. not through synch_with_all_threads)
 * requires THREAD_SYNCH_IS_SAFE(caller_state, desired_state) to avoid deadlock
 * NOTE - Requires the caller is !could_be_linking (i.e. not in an
 * enter_couldbelinking state)
 * NOTE - you can't call this with a thread that you've already suspended
 */
thread_synch_result_t
synch_with_thread(thread_id_t id, bool block, bool hold_initexit_lock,
                  thread_synch_permission_t caller_state,
                  thread_synch_state_t desired_state, uint flags)
{
    thread_id_t my_id = d_r_get_thread_id();
    uint loop_count = 0;
    int expect_exiting = 0;
    thread_record_t *my_tr = thread_lookup(my_id), *trec = NULL;
    dcontext_t *dcontext = NULL;
    priv_mcontext_t mc;
    thread_synch_result_t res = THREAD_SYNCH_RESULT_NOT_SAFE;
    bool first_loop = true;
    IF_UNIX(bool actually_suspended = true;)
    const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags)
        ? (SYNCH_MAXIMUM_LOOPS / 10)
        : SYNCH_MAXIMUM_LOOPS;
    ASSERT(id != my_id);
    /* Must set ABORT or IGNORE. Only caller can RETRY as need a new
     * set of threads for that, hoping problematic one is short-lived.
     */
    ASSERT(
        TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE,
                flags) &&
        !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE,
                 flags));
    if (my_tr != NULL) {
        dcontext = my_tr->dcontext;
        expect_exiting = dcontext->is_exiting ? 1 : 0;
        ASSERT(exiting_thread_count >= expect_exiting);
    } else {
        /* calling thread should always be a known thread */
        ASSERT_NOT_REACHED();
    }
    LOG(THREAD, LOG_SYNCH, 2,
        "Synching with thread " TIDFMT ", giving %d, requesting %d, blocking=%d\n", id,
        caller_state, desired_state, block);
    if (!hold_initexit_lock)
        d_r_mutex_lock(&thread_initexit_lock);
    while (true) {
        /* get thread record */
        /* FIXME : thread id recycling is possible that this could be a
         * different thread, perhaps we should take handle instead of id
         * FIXME: use the new num field of thread_record_t?
         */
        LOG(THREAD, LOG_SYNCH, 3, "Looping on synch with thread " TIDFMT "\n", id);
        trec = thread_lookup(id);
        /* We test the exiting thread count to avoid races between terminate/
         * suspend thread (current thread, though we could be here for other
         * reasons) and an exiting thread (who might no longer be on the all
         * threads list) who is still using shared resources (ref case 3121) */
        if ((trec == NULL && exiting_thread_count == expect_exiting) ||
            loop_count++ > max_loops) {
            /* make sure we didn't exit the loop without synchronizing, FIXME :
             * in release builds we assume the synchronization is failing and
             * continue without it, but that is dangerous.
             * It is now up to the caller to handle this, and some use
             * small loop counts and abort on failure, so only a curiosity. */
            ASSERT_CURIOSITY(loop_count < max_loops);
            LOG(THREAD, LOG_SYNCH, 3,
                "Exceeded loop count synching with thread " TIDFMT "\n", id);
            goto exit_synch_with_thread;
        }
        DOSTATS({
            if (trec == NULL && exiting_thread_count > expect_exiting) {
                LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n");
                STATS_INC(synch_yields_for_exiting_thread);
            }
        });
#ifdef UNIX
        if (trec != NULL && trec->execve) {
            /* i#237/PR 498284: clean up vfork "threads" that invoked execve.
             * There should be no race since vfork suspends the parent.
             */
            res = THREAD_SYNCH_RESULT_SUCCESS;
            actually_suspended = false;
            break;
        }
#endif
        if (trec != NULL) {
            if (first_loop) {
                adjust_wait_at_safe_spot(trec->dcontext, 1);
                first_loop = false;
            }
            if (!os_thread_suspend(trec)) {
                /* FIXME : eventually should be a real assert once we figure out
                 * how to handle threads with low privilege handles */
                /* For dr_api_exit, we may have missed a thread exit. */
                ASSERT_CURIOSITY_ONCE(
                    IF_APP_EXPORTS(dr_api_exit ||)(false &&
                                                   "Thread synch unable to suspend "
                                                   "target thread, case 2096?"));
                res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)
                           ? THREAD_SYNCH_RESULT_SUCCESS
                           : THREAD_SYNCH_RESULT_SUSPEND_FAILURE);
                IF_UNIX(actually_suspended = false);
                break;
            }
            if (!thread_get_mcontext(trec, &mc)) {
                /* FIXME : eventually should be a real assert once we figure out
                 * how to handle threads with low privilege handles */
                ASSERT_CURIOSITY_ONCE(false &&
                                      "Thread synch unable to get_context target"
                                      " thread, case 2096?");
                res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)
                           ? THREAD_SYNCH_RESULT_SUCCESS
                           : THREAD_SYNCH_RESULT_SUSPEND_FAILURE);
                /* Make sure to not leave suspended if not returning success */
                if (!TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags))
                    os_thread_resume(trec);
                break;
            }
            if (at_safe_spot(trec, &mc, desired_state)) {
                /* FIXME: case 5325 for detach handling and testing */
                IF_WINDOWS(
                    ASSERT_NOT_IMPLEMENTED(!dcontext->aslr_context.sys_aslr_clobbered));
                LOG(THREAD, LOG_SYNCH, 2, "Thread " TIDFMT " suspended in good spot\n",
                    id);
                LOG(trec->dcontext->logfile, LOG_SYNCH, 2,
                    "@@@@@@@@@@@@@@@@@@ SUSPENDED BY THREAD " TIDFMT " synch_with_thread "
                    "@@@@@@@@@@@@@@@@@@\n",
                    my_id);
                res = THREAD_SYNCH_RESULT_SUCCESS;
                break;
            } else {
                RSTATS_INC(synchs_not_at_safe_spot);
            }
            if (!os_thread_resume(trec)) {
                ASSERT_NOT_REACHED();
                res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)
                           ? THREAD_SYNCH_RESULT_SUCCESS
                           : THREAD_SYNCH_RESULT_SUSPEND_FAILURE);
                break;
            }
        }
        /* don't loop if !block, before we ever release initexit_lock in case
         * caller is holding it and not blocking, (i.e. wants to keep it) */
        if (!block)
            break;
        /* see if someone is waiting for us */
        if (dcontext != NULL && caller_state != THREAD_SYNCH_NONE &&
            should_wait_at_safe_spot(dcontext)) {
            if (trec != NULL)
                adjust_wait_at_safe_spot(trec->dcontext, -1);
            d_r_mutex_unlock(&thread_initexit_lock);
            /* ref case 5552, if we've inc'ed the exiting thread count need to
             * adjust it back before calling check_wait_at_safe_spot since we
             * may end up being killed there */
            if (dcontext->is_exiting) {
                ASSERT(exiting_thread_count >= 1);
                ATOMIC_DEC(int, exiting_thread_count);
            }
            check_wait_at_safe_spot(dcontext, caller_state);
            if (dcontext->is_exiting) {
                ATOMIC_INC(int, exiting_thread_count);
            }
            d_r_mutex_lock(&thread_initexit_lock);
            trec = thread_lookup(id);
            /* Like above, we test the exiting thread count to avoid races
             * between terminate/suspend thread (current thread, though we
             * could be here for other reasons) and an exiting thread (who
             * might no longer be on the all threads list) who is still using
             * shared resources (ref case 3121) */
            if (trec == NULL && exiting_thread_count == expect_exiting) {
                if (!hold_initexit_lock)
                    d_r_mutex_unlock(&thread_initexit_lock);
                return THREAD_SYNCH_RESULT_SUCCESS;
            }
            DOSTATS({
                if (trec == NULL && exiting_thread_count > expect_exiting) {
                    LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n");
                    STATS_INC(synch_yields_for_exiting_thread);
                }
            });
            if (trec != NULL)
                adjust_wait_at_safe_spot(trec->dcontext, 1);
        }
        STATS_INC(synch_yields);
        d_r_mutex_unlock(&thread_initexit_lock);
        /* Note - we only need call the ENTER/EXIT_DR hooks if single thread
         * in dr since we are not really exiting DR here (we just need to give
         * up the exclusion lock for a while to let thread we are trying to
         * synch with make progress towards a safe synch point). */
        if (INTERNAL_OPTION(single_thread_in_DR))
            EXITING_DR(); /* give up DR exclusion lock */
        synch_thread_yield();
        if (INTERNAL_OPTION(single_thread_in_DR))
            ENTERING_DR(); /* re-gain DR exclusion lock */
        d_r_mutex_lock(&thread_initexit_lock);
    }
    /* reset this back to before */
    /* NOTE(review): trec can be NULL here on the !block break path when the
     * target was not found but exiting_thread_count != expect_exiting; the
     * deref below assumes callers never hit that combination -- confirm. */
    adjust_wait_at_safe_spot(trec->dcontext, -1);
    /* success!, is suspended (or already exited) put in desired state */
    if (res == THREAD_SYNCH_RESULT_SUCCESS) {
        LOG(THREAD, LOG_SYNCH, 2,
            "Success synching with thread " TIDFMT " performing cleanup\n", id);
        if (THREAD_SYNCH_IS_TERMINATED(desired_state)) {
            if (IF_UNIX_ELSE(!trec->execve, true))
                os_thread_terminate(trec);
#ifdef UNIX
            /* We need to ensure the target thread has received the
             * signal and is no longer using its sigstack or ostd struct
             * before we clean those up.
             */
            /* PR 452168: if failed to send suspend signal, do not spin */
            if (actually_suspended) {
                if (!is_thread_terminated(trec->dcontext)) {
                    /* i#96/PR 295561: use futex(2) if available. Blocks until
                     * the thread gets terminated.
                     */
                    os_wait_thread_terminated(trec->dcontext);
                }
            } else
                ASSERT(TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags));
#endif
        }
        if (THREAD_SYNCH_IS_CLEANED(desired_state)) {
            dynamo_other_thread_exit(trec _IF_WINDOWS(false));
        }
    }
exit_synch_with_thread:
    if (!hold_initexit_lock)
        d_r_mutex_unlock(&thread_initexit_lock);
    return res;
}
/* desired_synch_state - a requested state define from above that describes
* the synchronization required
* threads, num_threads - must not be NULL, if !THREAD_SYNCH_IS_CLEANED(desired
* synch_state) then will hold a list and num of threads
* cur_state - a given permission from above that describes the state of the
* caller
* flags - options from THREAD_SYNCH_ bitmask values
* NOTE - Requires that the caller doesn't hold the thread_initexit_lock, on
* return caller will hold the thread_initexit_lock
* NOTE - Requires the caller is !could_be_linking (i.e. not in an
* enter_couldbelinking state)
* NOTE - To avoid deadlock this routine should really only be called with
* cur_state giving maximum permissions, (currently app_exit and detach could
* conflict, except our routes to app_exit go through different synch point
* (TermThread or TermProcess) first
* NOTE - when !all_synched, if desired_synch_state is not cleaned or synch result is
 * ignored, the caller is responsible for resuming threads that are suspended,
* freeing allocation for threads array and releasing locks
* Caller should call end_synch_with_all_threads when finished to accomplish that.
*/
bool
synch_with_all_threads(thread_synch_state_t desired_synch_state,
/*OUT*/ thread_record_t ***threads_out,
/*OUT*/ int *num_threads_out, thread_synch_permission_t cur_state,
/* FIXME: turn the ThreadSynch* enums into bitmasks and merge
* into flags param */
uint flags)
{
/* Case 8815: we cannot use the OUT params themselves internally as they
* may be volatile, so we need our own values until we're ready to return
*/
bool threads_are_stale = true;
thread_record_t **threads = NULL;
int num_threads = 0;
/* we record ids from before we gave up thread_initexit_lock */
thread_id_t *thread_ids_temp = NULL;
int num_threads_temp = 0, i, j, expect_self_exiting = 0;
/* synch array contains a SYNCH_WITH_ALL_ value for each thread */
uint *synch_array = NULL, *synch_array_temp = NULL;
enum {
SYNCH_WITH_ALL_NEW = 0,
SYNCH_WITH_ALL_NOTIFIED = 1,
SYNCH_WITH_ALL_SYNCHED = 2,
};
bool all_synched = false;
thread_id_t my_id = d_r_get_thread_id();
uint loop_count = 0;
thread_record_t *tr = thread_lookup(my_id);
dcontext_t *dcontext = NULL;
uint flags_one; /* flags for synch_with_thread() call */
thread_synch_result_t synch_res;
const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags)
? (SYNCH_ALL_THREADS_MAXIMUM_LOOPS / 10)
: SYNCH_ALL_THREADS_MAXIMUM_LOOPS;
#ifdef CLIENT_INTERFACE
/* We treat client-owned threads as native but they don't have a clean native state
* for us to suspend them in (they are always in client or dr code). We need to be
* able to suspend such threads so that they're !couldbelinking and holding no dr
* locks. We make the assumption that client-owned threads that are in the client
* library (or are in a dr routine that has set dcontext->client_thread_safe_to_sync)
* meet this requirement (see at_safe_spot()). As such, all we need to worry about
* here are client locks the client-owned thread might hold that could block other
* threads from reaching safe spots. If we only suspend client-owned threads once
* all other threads are taken care of then this is not a problem. FIXME - xref
* PR 231301 on issues that arise if the client thread spends most of its time
* calling out of its lib to dr API, ntdll, or generated code functions. */
bool finished_non_client_threads;
#endif
ASSERT(!dynamo_all_threads_synched);
/* flag any caller who does not give up enough permissions to avoid livelock
* with other synch_with_all_threads callers
*/
ASSERT_CURIOSITY(cur_state >= THREAD_SYNCH_NO_LOCKS_NO_XFER);
/* also flag anyone asking for full mcontext w/o possibility of no_xfer,
* which can also livelock
*/
ASSERT_CURIOSITY(desired_synch_state < THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT
/* detach currently violates this: bug 8942 */
|| started_detach);
/* must set exactly one of these -- FIXME: better way to check? */
ASSERT(
TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE |
THREAD_SYNCH_SUSPEND_FAILURE_RETRY,
flags) &&
!TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE,
flags) &&
!TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_RETRY,
flags) &&
!TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SUSPEND_FAILURE_RETRY,
flags));
flags_one = flags;
/* we'll do the retry */
if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags)) {
flags_one &= ~THREAD_SYNCH_SUSPEND_FAILURE_RETRY;
flags_one |= THREAD_SYNCH_SUSPEND_FAILURE_ABORT;
}
if (tr != NULL) {
dcontext = tr->dcontext;
expect_self_exiting = dcontext->is_exiting ? 1 : 0;
ASSERT(exiting_thread_count >= expect_self_exiting);
} else {
/* calling thread should always be a known thread */
ASSERT_NOT_REACHED();
}
LOG(THREAD, LOG_SYNCH, 1,
"synch with all threads my id = " SZFMT
" Giving %d permission and seeking %d state\n",
my_id, cur_state, desired_synch_state);
/* grab all_threads_synch_lock */
/* since all_threads synch doesn't give any permissions this is necessary
* to prevent deadlock in the case of two threads trying to synch with all
* threads at the same time */
/* FIXME: for DEADLOCK_AVOIDANCE, to preserve LIFO, should we
* exit DR, trylock, then immediately enter DR? introducing any
* race conditions in doing so?
* Ditto on all other os_thread_yields in this file!
*/
while (!d_r_mutex_trylock(&all_threads_synch_lock)) {
LOG(THREAD, LOG_SYNCH, 2, "Spinning on all threads synch lock\n");
STATS_INC(synch_yields);
if (dcontext != NULL && cur_state != THREAD_SYNCH_NONE &&
should_wait_at_safe_spot(dcontext)) {
/* ref case 5552, if we've inc'ed the exiting thread count need to
* adjust it back before calling check_wait_at_safe_spot since we
* may end up being killed there */
if (dcontext->is_exiting) {
ASSERT(exiting_thread_count >= 1);
ATOMIC_DEC(int, exiting_thread_count);
}
check_wait_at_safe_spot(dcontext, cur_state);
if (dcontext->is_exiting) {
ATOMIC_INC(int, exiting_thread_count);
}
}
LOG(THREAD, LOG_SYNCH, 2, "Yielding on all threads synch lock\n");
/* Note - we only need call the ENTER/EXIT_DR hooks if single thread
* in dr since we are not really exiting DR here (we just need to give
* up the exclusion lock for a while to let thread we are trying to
* synch with make progress towards a safe synch point). */
if (INTERNAL_OPTION(single_thread_in_DR))
EXITING_DR(); /* give up DR exclusion lock */
os_thread_yield();
if (INTERNAL_OPTION(single_thread_in_DR))
ENTERING_DR(); /* re-gain DR exclusion lock */
}
d_r_mutex_lock(&thread_initexit_lock);
/* synch with all threads */
/* FIXME: this should be a do/while loop - then we wouldn't have
* to initialize all the variables above
*/
while (threads_are_stale || !all_synched ||
exiting_thread_count > expect_self_exiting || uninit_thread_count > 0) {
if (threads != NULL) {
/* Case 8941: must free here rather than when yield (below) since
* termination condition can change between there and here
*/
ASSERT(num_threads > 0);
global_heap_free(threads,
num_threads *
sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
/* be paranoid */
threads = NULL;
num_threads = 0;
}
get_list_of_threads(&threads, &num_threads);
threads_are_stale = false;
synch_array = (uint *)global_heap_alloc(num_threads *
sizeof(uint) HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < num_threads; i++) {
synch_array[i] = SYNCH_WITH_ALL_NEW;
}
/* Fixme : an inefficient algorithm, but is not as bad as it seems
* since it is very unlikely that many threads have started or ended
* and the list threads routine always puts them in the same order
*/
/* on first loop num_threads_temp == 0 */
for (i = 0; i < num_threads_temp; i++) {
/* care only if we have already notified or synched thread */
if (synch_array_temp[i] != SYNCH_WITH_ALL_NEW) {
for (j = 0; j < num_threads; j++) {
/* FIXME : os recycles thread ids, should have stronger
* check here, could check dcontext equivalence, (but we
* recycle those to), probably should check threads_temp
* handle and be sure thread is still alive since the id
* won't be recycled then */
if (threads[j]->id == thread_ids_temp[i]) {
synch_array[j] = synch_array_temp[i];
break;
}
}
}
}
/* free old synch list, old thread id list */
if (num_threads_temp > 0) {
global_heap_free(thread_ids_temp,
num_threads_temp *
sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT));
global_heap_free(synch_array_temp,
num_threads_temp * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT));
num_threads_temp = 0;
}
all_synched = true;
LOG(THREAD, LOG_SYNCH, 3, "Looping over all threads (%d threads)\n", num_threads);
#ifdef CLIENT_INTERFACE
finished_non_client_threads = true;
for (i = 0; i < num_threads; i++) {
if (threads[i]->id != my_id && synch_array[i] != SYNCH_WITH_ALL_SYNCHED &&
!IS_CLIENT_THREAD(threads[i]->dcontext)) {
finished_non_client_threads = false;
break;
}
}
#endif
/* make a copy of the thread ids (can't just keep the thread list
* since it consists of pointers to live thread_record_t structs).
* we must make the copy before synching b/c cleaning up a thread
* involves freeing its thread_record_t.
*/
thread_ids_temp = (thread_id_t *)global_heap_alloc(
num_threads * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < num_threads; i++)
thread_ids_temp[i] = threads[i]->id;
num_threads_temp = num_threads;
synch_array_temp = synch_array;
for (i = 0; i < num_threads; i++) {
/* do not de-ref threads[i] after synching if it was cleaned up! */
if (synch_array[i] != SYNCH_WITH_ALL_SYNCHED && threads[i]->id != my_id) {
#ifdef CLIENT_INTERFACE
if (!finished_non_client_threads &&
IS_CLIENT_THREAD(threads[i]->dcontext)) {
all_synched = false;
continue; /* skip this thread for now till non-client are finished */
}
if (IS_CLIENT_THREAD(threads[i]->dcontext) &&
(TEST(flags, THREAD_SYNCH_SKIP_CLIENT_THREAD) ||
!should_suspend_client_thread(threads[i]->dcontext,
desired_synch_state))) {
/* PR 609569: do not suspend this thread.
* Avoid races between resume_all_threads() and
* dr_client_thread_set_suspendable() by storing the fact.
*
* For most of our synchall purposes we really want to prevent
* threads from acting on behalf of the application, and make
* sure we can relocate them if in the code cache. DR itself is
* thread-safe, and while a synchall-initiator will touch
* thread-private data for threads it suspends, having some
* threads it does not suspend shouldn't cause any problems so
* long as it doesn't touch their thread-private data.
*/
synch_array[i] = SYNCH_WITH_ALL_SYNCHED;
threads[i]->dcontext->client_data->left_unsuspended = true;
continue;
}
#endif
/* speed things up a tad */
if (synch_array[i] != SYNCH_WITH_ALL_NOTIFIED) {
ASSERT(synch_array[i] == SYNCH_WITH_ALL_NEW);
adjust_wait_at_safe_spot(threads[i]->dcontext, 1);
synch_array[i] = SYNCH_WITH_ALL_NOTIFIED;
}
LOG(THREAD, LOG_SYNCH, 2,
"About to try synch with thread #%d/%d " TIDFMT "\n", i, num_threads,
threads[i]->id);
synch_res =
synch_with_thread(threads[i]->id, false, true, THREAD_SYNCH_NONE,
desired_synch_state, flags_one);
if (synch_res == THREAD_SYNCH_RESULT_SUCCESS) {
LOG(THREAD, LOG_SYNCH, 2, "Synch succeeded!\n");
/* successful synch */
synch_array[i] = SYNCH_WITH_ALL_SYNCHED;
if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state))
adjust_wait_at_safe_spot(threads[i]->dcontext, -1);
} else {
LOG(THREAD, LOG_SYNCH, 2, "Synch failed!\n");
all_synched = false;
if (synch_res == THREAD_SYNCH_RESULT_SUSPEND_FAILURE) {
if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags))
goto synch_with_all_abort;
} else
ASSERT(synch_res == THREAD_SYNCH_RESULT_NOT_SAFE);
}
} else {
LOG(THREAD, LOG_SYNCH, 2, "Skipping synch with thread " TIDFMT "\n",
thread_ids_temp[i]);
}
}
if (loop_count++ >= max_loops)
break;
/* We test the exiting thread count to avoid races between exit
* process (current thread, though we could be here for detach or other
* reasons) and an exiting thread (who might no longer be on the all
* threads list) who is still using shared resources (ref case 3121) */
if (!all_synched || exiting_thread_count > expect_self_exiting ||
uninit_thread_count > 0) {
DOSTATS({
if (all_synched && exiting_thread_count > expect_self_exiting) {
LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread %d %d %d\n",
all_synched, exiting_thread_count, expect_self_exiting);
STATS_INC(synch_yields_for_exiting_thread);
} else if (all_synched && uninit_thread_count > 0) {
LOG(THREAD, LOG_SYNCH, 2, "Waiting for an uninit thread %d %d\n",
all_synched, uninit_thread_count);
STATS_INC(synch_yields_for_uninit_thread);
}
});
STATS_INC(synch_yields);
/* release lock in case some other thread waiting on it */
d_r_mutex_unlock(&thread_initexit_lock);
LOG(THREAD, LOG_SYNCH, 2, "Not all threads synched looping again\n");
/* Note - we only need call the ENTER/EXIT_DR hooks if single
* thread in dr since we are not really exiting DR here (we just
* need to give up the exclusion lock for a while to let thread we
* are trying to synch with make progress towards a safe synch
* point). */
if (INTERNAL_OPTION(single_thread_in_DR))
EXITING_DR(); /* give up DR exclusion lock */
synch_thread_yield();
if (INTERNAL_OPTION(single_thread_in_DR))
ENTERING_DR(); /* re-gain DR exclusion lock */
d_r_mutex_lock(&thread_initexit_lock);
/* We unlock and lock the thread_initexit_lock, so threads might be stale. */
threads_are_stale = true;
}
}
/* case 9392: callers passing in ABORT expect a return value of failure
* to correspond w/ no suspended threads, a freed threads array, and no
* locks being held, so we go through the abort path
*/
if (!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags))
goto synch_with_all_abort;
synch_with_all_exit:
/* make sure we didn't exit the loop without synchronizing, FIXME : in
* release builds we assume the synchronization is failing and continue
* without it, but that is dangerous.
* It is now up to the caller to handle this, and some use
* small loop counts and abort on failure, so only a curiosity. */
ASSERT_CURIOSITY(loop_count < max_loops);
ASSERT(threads != NULL);
/* Since the set of threads can change we don't set the success field
* until we're passing back the thread list.
* We would use an tsd field directly instead of synch_array except
* for THREAD_SYNCH_*_CLEAN where tsd is freed.
*/
ASSERT(synch_array != NULL);
if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state)) { /* else unsafe to access tsd */
for (i = 0; i < num_threads; i++) {
if (threads[i]->id != my_id) {
thread_synch_data_t *tsd;
ASSERT(threads[i]->dcontext != NULL);
tsd = (thread_synch_data_t *)threads[i]->dcontext->synch_field;
tsd->synch_with_success = (synch_array[i] == SYNCH_WITH_ALL_SYNCHED);
}
}
}
global_heap_free(synch_array, num_threads * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT));
if (num_threads_temp > 0) {
global_heap_free(thread_ids_temp,
num_threads_temp *
sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT));
}
/* FIXME case 9333: on all_synch failure we do not free threads array if
* synch_result is ignored. Callers are responsible for resuming threads that are
* suspended and freeing allocation for threads array
*/
if ((!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) ||
THREAD_SYNCH_IS_CLEANED(desired_synch_state)) {
global_heap_free(
threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
threads = NULL;
num_threads = 0;
}
LOG(THREAD, LOG_SYNCH, 1, "Finished synch with all threads: result=%d\n",
all_synched);
DOLOG(1, LOG_SYNCH, {
if (all_synched) {
LOG(THREAD, LOG_SYNCH, 1,
"\treturning holding initexit_lock and all_threads_synch_lock\n");
}
});
*threads_out = threads;
*num_threads_out = num_threads;
dynamo_all_threads_synched = all_synched;
ASSERT(exiting_thread_count - expect_self_exiting == 0);
/* FIXME case 9392: where on all_synch failure we do not release the locks in the
* non-abort exit path */
return all_synched;
synch_with_all_abort:
/* undo everything! */
for (i = 0; i < num_threads; i++) {
DEBUG_DECLARE(bool ok;)
if (threads[i]->id != my_id) {
if (synch_array[i] == SYNCH_WITH_ALL_SYNCHED) {
bool resume = true;
#ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(threads[i]->dcontext) &&
threads[i]->dcontext->client_data->left_unsuspended) {
/* PR 609569: we did not suspend this thread */
resume = false;
}
#endif
if (resume) {
DEBUG_DECLARE(ok =)
os_thread_resume(threads[i]);
ASSERT(ok);
}
/* ensure synch_with_success is set to false on exit path,
* even though locks are released and not fully valid
*/
synch_array[i] = SYNCH_WITH_ALL_NEW;
} else if (synch_array[i] == SYNCH_WITH_ALL_NOTIFIED) {
adjust_wait_at_safe_spot(threads[i]->dcontext, -1);
}
}
}
d_r_mutex_unlock(&thread_initexit_lock);
d_r_mutex_unlock(&all_threads_synch_lock);
ASSERT(exiting_thread_count - expect_self_exiting == 0);
ASSERT(!all_synched); /* ensure our OUT values will be NULL,0
for THREAD_SYNCH_SUSPEND_FAILURE_ABORT */
goto synch_with_all_exit;
}
/* Resumes every thread in the given array other than the caller itself.
 * Assumes that the threads were suspended with synch_with_all_threads()
 * and thus even is_thread_currently_native() threads were suspended.
 * Assumes that the caller will free up threads if it is dynamically allocated.
 */
void
resume_all_threads(thread_record_t **threads, const uint num_threads)
{
    uint idx;
    thread_id_t self_tid;
    bool resumed;
    ASSERT_OWN_MUTEX(true, &all_threads_synch_lock);
    ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
    if (threads == NULL || num_threads == 0)
        return;
    self_tid = d_r_get_thread_id();
    for (idx = 0; idx < num_threads; idx++) {
        thread_record_t *tr = threads[idx];
        /* Never try to resume ourselves. */
        if (tr->id == self_tid)
            continue;
#ifdef CLIENT_SIDELINE
        if (IS_CLIENT_THREAD(tr->dcontext) &&
            tr->dcontext->client_data->left_unsuspended) {
            /* PR 609569: we did not suspend this thread */
            tr->dcontext->client_data->left_unsuspended = false;
            continue;
        }
#endif
        /* Every remaining thread in the array was suspended, so each one
         * has to successfully resume.
         */
        resumed = os_thread_resume(tr);
        ASSERT(resumed);
    }
}
/* Should be called to clean up after synch_with_all_threads as otherwise
 * dynamo_all_threads_synched will be left as true.
 * If resume is true, resumes the threads in the threads array.
 * Unlocks thread_initexit_lock and all_threads_synch_lock.
 * If threads != NULL, frees the threads array.
 */
void
end_synch_with_all_threads(thread_record_t **threads, uint num_threads, bool resume)
{
    /* dynamo_all_threads_synched will be false if synch failed */
    ASSERT_CURIOSITY(dynamo_all_threads_synched);
    ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock));
    dynamo_all_threads_synched = false;
    /* The resume must happen before the unlocks below: resume_all_threads()
     * asserts ownership of both locks.
     */
    if (resume) {
        ASSERT(threads != NULL);
        resume_all_threads(threads, num_threads);
    }
    /* if we knew whether THREAD_SYNCH_*_CLEANED was specified we could set
     * synch_with_success to false, but it's unsafe otherwise
     */
    d_r_mutex_unlock(&thread_initexit_lock);
    d_r_mutex_unlock(&all_threads_synch_lock);
    /* A NULL threads array (e.g., after a synch failure) means there is
     * nothing for us to free.
     */
    if (threads != NULL) {
        global_heap_free(
            threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
    }
}
/* Resets a thread's context to start interpreting anew.
 * ASSUMPTION: the thread is currently suspended.
 * This was moved here from fcache_reset_all_caches_proactively simply to
 * get access to win32-private CONTEXT-related routines
 *
 * tr is the record of the suspended target thread; synch_state is the synch
 * state it was brought to, passed through to waiting_at_safe_spot() and
 * set_synched_thread_context().  The caller must hold both
 * all_threads_synch_lock and thread_initexit_lock (asserted below).
 */
void
translate_from_synchall_to_dispatch(thread_record_t *tr, thread_synch_state_t synch_state)
{
    bool res;
    /* we do not have to align priv_mcontext_t */
    priv_mcontext_t *mc = global_heap_alloc(sizeof(*mc) HEAPACCT(ACCT_OTHER));
    /* set_synched_thread_context() may take ownership of mc; each such call
     * clears free_cxt so we do not double-free at the exit label below.
     */
    bool free_cxt = true;
    dcontext_t *dcontext = tr->dcontext;
    app_pc pre_translation;
    ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock));
    /* FIXME: would like to assert that suspendcount is > 0 but how? */
    ASSERT(thread_synch_successful(tr));
    res = thread_get_mcontext(tr, mc);
    ASSERT(res);
    /* Remember the untranslated pc so we can test where the thread was. */
    pre_translation = (app_pc)mc->pc;
    LOG(GLOBAL, LOG_CACHE, 2, "\trecreating address for " PFX "\n", mc->pc);
    LOG(THREAD, LOG_CACHE, 2,
        "translate_from_synchall_to_dispatch: being translated from " PFX "\n", mc->pc);
    if (get_at_syscall(dcontext)) {
        /* Don't need to do anything as shared_syscall and do_syscall will not
         * change due to a reset and will have any inlined ibl updated. If we
         * did try to send these guys back to d_r_dispatch, have to set asynch_tag
         * (as well as next_tag since translation looks only at that), restore
         * TOS to asynch_target/esi (unless still at reset state), and have to
         * figure out how to avoid post-syscall processing for those who never
         * did pre-syscall processing (i.e., if at shared_syscall) (else will
         * get wrong dcontext->sysnum, etc.)
         * Not to mention that after resuming the kernel will finish the
         * syscall and clobber several registers, making it hard to set a
         * clean state (xref case 6113, case 5074, and notes below)!
         * It's just too hard to redirect while at a syscall.
         */
        LOG(GLOBAL, LOG_CACHE, 2, "\tat syscall so not translating\n");
        /* sanity check */
        ASSERT(is_after_syscall_address(dcontext, pre_translation) ||
               IF_WINDOWS_ELSE(pre_translation == vsyscall_after_syscall,
                               is_after_or_restarted_do_syscall(dcontext, pre_translation,
                                                                true /*vsys*/)));
#if defined(UNIX) && defined(X86_32)
        if (pre_translation == vsyscall_sysenter_return_pc ||
            pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc) {
            /* Because we remove the vsyscall hook on a send_all_other_threads_native()
             * yet have no barrier to know the threads have run their own go-native
             * code, we want to send them away from the hook, to our gencode.
             */
            if (pre_translation == vsyscall_sysenter_return_pc)
                mc->pc = after_do_shared_syscall_addr(dcontext);
            else if (pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc)
                mc->pc = get_do_int_syscall_entry(dcontext);
            /* exit stub and subsequent fcache_return will save rest of state */
            res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0,
                                             synch_state _IF_X64((void *)mc)
                                                 _IF_WINDOWS(NULL));
            ASSERT(res);
            /* cxt is freed by set_synched_thread_context() or target thread */
            free_cxt = false;
        }
#endif
        IF_ARM({
            if (INTERNAL_OPTION(steal_reg_at_reset) != 0) {
                /* We don't want to translate, just update the stolen reg values */
                arch_mcontext_reset_stolen_reg(dcontext, mc);
                res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0,
                                                 synch_state _IF_X64((void *)mc)
                                                     _IF_WINDOWS(NULL));
                ASSERT(res);
                /* cxt is freed by set_synched_thread_context() or target thread */
                free_cxt = false;
            }
        });
    } else {
        /* Not at a syscall: translate the machine context back to an app pc
         * and send the thread to d_r_dispatch via the reset exit stub.
         */
        res = translate_mcontext(tr, mc, true /*restore memory*/, NULL);
        ASSERT(res);
        if (!thread_synch_successful(tr) || mc->pc == 0) {
            /* Better to risk failure on accessing a freed cache than
             * to have a guaranteed crash by sending to NULL.
             * FIXME: it's possible the real translation is NULL,
             * but if so should be fine to leave it there since the
             * current eip should also be NULL.
             */
            ASSERT_NOT_REACHED();
            goto translate_from_synchall_to_dispatch_exit;
        }
        LOG(GLOBAL, LOG_CACHE, 2, "\ttranslation pc = " PFX "\n", mc->pc);
        ASSERT(!is_dynamo_address((app_pc)mc->pc) && !in_fcache((app_pc)mc->pc));
        IF_ARM({
            if (INTERNAL_OPTION(steal_reg_at_reset) != 0) {
                /* XXX: do we need this? Will signal.c will fix it up prior
                 * to sigreturn from suspend handler?
                 */
                arch_mcontext_reset_stolen_reg(dcontext, mc);
            }
        });
        /* We send all threads, regardless of whether was in DR or not, to
         * re-interp from translated cxt, to avoid having to handle stale
         * local state problems if we simply resumed.
         * We assume no KSTATS or other state issues to deal with.
         * FIXME: enter hook w/o an exit?
         */
        dcontext->next_tag = (app_pc)mc->pc;
        /* FIXME PR 212266: for linux if we're at an inlined syscall
         * we may have problems: however, we might be able to rely on the kernel
         * not clobbering any registers besides eax (which is ok: reset stub
         * handles it), though presumably it's allowed to write to any
         * caller-saved registers. We may need to change inlined syscalls
         * to set at_syscall (see comments below as well).
         */
        if (pre_translation ==
                IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_sysenter_return_pc) &&
            !waiting_at_safe_spot(dcontext->thread_record, synch_state)) {
            /* FIXME case 7827/PR 212266: shouldn't translate for this case, right?
             * should have -ignore_syscalls set at_syscall and eliminate
             * this whole block of code
             */
            /* put the proper retaddr back on the stack, as we won't
             * be doing the ret natively to regain control, but rather
             * will interpret it
             */
            /* FIXME: ensure readable and writable? */
            app_pc cur_retaddr = *((app_pc *)mc->xsp);
            app_pc native_retaddr;
            ASSERT(cur_retaddr != NULL);
            /* must be ignore_syscalls (else, at_syscall will be set) */
            IF_WINDOWS(ASSERT(DYNAMO_OPTION(ignore_syscalls)));
            ASSERT(get_syscall_method() == SYSCALL_METHOD_SYSENTER);
            /* For DYNAMO_OPTION(sygate_sysenter) we need to restore both stack
             * values and fix up esp, but we can't do it here since the kernel
             * will change esp... incompatible w/ -ignore_syscalls anyway
             */
            IF_WINDOWS(ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(sygate_sysenter)));
            /* may still be at syscall from a prior reset -- don't want to grab
             * locks for in_fcache so we determine via the translation
             */
            ASSERT_NOT_TESTED();
            native_retaddr = recreate_app_pc(dcontext, cur_retaddr, NULL);
            if (native_retaddr != cur_retaddr) {
                LOG(GLOBAL, LOG_CACHE, 2, "\trestoring TOS to " PFX " from " PFX "\n",
                    native_retaddr, cur_retaddr);
                *((app_pc *)mc->xsp) = native_retaddr;
            } else {
                LOG(GLOBAL, LOG_CACHE, 2,
                    "\tnot restoring TOS since still at previous reset state " PFX "\n",
                    cur_retaddr);
            }
        }
        /* Send back to d_r_dispatch. Rather than setting up last_exit in eax here,
         * we point to a special routine to save the correct eax -- in fact it's
         * simply a direct exit stub. Originally this was b/c we tried to
         * translate threads at system calls, and the kernel clobbers eax (and
         * ecx/edx for sysenter, though preserves eip setcontext change: case
         * 6113, case 5074) in finishing the system call, but now that we don't
         * translate them we've kept the stub approach. It's actually faster
         * for the stub itself to save eax and set the linkstub than for us to
         * emulate it here, anyway.
         * Note that a thread in check_wait_at_safe_spot() spins and will NOT be
         * at a syscall, avoiding problems there (case 5074).
         */
        mc->pc = (app_pc)get_reset_exit_stub(dcontext);
        LOG(GLOBAL, LOG_CACHE, 2, "\tsent to reset exit stub " PFX "\n", mc->pc);
#ifdef WINDOWS
        /* i#25: we could have interrupted thread in DR, where has priv fls data
         * in TEB, and fcache_return blindly copies into app fls: so swap to app
         * now, just in case. DR routine can handle swapping when already app.
         */
        swap_peb_pointer(dcontext, false /*to app*/);
#endif
        /* exit stub and subsequent fcache_return will save rest of state */
        res =
            set_synched_thread_context(dcontext->thread_record, mc, NULL, 0,
                                       synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL));
        ASSERT(res);
        /* cxt is freed by set_synched_thread_context() or target thread */
        free_cxt = false;
    }
translate_from_synchall_to_dispatch_exit:
    /* Only free mc if ownership was not handed off above. */
    if (free_cxt) {
        global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER));
    }
}
/***************************************************************************
 * Detach and similar operations
 */
/* Atomic variable to prevent multiple threads from trying to detach at
 * the same time.  It transitions from LOCK_FREE_STATE to LOCK_SET_STATE
 * via atomic_compare_exchange in detach_on_permanent_stack(); since no
 * thread ever waits on it, it cannot cause a deadlock.
 */
DECLARE_CXTSWPROT_VAR(static volatile int dynamo_detaching_flag, LOCK_FREE_STATE);
/* "Detach-lite": suspends all threads other than the caller at a synch
 * point, marks each suspendable thread's dcontext with go_native, and
 * redirects it through d_r_dispatch so it goes native from there.  Client
 * threads and already-native threads are left alone.  Does not tear down
 * DR state (contrast with detach_on_permanent_stack()).
 */
void
send_all_other_threads_native(void)
{
    thread_record_t **threads;
    dcontext_t *my_dcontext = get_thread_private_dcontext();
    int i, num_threads;
    bool waslinking;
    /* We're forced to use an asynch model due to not being able to call
     * dynamo_thread_not_under_dynamo, which has a bonus of making it easier
     * to handle other threads asking for synchall.
     * This is why we don't ask for THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT.
     */
    const thread_synch_state_t desired_state =
        THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER;
    ASSERT(dynamo_initialized && !dynamo_exited && my_dcontext != NULL);
    LOG(my_dcontext->logfile, LOG_ALL, 1, "%s\n", __FUNCTION__);
    LOG(GLOBAL, LOG_ALL, 1, "%s: cur thread " TIDFMT "\n", __FUNCTION__,
        d_r_get_thread_id());
    /* Remember the linking state so we can restore it before returning. */
    waslinking = is_couldbelinking(my_dcontext);
    if (waslinking)
        enter_nolinking(my_dcontext, NULL, false);
#ifdef WINDOWS
    /* Ensure new threads will go straight to native */
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    init_apc_go_native_pause = true;
    init_apc_go_native = true;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
# ifdef CLIENT_INTERFACE
    wait_for_outstanding_nudges();
# endif
#endif
    /* Suspend all threads except those trying to synch with us */
    if (!synch_with_all_threads(desired_state, &threads, &num_threads,
                                THREAD_SYNCH_NO_LOCKS_NO_XFER,
                                THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) {
        REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_SYNCHRONIZE_THREADS, 2,
                                    get_application_name(), get_application_pid());
    }
    /* A successful synchall leaves us holding both locks. */
    ASSERT(mutex_testlock(&all_threads_synch_lock) &&
           mutex_testlock(&thread_initexit_lock));
#ifdef WINDOWS
    /* Let threads waiting at APC point go native */
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    init_apc_go_native_pause = false;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
#endif
#ifdef WINDOWS
    /* FIXME i#95: handle outstanding callbacks where we've put our retaddr on
     * the app stack. This should be able to share
     * detach_helper_handle_callbacks() code. Won't the old single-thread
     * dr_app_stop() have had this same problem? Since we're not tearing
     * everything down, can we solve it by waiting until we hit
     * after_shared_syscall_code_ex() in a native thread?
     */
    ASSERT_NOT_IMPLEMENTED(get_syscall_method() != SYSCALL_METHOD_SYSENTER);
#endif
    for (i = 0; i < num_threads; i++) {
        if (threads[i]->dcontext == my_dcontext ||
            is_thread_currently_native(threads[i]) ||
            /* FIXME i#2784: we should suspend client threads for the duration
             * of the app being native to avoid problems with having no
             * signal handlers in place.
             */
            IS_CLIENT_THREAD(threads[i]->dcontext))
            continue;
        /* Because dynamo_thread_not_under_dynamo() has to be run by the owning
         * thread, the simplest solution is to send everyone back to d_r_dispatch
         * with a flag to go native from there, rather than directly setting the
         * native context.
         */
        threads[i]->dcontext->go_native = true;
        if (thread_synch_state_no_xfer(threads[i]->dcontext)) {
            /* Another thread trying to synch with us: just let it go. It will
             * go native once it gets back to d_r_dispatch which will be before it
             * goes into the cache.
             */
            continue;
        } else {
            LOG(my_dcontext->logfile, LOG_ALL, 1, "%s: sending thread %d native\n",
                __FUNCTION__, threads[i]->id);
            LOG(threads[i]->dcontext->logfile, LOG_ALL, 1,
                "**** requested by thread %d to go native\n", my_dcontext->owning_thread);
            /* This won't change a thread at a syscall, so we rely on the thread
             * going to d_r_dispatch and then going native when its syscall exits.
             *
             * FIXME i#95: That means the time to go native is, unfortunately,
             * unbounded. This means that dr_app_cleanup() needs to synch the
             * threads and force-xl8 these. We should share code with detach.
             * Right now we rely on the app joining all its threads *before*
             * calling dr_app_cleanup(), or using dr_app_stop_and_cleanup[_with_stats]().
             * This also means we have a race with unhook_vsyscall in
             * os_process_not_under_dynamorio(), which we solve by redirecting
             * threads at syscalls to our gencode.
             */
            translate_from_synchall_to_dispatch(threads[i], desired_state);
        }
    }
    /* Resume everyone and release the synchall locks. */
    end_synch_with_all_threads(threads, num_threads, true /*resume*/);
    os_process_not_under_dynamorio(my_dcontext);
    if (waslinking)
        enter_couldbelinking(my_dcontext, NULL, false);
    return;
}
/* Performs a detach on this thread's permanent stack: suspends all threads
 * at safe points, translates their contexts back to native app state,
 * resumes them natively, and (if do_cleanup) tears down DR's own state.
 * internal:   when false, the allow_detach option is checked and the detach
 *             is refused if it is not set.
 * do_cleanup: when false, returns early after sending threads native
 *             without freeing DR data structures.
 * drstats:    if non-NULL, receives a final statistics snapshot before
 *             dynamo_shared_exit().
 */
void
detach_on_permanent_stack(bool internal, bool do_cleanup, dr_stats_t *drstats)
{
    dcontext_t *my_dcontext;
    thread_record_t **threads;
    thread_record_t *my_tr = NULL;
    int i, num_threads, my_idx = -1;
    thread_id_t my_id;
#ifdef WINDOWS
    bool detach_stacked_callbacks;
    bool *cleanup_tpc;
#endif
    DEBUG_DECLARE(bool ok;)
    DEBUG_DECLARE(int exit_res;)
    /* synch-all flags: */
    uint flags = 0;
#ifdef WINDOWS
    /* For Windows we may fail to suspend a thread (e.g., privilege
     * problems), and in that case we want to just ignore the failure.
     */
    flags |= THREAD_SYNCH_SUSPEND_FAILURE_IGNORE;
#elif defined(UNIX)
    /* For Unix, such privilege problems are rarer but we would still prefer to
     * continue if we hit a problem.
     */
    flags |= THREAD_SYNCH_SUSPEND_FAILURE_IGNORE;
#endif
    /* i#297: we only synch client threads after process exit event. */
    flags |= THREAD_SYNCH_SKIP_CLIENT_THREAD;
    ENTERING_DR();
    /* dynamo_detaching_flag is not really a lock, and since no one ever waits
     * on it we can't deadlock on it either.
     */
    if (!atomic_compare_exchange(&dynamo_detaching_flag, LOCK_FREE_STATE, LOCK_SET_STATE))
        return;
    /* Unprotect .data for exit cleanup.
     * XXX: more secure to not do this until we've synched, but then need
     * alternative prot for started_detach and init_apc_go_native*
     */
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    ASSERT(!started_detach);
    started_detach = true;
    if (!internal) {
        synchronize_dynamic_options();
        if (!DYNAMO_OPTION(allow_detach)) {
            /* Detach refused: undo the state set above and bail out. */
            started_detach = false;
            SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
            dynamo_detaching_flag = LOCK_FREE_STATE;
            SYSLOG_INTERNAL_ERROR("Detach called without the allow_detach option set");
            EXITING_DR();
            return;
        }
    }
    ASSERT(dynamo_initialized);
    ASSERT(!dynamo_exited);
    my_id = d_r_get_thread_id();
    my_dcontext = get_thread_private_dcontext();
    if (my_dcontext == NULL) {
        /* We support detach after just dr_app_setup() with no start. */
        ASSERT(!dynamo_started);
        my_tr = thread_lookup(my_id);
        ASSERT(my_tr != NULL);
        my_dcontext = my_tr->dcontext;
        os_process_under_dynamorio_initiate(my_dcontext);
        os_process_under_dynamorio_complete(my_dcontext);
        dynamo_thread_under_dynamo(my_dcontext);
        ASSERT(get_thread_private_dcontext() == my_dcontext);
    }
    ASSERT(my_dcontext != NULL);
    LOG(GLOBAL, LOG_ALL, 1, "Detach: thread %d starting detach process\n", my_id);
    SYSLOG(SYSLOG_INFORMATION, INFO_DETACHING, 2, get_application_name(),
           get_application_pid());
    /* synch with flush */
    if (my_dcontext != NULL)
        enter_threadexit(my_dcontext);
#ifdef WINDOWS
    /* Signal to go native at APC init here. Set pause first so that threads
     * will wait till we are ready for them to go native (after ntdll unpatching).
     * (To avoid races these must be set in this order!)
     */
    init_apc_go_native_pause = true;
    init_apc_go_native = true;
    /* XXX i#2611: there is still a race for threads caught between init_apc_go_native
     * and dynamo_thread_init adding to all_threads: this just reduces the risk.
     * Unfortunately we can't easily use the UNIX solution of uninit_thread_count
     * since we can't distinguish internally vs externally created threads.
     */
    os_thread_yield();
# ifdef CLIENT_INTERFACE
    wait_for_outstanding_nudges();
# endif
#endif
#ifdef UNIX
    /* i#2270: we ignore alarm signals during detach to reduce races. */
    signal_remove_alarm_handlers(my_dcontext);
#endif
    /* suspend all DR-controlled threads at safe locations */
    if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT, &threads,
                                &num_threads,
                                /* Case 6821: allow other synch-all-thread uses
                                 * that beat us to not wait on us. We still have
                                 * a problem if we go first since we must xfer
                                 * other threads.
                                 */
                                THREAD_SYNCH_NO_LOCKS_NO_XFER, flags)) {
        REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_SYNCHRONIZE_THREADS, 2,
                                    get_application_name(), get_application_pid());
    }
    /* Now we own the thread_initexit_lock. We'll release the locks grabbed in
     * synch_with_all_threads below after cleaning up all the threads in case we
     * need to grab it during process exit cleanup.
     */
    ASSERT(mutex_testlock(&all_threads_synch_lock) &&
           mutex_testlock(&thread_initexit_lock));
    ASSERT(!doing_detach);
    doing_detach = true;
#ifdef HOT_PATCHING_INTERFACE
    /* In hotp_only mode, we must remove patches when detaching; we don't want
     * to leave in all our hooks and detach; that will definitely crash the app.
     */
    if (DYNAMO_OPTION(hotp_only))
        hotp_only_detach_helper();
#endif
#ifdef WINDOWS
    /* XXX: maybe we should re-check for additional threads that passed the init_apc
     * lock but weren't yet initialized and so didn't show up on the list?
     */
    LOG(GLOBAL, LOG_ALL, 1,
        "Detach : about to unpatch ntdll.dll and fix memory permissions\n");
    detach_remove_image_entry_hook(num_threads, threads);
    if (!INTERNAL_OPTION(noasynch)) {
        /* We have to do this here, before client exit events, as we're letting
         * threads go native next. We thus will not detect crashes during client
         * exit during detach.
         */
        callback_interception_unintercept();
    }
#endif
    if (!DYNAMO_OPTION(thin_client))
        revert_memory_regions();
#ifdef UNIX
    unhook_vsyscall();
#endif
    LOG(GLOBAL, LOG_ALL, 1,
        "Detach : unpatched ntdll.dll and fixed memory permissions\n");
#ifdef WINDOWS
    /* Release the APC init lock and let any threads waiting there go native */
    LOG(GLOBAL, LOG_ALL, 1, "Detach : Releasing init_apc_go_native_pause\n");
    init_apc_go_native_pause = false;
#endif
    /* perform exit tasks that require full thread data structs */
    dynamo_process_exit_with_thread_info();
#ifdef WINDOWS
    /* We need to record a bool indicating whether we can free each thread's
     * resources fully or whether we need them for callback cleanup.
     */
    cleanup_tpc =
        (bool *)global_heap_alloc(num_threads * sizeof(bool) HEAPACCT(ACCT_OTHER));
    /* Handle any outstanding callbacks */
    detach_stacked_callbacks = detach_handle_callbacks(num_threads, threads, cleanup_tpc);
#endif
    LOG(GLOBAL, LOG_ALL, 1, "Detach: starting to translate contexts\n");
    for (i = 0; i < num_threads; i++) {
        priv_mcontext_t mc;
        if (threads[i]->dcontext == my_dcontext) {
            /* Remember our own slot; we clean ourselves up separately below. */
            my_idx = i;
            my_tr = threads[i];
            continue;
        } else if (IS_CLIENT_THREAD(threads[i]->dcontext)) {
            /* i#297 we will kill client-owned threads later after app exit events
             * in dynamo_shared_exit().
             */
            continue;
        } else if (detach_do_not_translate(threads[i])) {
            LOG(GLOBAL, LOG_ALL, 2, "Detach: not translating " TIDFMT "\n",
                threads[i]->id);
        } else {
            LOG(GLOBAL, LOG_ALL, 2, "Detach: translating " TIDFMT "\n", threads[i]->id);
            DEBUG_DECLARE(ok =)
            thread_get_mcontext(threads[i], &mc);
            ASSERT(ok);
            /* For a thread at a syscall, we use SA_RESTART for our suspend signal,
             * so the kernel will adjust the restart point back to the syscall for us
             * where expected. This is an artificial signal we're introducing, so an
             * app that assumes no signals and assumes its non-auto-restart syscalls
             * don't need loops could be broken.
             */
            LOG(GLOBAL, LOG_ALL, 3,
                /* Having the code bytes can help diagnose post-detach where the code
                 * cache is gone.
                 */
                "Detach: pre-xl8 pc=%p (%02x %02x %02x %02x %02x), xsp=%p "
                "for thread " TIDFMT "\n",
                mc.pc, *mc.pc, *(mc.pc + 1), *(mc.pc + 2), *(mc.pc + 3), *(mc.pc + 4),
                mc.xsp, threads[i]->id);
            DEBUG_DECLARE(ok =)
            translate_mcontext(threads[i], &mc, true /*restore mem*/, NULL /*f*/);
            ASSERT(ok);
            if (!threads[i]->under_dynamo_control) {
                LOG(GLOBAL, LOG_ALL, 1,
                    "Detach : thread " TIDFMT " already running natively\n",
                    threads[i]->id);
                /* we do need to restore the app ret addr, for native_exec */
                if (!DYNAMO_OPTION(thin_client) && DYNAMO_OPTION(native_exec) &&
                    !vmvector_empty(native_exec_areas)) {
                    put_back_native_retaddrs(threads[i]->dcontext);
                }
            }
            detach_finalize_translation(threads[i], &mc);
            LOG(GLOBAL, LOG_ALL, 1, "Detach: pc=" PFX " for thread " TIDFMT "\n", mc.pc,
                threads[i]->id);
            ASSERT(!is_dynamo_address(mc.pc) && !in_fcache(mc.pc));
            /* XXX case 7457: if the thread is suspended after it received a fault
             * but before the kernel copied the faulting context to the user mode
             * structures for the handler, it could result in a codemod exception
             * that wouldn't happen natively!
             */
            DEBUG_DECLARE(ok =)
            thread_set_mcontext(threads[i], &mc);
            ASSERT(ok);
            /* i#249: restore app's PEB/TEB fields */
            IF_WINDOWS(restore_peb_pointer_for_thread(threads[i]->dcontext));
        }
        /* Resumes the thread, which will do kernel-visible cleanup of
         * signal state. Resume happens within the synch_all region where
         * the thread_initexit_lock is held so that we can clean up thread
         * data later.
         */
#ifdef UNIX
        os_signal_thread_detach(threads[i]->dcontext);
#endif
        LOG(GLOBAL, LOG_ALL, 1, "Detach: thread " TIDFMT " is being resumed as native\n",
            threads[i]->id);
        os_thread_resume(threads[i]);
    }
    ASSERT(my_idx != -1 || !internal);
#ifdef UNIX
    LOG(GLOBAL, LOG_ALL, 1, "Detach: waiting for threads to fully detach\n");
    for (i = 0; i < num_threads; i++) {
        if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext))
            os_wait_thread_detached(threads[i]->dcontext);
    }
#endif
    /* NOTE(review): this early return leaves all_threads_synch_lock and
     * thread_initexit_lock held and doing_detach/started_detach still set;
     * presumably the no-cleanup path's caller tolerates or handles this --
     * confirm against callers.
     */
    if (!do_cleanup)
        return;
    /* Clean up each thread now that everyone has gone native. Needs to be
     * done with the thread_initexit_lock held, which is true within a synched
     * region.
     */
    for (i = 0; i < num_threads; i++) {
        if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
            LOG(GLOBAL, LOG_ALL, 1, "Detach: cleaning up thread " TIDFMT " %s\n",
                threads[i]->id, IF_WINDOWS_ELSE(cleanup_tpc[i] ? "and its TPC" : "", ""));
            dynamo_other_thread_exit(threads[i] _IF_WINDOWS(!cleanup_tpc[i]));
        }
    }
    if (my_idx != -1) {
        /* pre-client thread cleanup (PR 536058) */
        dynamo_thread_exit_pre_client(my_dcontext, my_tr->id);
    }
    LOG(GLOBAL, LOG_ALL, 1, "Detach: Letting slave threads go native\n");
#ifdef WINDOWS
    global_heap_free(cleanup_tpc, num_threads * sizeof(bool) HEAPACCT(ACCT_OTHER));
    /* XXX: there's a possible race if a thread waiting at APC is still there
     * when we unload our dll.
     */
    os_thread_yield();
#endif
    /* Threads were already resumed above, so do not resume them again here. */
    end_synch_with_all_threads(threads, num_threads, false /*don't resume */);
    threads = NULL;
    LOG(GLOBAL, LOG_ALL, 1, "Detach: Entering final cleanup and unload\n");
    SYSLOG_INTERNAL_INFO("Detaching from process, entering final cleanup");
    if (drstats != NULL)
        stats_get_snapshot(drstats);
    DEBUG_DECLARE(exit_res =)
    dynamo_shared_exit(my_tr _IF_WINDOWS(detach_stacked_callbacks));
    ASSERT(exit_res == SUCCESS);
    detach_finalize_cleanup();
    stack_free(d_r_initstack, DYNAMORIO_STACK_SIZE);
    dynamo_exit_post_detach();
    doing_detach = false;
    started_detach = false;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    dynamo_detaching_flag = LOCK_FREE_STATE;
    EXITING_DR();
}
| 1 | 21,896 | Do we need to save the existing value of the stolen reg somehow? | DynamoRIO-dynamorio | c |
@@ -94,7 +94,8 @@ class AdminDirectoryClient(_base_client.BaseClient):
api_errors.ApiExecutionError
"""
members_stub = self.service.members()
- request = members_stub.list(groupKey=group_key)
+ request = members_stub.list(groupKey=group_key,
+ maxResults=500)
results_by_member = []
# TODO: Investigate yielding results to handle large group lists. | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for Admin Directory API client."""
import gflags as flags
from googleapiclient.errors import HttpError
from httplib2 import HttpLib2Error
from oauth2client.service_account import ServiceAccountCredentials
from ratelimiter import RateLimiter
from google.cloud.security.common.gcp_api import _base_client
from google.cloud.security.common.gcp_api import errors as api_errors
FLAGS = flags.FLAGS
flags.DEFINE_string('domain_super_admin_email', None,
'An email address of a super-admin in the GSuite domain. '
'REQUIRED: if inventory_groups is enabled.')
flags.DEFINE_string('groups_service_account_key_file', None,
'The key file with credentials for the service account. '
'REQUIRED: If inventory_groups is enabled and '
'runnning locally.')
flags.DEFINE_integer('max_admin_api_calls_per_day', 150000,
'Admin SDK queries per day.')
class AdminDirectoryClient(_base_client.BaseClient):
"""GSuite Admin Directory API Client."""
API_NAME = 'admin'
DEFAULT_QUOTA_TIMESPAN_PER_SECONDS = 86400 # pylint: disable=invalid-name
REQUIRED_SCOPES = frozenset([
'https://www.googleapis.com/auth/admin.directory.group.readonly'
])
def __init__(self):
super(AdminDirectoryClient, self).__init__(
credentials=self._build_credentials(),
api_name=self.API_NAME)
self.rate_limiter = RateLimiter(
FLAGS.max_admin_api_calls_per_day,
self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
def _build_credentials(self):
"""Build credentials required for accessing the directory API.
Returns:
Credentials as built by oauth2client.
Raises:
api_errors.ApiExecutionError
"""
try:
credentials = ServiceAccountCredentials.from_json_keyfile_name(
FLAGS.groups_service_account_key_file,
scopes=self.REQUIRED_SCOPES)
except (ValueError, KeyError, TypeError) as e:
raise api_errors.ApiExecutionError(
'Error building admin api credential: %s', e)
return credentials.create_delegated(
FLAGS.domain_super_admin_email)
def get_rate_limiter(self):
"""Return an appriopriate rate limiter."""
return RateLimiter(FLAGS.max_admin_api_calls_per_day,
self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
def get_group_members(self, group_key):
"""Get all the members for specified groups.
Args:
group_key: Its unique id assigned by the Admin API.
Returns:
A list of member objects from the API.
Raises:
api_errors.ApiExecutionError
"""
members_stub = self.service.members()
request = members_stub.list(groupKey=group_key)
results_by_member = []
# TODO: Investigate yielding results to handle large group lists.
while request is not None:
try:
with self.rate_limiter:
response = self._execute(request)
results_by_member.extend(response.get('members', []))
request = members_stub.list_next(request, response)
except (HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(members_stub, e)
return results_by_member
def get_groups(self, customer_id='my_customer'):
"""Get all the groups for a given customer_id.
A note on customer_id='my_customer'.
This is a magic string instead of using the real
customer id. See:
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups
Args:
customer_id: The customer id to scope the request to
Returns:
A list of group objects returned from the API.
Raises:
api_errors.ApiExecutionError
"""
groups_stub = self.service.groups()
request = groups_stub.list(customer=customer_id)
results_by_group = []
# TODO: Investigate yielding results to handle large group lists.
while request is not None:
try:
with self.rate_limiter:
response = self._execute(request)
results_by_group.extend(response.get('groups', []))
request = groups_stub.list_next(request, response)
except (HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(groups_stub, e)
return results_by_group
| 1 | 25,672 | maxResults should come from FLAGS once #244 is submitted. | forseti-security-forseti-security | py |
@@ -12,6 +12,8 @@ using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
+#if NETCOREAPP
+
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
[CollectionDefinition(nameof(HttpMessageHandlerTests), DisableParallelization = true)] | 1 | // <copyright file="HttpMessageHandlerTests.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Runtime.InteropServices;
using Datadog.Trace.ClrProfiler.IntegrationTests.Helpers;
using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
[CollectionDefinition(nameof(HttpMessageHandlerTests), DisableParallelization = true)]
public class HttpMessageHandlerTests : TestHelper
{
public HttpMessageHandlerTests(ITestOutputHelper output)
: base("HttpMessageHandler", output)
{
SetEnvironmentVariable("DD_HTTP_CLIENT_ERROR_STATUSES", "400-499, 502,-343,11-53, 500-500-200");
SetServiceVersion("1.0.0");
}
internal static IEnumerable<InstrumentationOptions> InstrumentationOptionsValues =>
new List<InstrumentationOptions>
{
new InstrumentationOptions(instrumentSocketHandler: false, instrumentWinHttpOrCurlHandler: false),
new InstrumentationOptions(instrumentSocketHandler: false, instrumentWinHttpOrCurlHandler: true),
new InstrumentationOptions(instrumentSocketHandler: true, instrumentWinHttpOrCurlHandler: false),
new InstrumentationOptions(instrumentSocketHandler: true, instrumentWinHttpOrCurlHandler: true),
};
public static IEnumerable<object[]> IntegrationConfig() =>
from instrumentationOptions in InstrumentationOptionsValues
from socketHandlerEnabled in new[] { true, false }
select new object[] { instrumentationOptions, socketHandlerEnabled };
[SkippableTheory]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
[MemberData(nameof(IntegrationConfig))]
public void HttpClient_SubmitsTraces(InstrumentationOptions instrumentation, bool enableSocketsHandler)
{
ConfigureInstrumentation(instrumentation, enableSocketsHandler);
var expectedAsyncCount = CalculateExpectedAsyncSpans(instrumentation);
var expectedSyncCount = CalculateExpectedSyncSpans(instrumentation);
var expectedSpanCount = expectedAsyncCount + expectedSyncCount;
const string expectedOperationName = "http.request";
const string expectedServiceName = "Samples.HttpMessageHandler-http-client";
int httpPort = TcpPortProvider.GetOpenPort();
Output.WriteLine($"Assigning port {httpPort} for the httpPort.");
using (var agent = EnvironmentHelper.GetMockAgent())
using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port, arguments: $"Port={httpPort}"))
{
var spans = agent.WaitForSpans(expectedSpanCount, operationName: expectedOperationName);
Assert.Equal(expectedSpanCount, spans.Count);
foreach (var span in spans)
{
Assert.Equal(expectedOperationName, span.Name);
Assert.Equal(expectedServiceName, span.Service);
Assert.Equal(SpanTypes.Http, span.Type);
Assert.Equal("HttpMessageHandler", span.Tags[Tags.InstrumentationName]);
Assert.False(span.Tags?.ContainsKey(Tags.Version), "External service span should not have service version tag.");
if (span.Tags[Tags.HttpStatusCode] == "502")
{
Assert.Equal(1, span.Error);
}
}
var firstSpan = spans.First();
var traceId = StringUtil.GetHeader(processResult.StandardOutput, HttpHeaderNames.TraceId);
var parentSpanId = StringUtil.GetHeader(processResult.StandardOutput, HttpHeaderNames.ParentId);
Assert.Equal(firstSpan.TraceId.ToString(CultureInfo.InvariantCulture), traceId);
Assert.Equal(firstSpan.SpanId.ToString(CultureInfo.InvariantCulture), parentSpanId);
}
}
[SkippableTheory]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
[MemberData(nameof(IntegrationConfig))]
public void TracingDisabled_DoesNotSubmitsTraces(InstrumentationOptions instrumentation, bool enableSocketsHandler)
{
ConfigureInstrumentation(instrumentation, enableSocketsHandler);
const string expectedOperationName = "http.request";
int httpPort = TcpPortProvider.GetOpenPort();
using (var agent = EnvironmentHelper.GetMockAgent())
using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port, arguments: $"TracingDisabled Port={httpPort}"))
{
var spans = agent.WaitForSpans(1, 2000, operationName: expectedOperationName);
Assert.Equal(0, spans.Count);
var traceId = StringUtil.GetHeader(processResult.StandardOutput, HttpHeaderNames.TraceId);
var parentSpanId = StringUtil.GetHeader(processResult.StandardOutput, HttpHeaderNames.ParentId);
var tracingEnabled = StringUtil.GetHeader(processResult.StandardOutput, HttpHeaderNames.TracingEnabled);
Assert.Null(traceId);
Assert.Null(parentSpanId);
Assert.Equal("false", tracingEnabled);
}
}
private static int CalculateExpectedAsyncSpans(InstrumentationOptions instrumentation)
{
var isWindows = RuntimeInformation.IsOSPlatform(System.Runtime.InteropServices.OSPlatform.Windows);
// net4x doesn't have patch
var spansPerHttpClient = EnvironmentHelper.IsCoreClr() ? 35 : 31;
var expectedSpanCount = spansPerHttpClient * 2; // default HttpClient and CustomHttpClientHandler
// WinHttpHandler instrumentation is off by default, and only available on Windows
if (isWindows && (instrumentation.InstrumentWinHttpOrCurlHandler ?? false))
{
expectedSpanCount += spansPerHttpClient;
}
// SocketsHttpHandler instrumentation is on by default
if (EnvironmentHelper.IsCoreClr() && (instrumentation.InstrumentSocketHandler ?? true))
{
expectedSpanCount += spansPerHttpClient;
}
#if NETCOREAPP2_1 || NETCOREAPP3_0 || NETCOREAPP3_1
if (instrumentation.InstrumentWinHttpOrCurlHandler == true)
{
// Add 1 span for internal WinHttpHandler and CurlHandler using the HttpMessageInvoker
expectedSpanCount++;
}
#endif
return expectedSpanCount;
}
private static int CalculateExpectedSyncSpans(InstrumentationOptions instrumentation)
{
// Sync requests are only enabled in .NET 5
if (!EnvironmentHelper.IsNet5())
{
return 0;
}
var spansPerHttpClient = 21;
var expectedSpanCount = spansPerHttpClient * 2; // default HttpClient and CustomHttpClientHandler
// SocketsHttpHandler instrumentation is on by default
if (instrumentation.InstrumentSocketHandler ?? true)
{
expectedSpanCount += spansPerHttpClient;
}
return expectedSpanCount;
}
private void ConfigureInstrumentation(InstrumentationOptions instrumentation, bool enableSocketsHandler)
{
// Should HttpClient try to use HttpSocketsHandler
SetEnvironmentVariable("DOTNET_SYSTEM_NET_HTTP_USESOCKETSHTTPHANDLER", enableSocketsHandler ? "1" : "0");
// Enable specific integrations, or use defaults
if (instrumentation.InstrumentSocketHandler.HasValue)
{
SetEnvironmentVariable("DD_HttpSocketsHandler_ENABLED", instrumentation.InstrumentSocketHandler.Value ? "true" : "false");
}
if (instrumentation.InstrumentWinHttpOrCurlHandler.HasValue)
{
SetEnvironmentVariable("DD_WinHttpHandler_ENABLED", instrumentation.InstrumentWinHttpOrCurlHandler.Value ? "true" : "false");
SetEnvironmentVariable("DD_CurlHandler_ENABLED", instrumentation.InstrumentWinHttpOrCurlHandler.Value ? "true" : "false");
}
}
public class InstrumentationOptions : IXunitSerializable
{
// ReSharper disable once UnusedMember.Global
public InstrumentationOptions()
{
}
internal InstrumentationOptions(
bool? instrumentSocketHandler,
bool? instrumentWinHttpOrCurlHandler)
{
InstrumentSocketHandler = instrumentSocketHandler;
InstrumentWinHttpOrCurlHandler = instrumentWinHttpOrCurlHandler;
}
internal bool? InstrumentSocketHandler { get; private set; }
internal bool? InstrumentWinHttpOrCurlHandler { get; private set; }
public void Deserialize(IXunitSerializationInfo info)
{
InstrumentSocketHandler = info.GetValue<bool?>(nameof(InstrumentSocketHandler));
InstrumentWinHttpOrCurlHandler = info.GetValue<bool?>(nameof(InstrumentWinHttpOrCurlHandler));
}
public void Serialize(IXunitSerializationInfo info)
{
info.AddValue(nameof(InstrumentSocketHandler), InstrumentSocketHandler);
info.AddValue(nameof(InstrumentWinHttpOrCurlHandler), InstrumentWinHttpOrCurlHandler);
}
public override string ToString() =>
$"InstrumentSocketHandler={InstrumentSocketHandler},InstrumentWinHttpOrCurlHandler={InstrumentWinHttpOrCurlHandler}";
}
}
}
| 1 | 24,835 | is this wanted? | DataDog-dd-trace-dotnet | .cs |
@@ -268,6 +268,14 @@ def get_docker_executable():
return distutils.spawn.find_executable('docker')
+def get_linode_executable():
+ try:
+ pytest.importorskip('linode')
+ return True
+ except Exception:
+ return False
+
+
def get_lxc_executable():
return distutils.spawn.find_executable('lxc-start')
| 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
# Copyright (c) 2018 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import distutils.spawn
import os
import shutil
import sys
from distutils.version import LooseVersion
import ansible
import pexpect
import pytest
import sh
from molecule import logger
from molecule import util
from ..conftest import change_dir_to
LOG = logger.get_logger(__name__)
IS_TRAVIS = os.getenv('TRAVIS') and os.getenv('CI')
@pytest.fixture
def with_scenario(request, scenario_to_test, driver_name, scenario_name,
skip_test):
scenario_directory = os.path.join(
os.path.dirname(util.abs_path(__file__)), os.path.pardir, 'scenarios',
scenario_to_test)
with change_dir_to(scenario_directory):
yield
if scenario_name:
msg = 'CLEANUP: Destroying instances for all scenario(s)'
LOG.out(msg)
options = {
'driver_name': driver_name,
'all': True,
}
cmd = sh.molecule.bake('destroy', **options)
pytest.helpers.run_command(cmd)
@pytest.fixture
def skip_test(request, driver_name):
msg_tmpl = ("Ignoring '{}' tests for now" if driver_name == 'delegated'
else "Skipped '{}' not supported")
support_checks_map = {
'azure': supports_azure,
'docker': supports_docker,
'ec2': supports_ec2,
'gce': supports_gce,
'linode': supports_linode,
'lxc': supports_lxc,
'lxd': supports_lxd,
'openstack': supports_openstack,
'vagrant': supports_vagrant_virtualbox,
'delegated': demands_delegated,
}
try:
check_func = support_checks_map[driver_name]
if not check_func():
pytest.skip(msg_tmpl.format(driver_name))
except KeyError:
pass
@pytest.helpers.register
def idempotence(scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('idempotence', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def init_role(temp_dir, driver_name):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
cmd = sh.molecule.bake('init', 'role', {
'driver-name': driver_name,
'role-name': 'test-init'
})
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
options = {
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def init_scenario(temp_dir, driver_name):
# Create role
role_directory = os.path.join(temp_dir.strpath, 'test-init')
cmd = sh.molecule.bake('init', 'role', {
'driver-name': driver_name,
'role-name': 'test-init'
})
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
# Create scenario
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
options = {
'scenario_name': 'test-scenario',
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def metadata_lint_update(role_directory):
# By default, ansible-lint will fail on newly-created roles because the
# fields in this file have not been changed from their defaults. This is
# good because molecule should create this file using the defaults, and
# users should receive feedback to change these defaults. However, this
# blocks the testing of 'molecule init' itself, so ansible-lint should
# be configured to ignore these metadata lint errors.
ansible_lint_src = os.path.join(
os.path.dirname(util.abs_path(__file__)), '.ansible-lint')
shutil.copy(ansible_lint_src, role_directory)
# Explicitly lint here to catch any unexpected lint errors before
# continuining functional testing. Ansible lint is run at the root
# of the role directory and pointed at the role directory to ensure
# the customize ansible-lint config is used.
with change_dir_to(role_directory):
cmd = sh.ansible_lint.bake('.')
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def list(x):
cmd = sh.molecule.bake('list')
out = pytest.helpers.run_command(cmd, log=False)
out = out.stdout.decode('utf-8')
out = util.strip_ansi_color(out)
for l in x.splitlines():
assert l in out
@pytest.helpers.register
def list_with_format_plain(x):
cmd = sh.molecule.bake('list', {'format': 'plain'})
out = pytest.helpers.run_command(cmd, log=False)
out = out.stdout.decode('utf-8')
out = util.strip_ansi_color(out)
for l in x.splitlines():
assert l in out
@pytest.helpers.register
def login(login_args, scenario_name='default'):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('destroy', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
for instance, regexp in login_args:
if len(login_args) > 1:
child_cmd = 'molecule login --host {} --scenario-name {}'.format(
instance, scenario_name)
else:
child_cmd = 'molecule login --scenario-name {}'.format(
scenario_name)
child = pexpect.spawn(child_cmd)
child.expect(regexp)
# If the test returns and doesn't hang it succeeded.
child.sendline('exit')
@pytest.helpers.register
def test(driver_name, scenario_name='default'):
options = {
'scenario_name': scenario_name,
'all': True,
}
if driver_name == 'delegated':
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def verify(scenario_name='default'):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
def get_docker_executable():
return distutils.spawn.find_executable('docker')
def get_lxc_executable():
return distutils.spawn.find_executable('lxc-start')
def get_lxd_executable():
return distutils.spawn.find_executable('lxd')
def get_vagrant_executable():
return distutils.spawn.find_executable('vagrant')
def get_virtualbox_executable():
return distutils.spawn.find_executable('VBoxManage')
@pytest.helpers.register
def supports_docker():
return get_docker_executable()
@pytest.helpers.register
def supports_linode():
try:
pytest.importorskip('linode')
return True
except Exception:
return False
@pytest.helpers.register
def supports_lxc():
# noqa: E501 # FIXME: Travis CI
# noqa: E501 # This fixes most of the errors:
# noqa: E501 # $ mkdir -p ~/.config/lxc
# noqa: E501 # $ echo "lxc.id_map = u 0 100000 65536" > ~/.config/lxc/default.conf
# noqa: E501 # $ echo "lxc.id_map = g 0 100000 65536" >> ~/.config/lxc/default.conf
# noqa: E501 # $ echo "lxc.network.type = veth" >> ~/.config/lxc/default.conf
# noqa: E501 # $ echo "lxc.network.link = lxcbr0" >> ~/.config/lxc/default.conf
# noqa: E501 # $ echo "$USER veth lxcbr0 2" | sudo tee -a /etc/lxc/lxc-usernet
# noqa: E501 # travis veth lxcbr0 2
# noqa: E501 # But there's still one left:
# noqa: E501 # $ cat ~/lxc-instance.log
# noqa: E501 # lxc-create 1542112494.884 INFO lxc_utils - utils.c:get_rundir:229 - XDG_RUNTIME_DIR isn't set in the environment.
# noqa: E501 # lxc-create 1542112494.884 WARN lxc_log - log.c:lxc_log_init:331 - lxc_log_init called with log already initialized
# noqa: E501 # lxc-create 1542112494.884 INFO lxc_confile - confile.c:config_idmap:1385 - read uid map: type u nsid 0 hostid 100000 range 65536
# noqa: E501 # lxc-create 1542112494.884 INFO lxc_confile - confile.c:config_idmap:1385 - read uid map: type g nsid 0 hostid 100000 range 65536
# noqa: E501 # lxc-create 1542112494.887 ERROR lxc_container - lxccontainer.c:do_create_container_dir:767 - Failed to chown container dir
# noqa: E501 # lxc-create 1542112494.887 ERROR lxc_create_ui - lxc_create.c:main:274 - Error creating container instance
return not IS_TRAVIS and get_lxc_executable()
@pytest.helpers.register
def supports_lxd():
# FIXME: Travis CI
return not IS_TRAVIS and get_lxd_executable()
@pytest.helpers.register
def supports_vagrant_virtualbox():
return (get_vagrant_executable() or get_virtualbox_executable())
@pytest.helpers.register
def demands_delegated():
return pytest.config.getoption('--delegated')
@pytest.helpers.register
def supports_azure():
# FIXME: come up with an actual check
return not IS_TRAVIS # FIXME: Travis CI
@pytest.helpers.register
def supports_ec2():
# FIXME: come up with an actual check
return not IS_TRAVIS # FIXME: Travis CI
@pytest.helpers.register
def supports_gce():
# FIXME: come up with an actual check
return not IS_TRAVIS # FIXME: Travis CI
@pytest.helpers.register
def supports_openstack():
# FIXME: come up with an actual check
return not IS_TRAVIS # FIXME: Travis CI
@pytest.helpers.register
def has_inspec():
return distutils.spawn.find_executable('inspec')
@pytest.helpers.register
def has_rubocop():
return distutils.spawn.find_executable('rubocop')
needs_inspec = pytest.mark.skipif(
not has_inspec(),
reason='Needs inspec to be pre-installed and available in $PATH')
needs_rubocop = pytest.mark.skipif(
not has_rubocop(),
reason='Needs rubocop to be pre-installed and available in $PATH')
@pytest.helpers.register
def is_supported_ansible_python_combo():
ansible_below_25 = LooseVersion(ansible.__version__) < LooseVersion('2.5')
max_py = (3, 6) if ansible_below_25 else (3, 7)
return sys.version_info[:2] <= max_py
skip_unsupported_matrix = pytest.mark.skipif(
not is_supported_ansible_python_combo(),
reason='Current combination of Ansible and Python is not supported')
| 1 | 8,117 | What exception is actually happening here? AFAIK `pytest.importorskip` with just return `None` if there's nothing to import. Which means that this check'd always return `True`. `pytest.importorskip` is specifically designed to trigger skipping the current test anyway so I don't know why you would wrap it like this. | ansible-community-molecule | py |
@@ -22,7 +22,7 @@ module Selenium
class Common
MAX_REDIRECTS = 20 # same as chromium/gecko
CONTENT_TYPE = 'application/json'.freeze
- DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE}.freeze
+ DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE, 'Content-Type' => 'application/x-www-form-urlencoded'}.freeze
attr_accessor :timeout
attr_writer :server_url | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Remote
module Http
class Common
MAX_REDIRECTS = 20 # same as chromium/gecko
CONTENT_TYPE = 'application/json'.freeze
DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE}.freeze
attr_accessor :timeout
attr_writer :server_url
def initialize
@timeout = nil
end
def quit_errors
[IOError]
end
def close
# hook for subclasses - will be called on Driver#quit
end
def call(verb, url, command_hash)
url = server_url.merge(url) unless url.is_a?(URI)
headers = DEFAULT_HEADERS.dup
headers['Cache-Control'] = 'no-cache' if verb == :get
if command_hash
payload = JSON.generate(command_hash)
headers['Content-Type'] = "#{CONTENT_TYPE}; charset=utf-8"
headers['Content-Length'] = payload.bytesize.to_s if [:post, :put].include?(verb)
WebDriver.logger.info(" >>> #{url} | #{payload}")
WebDriver.logger.debug(" > #{headers.inspect}")
elsif verb == :post
payload = '{}'
headers['Content-Length'] = '2'
end
request verb, url, headers, payload
end
private
def server_url
return @server_url if @server_url
raise Error::WebDriverError, 'server_url not set'
end
def request(*)
raise NotImplementedError, 'subclass responsibility'
end
def create_response(code, body, content_type)
code = code.to_i
body = body.to_s.strip
content_type = content_type.to_s
WebDriver.logger.info("<- #{body}")
if content_type.include? CONTENT_TYPE
raise Error::WebDriverError, "empty body: #{content_type.inspect} (#{code})\n#{body}" if body.empty?
Response.new(code, JSON.parse(body))
elsif code == 204
Response.new(code)
else
msg = "unexpected response, code=#{code}, content-type=#{content_type.inspect}"
msg << "\n#{body}" unless body.empty?
raise Error::WebDriverError, msg
end
end
end # Common
end # Http
end # Remote
end # WebDriver
end # Selenium
| 1 | 15,359 | Does it send requests with urlencoded bodies anywhere? I thought it sends only json. Maybe content-type should be `application/json` by default? | SeleniumHQ-selenium | js |
@@ -21,6 +21,7 @@ program
.option('--target_arch <target_arch>', 'target architecture')
.option('--target_apk_base <target_apk_base>', 'target Android OS apk (classic, modern, mono)')
.option('--submodule_sync', 'run submodule sync')
+ .option('--ignore_chromium', 'do not update chromium')
.option('--init', 'initialize all dependencies')
.option('--all', 'This flag is deprecated and no longer has any effect')
.option('--force', 'force reset all projects to origin/ref') | 1 | // Copyright (c) 2019 The Brave Authors. All rights reserved.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// you can obtain one at http://mozilla.org/MPL/2.0/.
const program = require('commander')
const config = require('../lib/config')
const util = require('../lib/util')
const Log = require('../lib/sync/logging')
const projectNames = config.projectNames.filter((project) => config.projects[project].ref)
program
.version(process.env.npm_package_version)
.arguments('[ref]')
.option('--gclient_file <file>', 'gclient config file location')
.option('--gclient_verbose', 'verbose output for gclient')
.option('--run_hooks', 'This flag is deprecated and no longer has any effect')
.option('--run_sync', 'This flag is deprecated and no longer has any effect')
.option('--target_os <target_os>', 'target OS')
.option('--target_arch <target_arch>', 'target architecture')
.option('--target_apk_base <target_apk_base>', 'target Android OS apk (classic, modern, mono)')
.option('--submodule_sync', 'run submodule sync')
.option('--init', 'initialize all dependencies')
.option('--all', 'This flag is deprecated and no longer has any effect')
.option('--force', 'force reset all projects to origin/ref')
.option('--create', 'create a new branch if needed for [ref]')
async function RunCommand () {
program.parse(process.argv)
config.update(program)
if (program.all || program.run_hooks || program.run_sync) {
Log.warn('--all, --run_hooks and --run_sync are deprecated. Will behave as if flag was not passed. Please update your command to `npm run sync` in the future.')
}
// Perform initial brave-core clone and checkout
if (program.init) {
Log.progress('Performing initial checkout of brave-core')
util.checkoutBraveCore()
}
if (program.init || program.submodule_sync) {
util.submoduleSync()
}
if (program.init) {
util.buildGClientConfig()
}
let braveCoreRef = program.args[0]
if (!braveCoreRef) {
braveCoreRef = program.init ? config.getProjectVersion('brave-core') : null
}
if (braveCoreRef || program.init || program.force) {
// we're doing a reset of brave-core so try to stash any changes
Log.progress('Stashing any local changes')
util.runGit(config.braveCoreDir, ['stash'], true)
}
if (braveCoreRef) {
// try to checkout to the right ref if possible
util.runGit(config.braveCoreDir, ['reset', '--hard', 'HEAD'], true)
result = util.runGit(config.braveCoreDir, ['checkout', braveCoreRef], true)
if (result === null && program.create) {
result = util.runGit(config.braveCoreDir, ['checkout', '-b', braveCoreRef], true)
}
if (result === null) {
Log.error('Could not checkout: ' + braveCoreRef)
}
}
util.gclientSync(program.init || program.force, program.init, braveCoreRef)
await util.applyPatches()
util.gclientRunhooks()
}
Log.progress('Brave Browser Sync starting')
RunCommand()
.then(() => {
Log.progress('Brave Browser Sync complete')
})
.catch((err) => {
Log.error('Brave Browser Sync ERROR:')
console.error(err)
process.exit(1)
})
| 1 | 6,692 | I'm not sure about adding more flags here when we're trying to simplify things, I thought we were going to check for patches changes to decide if we needed to update or not? | brave-brave-browser | js |
@@ -57,13 +57,15 @@ var (
ta.Keyinfo["alfa"].PriKey, 3, big.NewInt(10), []byte{}, testutil.TestGasLimit,
big.NewInt(testutil.TestGasPriceInt64))
- testTransferPb = testTransfer.Proto()
+ testTransferHash = testTransfer.Hash()
+ testTransferPb = testTransfer.Proto()
testExecution, _ = testutil.SignedExecution(ta.Addrinfo["bravo"].String(),
ta.Keyinfo["bravo"].PriKey, 1, big.NewInt(0), testutil.TestGasLimit,
big.NewInt(testutil.TestGasPriceInt64), []byte{})
- testExecutionPb = testExecution.Proto()
+ testExecutionHash = testExecution.Hash()
+ testExecutionPb = testExecution.Proto()
testTransfer1, _ = testutil.SignedTransfer(ta.Addrinfo["charlie"].String(), ta.Keyinfo["producer"].PriKey, 1,
big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64)) | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"context"
"encoding/hex"
"io/ioutil"
"math/big"
"os"
"strconv"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/iotexproject/iotex-election/test/mock/mock_committee"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/test/mock/mock_dispatcher"
"github.com/iotexproject/iotex-core/test/mock/mock_factory"
ta "github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
)
// Pre-signed actions shared by the API tests below.
var (
	// testTransfer/testExecution are only submitted via SendAction (never
	// mined), so only their protobuf forms are consumed by the tests.
	testTransfer, _ = testutil.SignedTransfer(ta.Addrinfo["alfa"].String(),
		ta.Keyinfo["alfa"].PriKey, 3, big.NewInt(10), []byte{}, testutil.TestGasLimit,
		big.NewInt(testutil.TestGasPriceInt64))
	testTransferPb   = testTransfer.Proto()
	testExecution, _ = testutil.SignedExecution(ta.Addrinfo["bravo"].String(),
		ta.Keyinfo["bravo"].PriKey, 1, big.NewInt(0), testutil.TestGasLimit,
		big.NewInt(testutil.TestGasPriceInt64), []byte{})
	testExecutionPb = testExecution.Proto()
	// The actions below duplicate ones committed by addTestingBlocks /
	// addActsToActPool; their hashes are used to look them up via the API.
	testTransfer1, _ = testutil.SignedTransfer(ta.Addrinfo["charlie"].String(), ta.Keyinfo["producer"].PriKey, 1,
		big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	transferHash1 = testTransfer1.Hash()
	testVote1, _  = testutil.SignedVote(ta.Addrinfo["charlie"].String(), ta.Keyinfo["charlie"].PriKey, 5,
		testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	voteHash1         = testVote1.Hash()
	testExecution1, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["producer"].PriKey, 5,
		big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
	executionHash1    = testExecution1.Hash()
	testExecution2, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["charlie"].PriKey, 6,
		big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
	executionHash2    = testExecution2.Hash()
	testExecution3, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["alfa"].PriKey, 2,
		big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
	executionHash3 = testExecution3.Hash()
)
// Three delegates (all with equal votes) used when registering the
// life-long-delegates poll protocol in these tests.
var (
	delegates = []genesis.Delegate{
		{
			OperatorAddrStr: identityset.Address(0).String(),
			VotesStr:        "10",
		},
		{
			OperatorAddrStr: identityset.Address(1).String(),
			VotesStr:        "10",
		},
		{
			OperatorAddrStr: identityset.Address(2).String(),
			VotesStr:        "10",
		},
	}
)
// Table-driven fixtures for the API tests. Expected values assume the chain
// state built by addProducerToFactory, addTestingBlocks, and (where an
// actpool is created) addActsToActPool.
var (
	// account lookups: expected on-chain metadata per address
	getAccountTests = []struct {
		in           string
		address      string
		balance      string
		nonce        uint64
		pendingNonce uint64
		numActions   uint64
	}{
		{ta.Addrinfo["charlie"].String(),
			"io1d4c5lp4ea4754wy439g2t99ue7wryu5r2lslh2",
			"3",
			8,
			9,
			11,
		},
		{
			ta.Addrinfo["producer"].String(),
			"io1mflp9m6hcgm2qcghchsdqj3z3eccrnekx9p0ms",
			"9999999999999999999999999991",
			1,
			6,
			2,
		},
	}
	// paging over the global action index
	getActionsTests = []struct {
		start      uint64
		count      uint64
		numActions int
	}{
		{
			1,
			11,
			11,
		},
		{
			11,
			5,
			4,
		},
	}
	// single-action lookups by hash; the last case is a pending (actpool) action
	getActionTests = []struct {
		checkPending bool
		in           string
		nonce        uint64
		senderPubKey string
	}{
		{
			false,
			hex.EncodeToString(transferHash1[:]),
			1,
			testTransfer1.SrcPubkey().HexString(),
		},
		{
			false,
			hex.EncodeToString(voteHash1[:]),
			5,
			testVote1.SrcPubkey().HexString(),
		},
		{
			true,
			hex.EncodeToString(executionHash1[:]),
			5,
			testExecution1.SrcPubkey().HexString(),
		},
	}
	// paging over actions filtered by sender/recipient address
	getActionsByAddressTests = []struct {
		address    string
		start      uint64
		count      uint64
		numActions int
	}{
		{
			ta.Addrinfo["producer"].String(),
			0,
			3,
			2,
		},
		{
			ta.Addrinfo["charlie"].String(),
			1,
			8,
			8,
		},
	}
	// pending (actpool) actions per address
	getUnconfirmedActionsByAddressTests = []struct {
		address    string
		start      uint64
		count      uint64
		numActions int
	}{
		{
			ta.Addrinfo["producer"].String(),
			0,
			4,
			4,
		},
	}
	// actions contained in a given block
	getActionsByBlockTests = []struct {
		blkHeight  uint64
		start      uint64
		count      uint64
		numActions int
	}{
		{
			2,
			0,
			7,
			7,
		},
		{
			4,
			0,
			5,
			5,
		},
	}
	// block metadata paging (the chain holds 4 test blocks)
	getBlockMetasTests = []struct {
		start   uint64
		count   uint64
		numBlks int
	}{
		{
			1,
			4,
			4,
		},
		{
			2,
			5,
			3,
		},
	}
	// per-block metadata lookups by hash
	getBlockMetaTests = []struct {
		blkHeight      uint64
		numActions     int64
		transferAmount string
	}{
		{
			2,
			7,
			"4",
		},
		{
			4,
			5,
			"0",
		},
	}
	// chain-level metadata; the first (emptyChain) case must stay first — see
	// the note in TestServer_GetChainMeta
	getChainMetaTests = []struct {
		// Arguments
		emptyChain       bool
		tpsWindow        int
		pollProtocolType string
		// Expected values
		height     uint64
		numActions int64
		tps        int64
		epoch      iotextypes.EpochData
	}{
		{
			emptyChain: true,
		},
		{
			false,
			1,
			"lifeLongDelegates",
			4,
			15,
			5,
			iotextypes.EpochData{
				Num:                     1,
				Height:                  1,
				GravityChainStartHeight: 1,
			},
		},
		{
			false,
			5,
			"governanceChainCommittee",
			4,
			15,
			15,
			iotextypes.EpochData{
				Num:                     1,
				Height:                  1,
				GravityChainStartHeight: 100,
			},
		},
	}
	// actions submitted through SendAction (protobuf form)
	sendActionTests = []struct {
		actionPb *iotextypes.Action
	}{
		{
			testTransferPb,
		},
		{
			testExecutionPb,
		},
	}
	// receipts for mined actions: status and containing block height
	getReceiptByActionTests = []struct {
		in        string
		status    uint64
		blkHeight uint64
	}{
		{
			hex.EncodeToString(transferHash1[:]),
			action.SuccessReceiptStatus,
			1,
		},
		{
			hex.EncodeToString(voteHash1[:]),
			action.SuccessReceiptStatus,
			2,
		},
		{
			hex.EncodeToString(executionHash2[:]),
			action.SuccessReceiptStatus,
			2,
		},
		{
			hex.EncodeToString(executionHash3[:]),
			action.SuccessReceiptStatus,
			4,
		},
	}
	// read-only contract calls replayed from mined executions
	readContractTests = []struct {
		execHash string
		retValue string
	}{
		{
			hex.EncodeToString(executionHash2[:]),
			"",
		},
	}
	// gas station default price propagation
	suggestGasPriceTests = []struct {
		defaultGasPrice   uint64
		suggestedGasPrice uint64
	}{
		{
			1,
			1,
		},
	}
	// gas estimation for already-mined actions
	estimateGasForActionTests = []struct {
		actionHash   string
		estimatedGas uint64
	}{
		{
			hex.EncodeToString(transferHash1[:]),
			10000,
		},
		{
			hex.EncodeToString(voteHash1[:]),
			10000,
		},
	}
	// rewarding-protocol ReadState queries, incl. bad protocol/method cases
	readUnclaimedBalanceTests = []struct {
		// Arguments
		protocolID string
		methodName string
		addr       string
		// Expected values
		returnErr bool
		balance   *big.Int
	}{
		{
			protocolID: rewarding.ProtocolID,
			methodName: "UnclaimedBalance",
			addr:       identityset.Address(0).String(),
			returnErr:  false,
			balance:    unit.ConvertIotxToRau(64), // reward accrued by the producer over the 4 test blocks (NOTE: the old "= 144 IOTX" note did not match this value)
		},
		{
			protocolID: rewarding.ProtocolID,
			methodName: "UnclaimedBalance",
			addr:       identityset.Address(1).String(),
			returnErr:  false,
			balance:    unit.ConvertIotxToRau(0), // this delegate produced no blocks, so no reward accrued
		},
		{
			protocolID: "Wrong ID",
			methodName: "UnclaimedBalance",
			addr:       ta.Addrinfo["producer"].String(),
			returnErr:  true,
		},
		{
			protocolID: rewarding.ProtocolID,
			methodName: "Wrong Method",
			addr:       ta.Addrinfo["producer"].String(),
			returnErr:  true,
		},
	}
	// poll-protocol BlockProducersByEpoch queries
	readBlockProducersByEpochTests = []struct {
		// Arguments
		protocolID            string
		protocolType          string
		methodName            string
		epoch                 uint64
		numCandidateDelegates uint64
		// Expected Values
		numBlockProducers int
	}{
		{
			protocolID:        "poll",
			protocolType:      "lifeLongDelegates",
			methodName:        "BlockProducersByEpoch",
			epoch:             1,
			numBlockProducers: 3,
		},
		{
			protocolID:            "poll",
			protocolType:          "governanceChainCommittee",
			methodName:            "BlockProducersByEpoch",
			epoch:                 1,
			numCandidateDelegates: 2,
			numBlockProducers:     2,
		},
		{
			protocolID:            "poll",
			protocolType:          "governanceChainCommittee",
			methodName:            "BlockProducersByEpoch",
			epoch:                 1,
			numCandidateDelegates: 1,
			numBlockProducers:     1,
		},
	}
	// poll-protocol ActiveBlockProducersByEpoch queries
	readActiveBlockProducersByEpochTests = []struct {
		// Arguments
		protocolID   string
		protocolType string
		methodName   string
		epoch        uint64
		numDelegates uint64
		// Expected Values
		numActiveBlockProducers int
	}{
		{
			protocolID:              "poll",
			protocolType:            "lifeLongDelegates",
			methodName:              "ActiveBlockProducersByEpoch",
			epoch:                   1,
			numActiveBlockProducers: 3,
		},
		{
			protocolID:              "poll",
			protocolType:            "governanceChainCommittee",
			methodName:              "ActiveBlockProducersByEpoch",
			epoch:                   1,
			numDelegates:            2,
			numActiveBlockProducers: 2,
		},
		{
			protocolID:              "poll",
			protocolType:            "governanceChainCommittee",
			methodName:              "ActiveBlockProducersByEpoch",
			epoch:                   1,
			numDelegates:            1,
			numActiveBlockProducers: 1,
		},
	}
	// epoch metadata queries for both poll protocol variants
	getEpochMetaTests = []struct {
		// Arguments
		EpochNumber      uint64
		pollProtocolType string
		// Expected Values
		epochData                     iotextypes.EpochData
		numBlksInEpoch                int
		numConsenusBlockProducers     int
		numActiveCensusBlockProducers int
	}{
		{
			1,
			"lifeLongDelegates",
			iotextypes.EpochData{
				Num:                     1,
				Height:                  1,
				GravityChainStartHeight: 1,
			},
			4,
			24,
			24,
		},
		{
			1,
			"governanceChainCommittee",
			iotextypes.EpochData{
				Num:                     1,
				Height:                  1,
				GravityChainStartHeight: 100,
			},
			4,
			6,
			4,
		},
	}
)
// TestServer_GetAccount verifies account metadata lookups, including the
// failure path for a request without an address.
func TestServer_GetAccount(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), true)
	r.NoError(err)

	// success cases
	for _, tc := range getAccountTests {
		res, err := svr.GetAccount(context.Background(), &iotexapi.GetAccountRequest{Address: tc.in})
		r.NoError(err)
		meta := res.AccountMeta
		r.Equal(tc.address, meta.Address)
		r.Equal(tc.balance, meta.Balance)
		r.Equal(tc.nonce, meta.Nonce)
		r.Equal(tc.pendingNonce, meta.PendingNonce)
		r.Equal(tc.numActions, meta.NumActions)
	}

	// failure: empty request must be rejected
	_, err = svr.GetAccount(context.Background(), &iotexapi.GetAccountRequest{})
	r.Error(err)
}
// TestServer_GetActions pages through the global action index and checks the
// number of results for each window.
func TestServer_GetActions(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), false)
	r.NoError(err)

	for _, tc := range getActionsTests {
		req := &iotexapi.GetActionsRequest{
			Lookup: &iotexapi.GetActionsRequest_ByIndex{
				ByIndex: &iotexapi.GetActionsByIndexRequest{
					Start: tc.start,
					Count: tc.count,
				},
			},
		}
		res, err := svr.GetActions(context.Background(), req)
		r.NoError(err)
		r.Len(res.ActionInfo, tc.numActions)
	}
}
// TestServer_GetAction looks up single actions by hash (both mined and
// pending) and checks nonce and sender public key.
func TestServer_GetAction(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), true)
	r.NoError(err)

	for _, tc := range getActionTests {
		req := &iotexapi.GetActionsRequest{
			Lookup: &iotexapi.GetActionsRequest_ByHash{
				ByHash: &iotexapi.GetActionByHashRequest{
					ActionHash:   tc.in,
					CheckPending: tc.checkPending,
				},
			},
		}
		res, err := svr.GetActions(context.Background(), req)
		r.NoError(err)
		r.Len(res.ActionInfo, 1)
		got := res.ActionInfo[0]
		r.Equal(tc.nonce, got.Action.GetCore().GetNonce())
		r.Equal(tc.senderPubKey, hex.EncodeToString(got.Action.SenderPubKey))
	}
}
// TestServer_GetActionsByAddress pages through actions involving a given
// address and checks the result count.
func TestServer_GetActionsByAddress(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), false)
	r.NoError(err)

	for _, tc := range getActionsByAddressTests {
		req := &iotexapi.GetActionsRequest{
			Lookup: &iotexapi.GetActionsRequest_ByAddr{
				ByAddr: &iotexapi.GetActionsByAddressRequest{
					Address: tc.address,
					Start:   tc.start,
					Count:   tc.count,
				},
			},
		}
		res, err := svr.GetActions(context.Background(), req)
		r.NoError(err)
		r.Len(res.ActionInfo, tc.numActions)
	}
}
// TestServer_GetUnconfirmedActionsByAddress pages through pending (actpool)
// actions for an address; requires the server to be created with an actpool.
func TestServer_GetUnconfirmedActionsByAddress(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), true)
	r.NoError(err)

	for _, tc := range getUnconfirmedActionsByAddressTests {
		req := &iotexapi.GetActionsRequest{
			Lookup: &iotexapi.GetActionsRequest_UnconfirmedByAddr{
				UnconfirmedByAddr: &iotexapi.GetUnconfirmedActionsByAddressRequest{
					Address: tc.address,
					Start:   tc.start,
					Count:   tc.count,
				},
			},
		}
		res, err := svr.GetActions(context.Background(), req)
		r.NoError(err)
		r.Len(res.ActionInfo, tc.numActions)
	}
}
// TestServer_GetActionsByBlock fetches a block's actions by block hash and
// checks the result count.
func TestServer_GetActionsByBlock(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), false)
	r.NoError(err)

	for _, tc := range getActionsByBlockTests {
		header, err := svr.bc.BlockHeaderByHeight(tc.blkHeight)
		r.NoError(err)
		blkHash := header.HashBlock()
		req := &iotexapi.GetActionsRequest{
			Lookup: &iotexapi.GetActionsRequest_ByBlk{
				ByBlk: &iotexapi.GetActionsByBlockRequest{
					BlkHash: hex.EncodeToString(blkHash[:]),
					Start:   tc.start,
					Count:   tc.count,
				},
			},
		}
		res, err := svr.GetActions(context.Background(), req)
		r.NoError(err)
		r.Len(res.ActionInfo, tc.numActions)
	}
}
// TestServer_GetBlockMetas pages through block metadata and checks that the
// returned blocks come back in strictly increasing height order.
func TestServer_GetBlockMetas(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), false)
	r.NoError(err)

	for _, tc := range getBlockMetasTests {
		req := &iotexapi.GetBlockMetasRequest{
			Lookup: &iotexapi.GetBlockMetasRequest_ByIndex{
				ByIndex: &iotexapi.GetBlockMetasByIndexRequest{
					Start: tc.start,
					Count: tc.count,
				},
			},
		}
		res, err := svr.GetBlockMetas(context.Background(), req)
		r.NoError(err)
		r.Len(res.BlkMetas, tc.numBlks)
		// heights must be strictly increasing
		for i := 1; i < len(res.BlkMetas); i++ {
			r.True(res.BlkMetas[i].Height > res.BlkMetas[i-1].Height)
		}
	}
}
// TestServer_GetBlockMeta looks up a single block's metadata by hash and
// checks the action count and transfer amount.
func TestServer_GetBlockMeta(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), false)
	r.NoError(err)

	for _, tc := range getBlockMetaTests {
		header, err := svr.bc.BlockHeaderByHeight(tc.blkHeight)
		r.NoError(err)
		blkHash := header.HashBlock()
		req := &iotexapi.GetBlockMetasRequest{
			Lookup: &iotexapi.GetBlockMetasRequest_ByHash{
				ByHash: &iotexapi.GetBlockMetaByHashRequest{
					BlkHash: hex.EncodeToString(blkHash[:]),
				},
			},
		}
		res, err := svr.GetBlockMetas(context.Background(), req)
		r.NoError(err)
		r.Len(res.BlkMetas, 1)
		meta := res.BlkMetas[0]
		r.Equal(tc.numActions, meta.NumActions)
		r.Equal(tc.transferAmount, meta.TransferAmount)
	}
}
// TestServer_GetChainMeta exercises GetChainMeta against an empty chain, a
// chain with the life-long-delegates poll protocol, and one with the
// governance-chain-committee protocol.
func TestServer_GetChainMeta(t *testing.T) {
	require := require.New(t)
	cfg := newConfig()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// NOTE(review): pol is declared outside the loop, so a protocol selected
	// in an earlier iteration carries over when pollProtocolType is empty.
	// The emptyChain case relies on being first in getChainMetaTests.
	var pol poll.Protocol
	for _, test := range getChainMetaTests {
		if test.pollProtocolType == "lifeLongDelegates" {
			pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
		} else if test.pollProtocolType == "governanceChainCommittee" {
			committee := mock_committee.NewMockCommittee(ctrl)
			pol, _ = poll.NewGovernanceChainCommitteeProtocol(
				nil,
				committee,
				uint64(123456),
				func(uint64) (time.Time, error) { return time.Now(), nil },
				func(uint64) uint64 { return 1 },
				func(uint64) uint64 { return 1 },
				cfg.Genesis.NumCandidateDelegates,
				cfg.Genesis.NumDelegates,
			)
			// gravity-chain start height for the epoch comes from the committee mock
			committee.EXPECT().HeightByTime(gomock.Any()).Return(test.epoch.GravityChainStartHeight, nil)
		}
		cfg.API.TpsWindow = test.tpsWindow
		svr, err := createServer(cfg, false)
		require.NoError(err)
		if pol != nil {
			require.NoError(svr.registry.ForceRegister(poll.ProtocolID, pol))
		}
		if test.emptyChain {
			// swap in a mock chain reporting height 0
			mbc := mock_blockchain.NewMockBlockchain(ctrl)
			mbc.EXPECT().TipHeight().Return(uint64(0)).Times(1)
			svr.bc = mbc
		}
		res, err := svr.GetChainMeta(context.Background(), &iotexapi.GetChainMetaRequest{})
		require.NoError(err)
		chainMetaPb := res.ChainMeta
		require.Equal(test.height, chainMetaPb.Height)
		require.Equal(test.numActions, chainMetaPb.NumActions)
		require.Equal(test.tps, chainMetaPb.Tps)
		require.Equal(test.epoch.Num, chainMetaPb.Epoch.Num)
		require.Equal(test.epoch.Height, chainMetaPb.Epoch.Height)
		require.Equal(test.epoch.GravityChainStartHeight, chainMetaPb.Epoch.GravityChainStartHeight)
	}
}
// TestServer_SendAction checks that SendAction invokes the broadcast handler
// exactly once per submitted action and hands each one to the dispatcher.
func TestServer_SendAction(t *testing.T) {
	require := require.New(t)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	chain := mock_blockchain.NewMockBlockchain(ctrl)
	mDp := mock_dispatcher.NewMockDispatcher(ctrl)
	broadcastHandlerCount := 0
	svr := Server{bc: chain, dp: mDp, broadcastHandler: func(_ context.Context, _ uint32, _ proto.Message) error {
		broadcastHandlerCount++
		return nil
	}}

	// Times(4) across 2 actions — presumably ChainID is read twice per
	// SendAction call; confirm against Server.SendAction if this changes.
	chain.EXPECT().ChainID().Return(uint32(1)).Times(4)
	mDp.EXPECT().HandleBroadcast(gomock.Any(), gomock.Any(), gomock.Any()).Times(2)

	for i, test := range sendActionTests {
		request := &iotexapi.SendActionRequest{Action: test.actionPb}
		_, err := svr.SendAction(context.Background(), request)
		require.NoError(err)
		// the broadcast handler must have fired once for each action so far
		require.Equal(i+1, broadcastHandlerCount)
	}
}
// TestServer_GetReceiptByAction fetches receipts for mined actions and checks
// status, containing block height, and a non-zero block hash.
func TestServer_GetReceiptByAction(t *testing.T) {
	r := require.New(t)

	svr, err := createServer(newConfig(), false)
	r.NoError(err)

	for _, tc := range getReceiptByActionTests {
		res, err := svr.GetReceiptByAction(context.Background(),
			&iotexapi.GetReceiptByActionRequest{ActionHash: tc.in})
		r.NoError(err)
		receipt := res.ReceiptInfo.Receipt
		r.Equal(tc.status, receipt.Status)
		r.Equal(tc.blkHeight, receipt.BlkHeight)
		r.NotEqual(hash.ZeroHash256, res.ReceiptInfo.BlkHash)
	}
}
// TestServer_ReadContract replays a mined execution as a read-only contract
// call and checks the returned data.
func TestServer_ReadContract(t *testing.T) {
	require := require.New(t)
	cfg := newConfig()

	svr, err := createServer(cfg, false)
	require.NoError(err)

	for _, test := range readContractTests {
		// named actHash so the local does not shadow the imported hash package
		actHash, err := toHash256(test.execHash)
		require.NoError(err)
		exec, err := svr.bc.GetActionByActionHash(actHash)
		require.NoError(err)

		request := &iotexapi.ReadContractRequest{Action: exec.Proto()}

		res, err := svr.ReadContract(context.Background(), request)
		require.NoError(err)
		require.Equal(test.retValue, res.Data)
	}
}
// TestServer_SuggestGasPrice checks that the gas station's configured default
// gas price is what SuggestGasPrice reports.
func TestServer_SuggestGasPrice(t *testing.T) {
	r := require.New(t)
	cfg := newConfig()

	for _, tc := range suggestGasPriceTests {
		cfg.API.GasStation.DefaultGas = tc.defaultGasPrice
		svr, err := createServer(cfg, false)
		r.NoError(err)
		res, err := svr.SuggestGasPrice(context.Background(), &iotexapi.SuggestGasPriceRequest{})
		r.NoError(err)
		r.Equal(tc.suggestedGasPrice, res.GasPrice)
	}
}
// TestServer_EstimateGasForAction estimates gas for already-mined actions and
// checks the expected estimate.
func TestServer_EstimateGasForAction(t *testing.T) {
	require := require.New(t)
	cfg := newConfig()

	svr, err := createServer(cfg, false)
	require.NoError(err)

	for _, test := range estimateGasForActionTests {
		// named actHash so the local does not shadow the imported hash package
		actHash, err := toHash256(test.actionHash)
		require.NoError(err)
		act, err := svr.bc.GetActionByActionHash(actHash)
		require.NoError(err)

		request := &iotexapi.EstimateGasForActionRequest{Action: act.Proto()}

		res, err := svr.EstimateGasForAction(context.Background(), request)
		require.NoError(err)
		require.Equal(test.estimatedGas, res.Gas)
	}
}
func TestServer_ReadUnclaimedBalance(t *testing.T) {
cfg := newConfig()
svr, err := createServer(cfg, false)
require.NoError(t, err)
for _, test := range readUnclaimedBalanceTests {
out, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(test.addr)},
})
if test.returnErr {
require.Error(t, err)
continue
}
require.NoError(t, err)
val, ok := big.NewInt(0).SetString(string(out.Data), 10)
require.True(t, ok)
assert.Equal(t, test.balance, val)
}
}
// TestServer_ReadBlockProducersByEpoch queries the poll protocol's
// BlockProducersByEpoch method for both protocol variants and checks the
// number of producers returned.
func TestServer_ReadBlockProducersByEpoch(t *testing.T) {
	require := require.New(t)
	cfg := newConfig()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mbc := mock_blockchain.NewMockBlockchain(ctrl)
	committee := mock_committee.NewMockCommittee(ctrl)
	candidates := []*state.Candidate{
		{
			Address:       "address1",
			Votes:         big.NewInt(1),
			RewardAddress: "rewardAddress",
		},
		{
			Address:       "address2",
			Votes:         big.NewInt(1),
			RewardAddress: "rewardAddress",
		},
	}
	// Times(2) matches the two governanceChainCommittee cases in the table;
	// the lifeLongDelegates case never consults the chain.
	mbc.EXPECT().CandidatesByHeight(gomock.Any()).Return(candidates, nil).Times(2)

	for _, test := range readBlockProducersByEpochTests {
		var pol poll.Protocol
		if test.protocolType == "lifeLongDelegates" {
			cfg.Genesis.Delegates = delegates
			pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
		} else {
			pol, _ = poll.NewGovernanceChainCommitteeProtocol(
				mbc,
				committee,
				uint64(123456),
				func(uint64) (time.Time, error) { return time.Now(), nil },
				func(uint64) uint64 { return 1 },
				func(uint64) uint64 { return 1 },
				test.numCandidateDelegates,
				cfg.Genesis.NumDelegates,
			)
		}
		svr, err := createServer(cfg, false)
		require.NoError(err)
		require.NoError(svr.registry.ForceRegister(poll.ProtocolID, pol))

		res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
			ProtocolID: []byte(test.protocolID),
			MethodName: []byte(test.methodName),
			Arguments:  [][]byte{byteutil.Uint64ToBytes(test.epoch)},
		})
		require.NoError(err)
		var BlockProducers state.CandidateList
		require.NoError(BlockProducers.Deserialize(res.Data))
		require.Equal(test.numBlockProducers, len(BlockProducers))
	}
}
// TestServer_ReadActiveBlockProducersByEpoch queries the poll protocol's
// ActiveBlockProducersByEpoch method for both protocol variants and checks
// the number of active producers returned.
func TestServer_ReadActiveBlockProducersByEpoch(t *testing.T) {
	require := require.New(t)
	cfg := newConfig()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mbc := mock_blockchain.NewMockBlockchain(ctrl)
	committee := mock_committee.NewMockCommittee(ctrl)
	candidates := []*state.Candidate{
		{
			Address:       "address1",
			Votes:         big.NewInt(1),
			RewardAddress: "rewardAddress",
		},
		{
			Address:       "address2",
			Votes:         big.NewInt(1),
			RewardAddress: "rewardAddress",
		},
	}
	// Times(2) matches the two governanceChainCommittee cases in the table.
	mbc.EXPECT().CandidatesByHeight(gomock.Any()).Return(candidates, nil).Times(2)

	for _, test := range readActiveBlockProducersByEpochTests {
		var pol poll.Protocol
		if test.protocolType == "lifeLongDelegates" {
			cfg.Genesis.Delegates = delegates
			pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
		} else {
			pol, _ = poll.NewGovernanceChainCommitteeProtocol(
				mbc,
				committee,
				uint64(123456),
				func(uint64) (time.Time, error) { return time.Now(), nil },
				func(uint64) uint64 { return 1 },
				func(uint64) uint64 { return 1 },
				cfg.Genesis.NumCandidateDelegates,
				test.numDelegates,
			)
		}
		svr, err := createServer(cfg, false)
		require.NoError(err)
		require.NoError(svr.registry.ForceRegister(poll.ProtocolID, pol))

		res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
			ProtocolID: []byte(test.protocolID),
			MethodName: []byte(test.methodName),
			Arguments:  [][]byte{byteutil.Uint64ToBytes(test.epoch)},
		})
		require.NoError(err)
		var activeBlockProducers state.CandidateList
		require.NoError(activeBlockProducers.Deserialize(res.Data))
		require.Equal(test.numActiveBlockProducers, len(activeBlockProducers))
	}
}
// TestServer_GetEpochMeta checks epoch metadata and per-producer info for the
// life-long-delegates and governance-chain-committee poll protocols.
func TestServer_GetEpochMeta(t *testing.T) {
	require := require.New(t)
	cfg := newConfig()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	for _, test := range getEpochMetaTests {
		svr, err := createServer(cfg, false)
		require.NoError(err)
		if test.pollProtocolType == "lifeLongDelegates" {
			pol := poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
			require.NoError(svr.registry.ForceRegister(poll.ProtocolID, pol))
		} else if test.pollProtocolType == "governanceChainCommittee" {
			// Replace the real chain with mocks so the candidate list and
			// per-delegate productivity can be controlled precisely.
			committee := mock_committee.NewMockCommittee(ctrl)
			mbc := mock_blockchain.NewMockBlockchain(ctrl)
			msf := mock_factory.NewMockFactory(ctrl)
			pol, _ := poll.NewGovernanceChainCommitteeProtocol(
				mbc,
				committee,
				uint64(123456),
				func(uint64) (time.Time, error) { return time.Now(), nil },
				func(uint64) uint64 { return 1 },
				func(uint64) uint64 { return 1 },
				cfg.Genesis.NumCandidateDelegates,
				cfg.Genesis.NumDelegates,
			)
			require.NoError(svr.registry.ForceRegister(poll.ProtocolID, pol))
			committee.EXPECT().HeightByTime(gomock.Any()).Return(test.epochData.GravityChainStartHeight, nil)
			mbc.EXPECT().TipHeight().Return(uint64(4)).Times(2)
			mbc.EXPECT().GetFactory().Return(msf).Times(2)
			msf.EXPECT().NewWorkingSet().Return(nil, nil).Times(2)
			// Six candidates sorted by descending votes; only addresses 1-4
			// produced a block, so only those count as active producers.
			candidates := []*state.Candidate{
				{
					Address:       "address1",
					Votes:         big.NewInt(6),
					RewardAddress: "rewardAddress",
				},
				{
					Address:       "address2",
					Votes:         big.NewInt(5),
					RewardAddress: "rewardAddress",
				},
				{
					Address:       "address3",
					Votes:         big.NewInt(4),
					RewardAddress: "rewardAddress",
				},
				{
					Address:       "address4",
					Votes:         big.NewInt(3),
					RewardAddress: "rewardAddress",
				},
				{
					Address:       "address5",
					Votes:         big.NewInt(2),
					RewardAddress: "rewardAddress",
				},
				{
					Address:       "address6",
					Votes:         big.NewInt(1),
					RewardAddress: "rewardAddress",
				},
			}
			blksPerDelegate := map[string]uint64{
				"address1": uint64(1),
				"address2": uint64(1),
				"address3": uint64(1),
				"address4": uint64(1),
			}
			mbc.EXPECT().ProductivityByEpoch(test.EpochNumber).Return(uint64(4), blksPerDelegate, nil).Times(1)
			mbc.EXPECT().CandidatesByHeight(uint64(1)).
				Return(candidates, nil).Times(1)
			svr.bc = mbc
		}
		res, err := svr.GetEpochMeta(context.Background(), &iotexapi.GetEpochMetaRequest{EpochNumber: test.EpochNumber})
		require.NoError(err)
		require.Equal(test.epochData.Num, res.EpochData.Num)
		require.Equal(test.epochData.Height, res.EpochData.Height)
		require.Equal(test.epochData.GravityChainStartHeight, res.EpochData.GravityChainStartHeight)
		require.Equal(test.numBlksInEpoch, int(res.TotalBlocks))
		require.Equal(test.numConsenusBlockProducers, len(res.BlockProducersInfo))
		var numActiveBlockProducers int
		var prevInfo *iotexapi.BlockProducerInfo
		for _, bp := range res.BlockProducersInfo {
			if bp.Active {
				numActiveBlockProducers++
			}
			// producers must be ordered by votes, descending
			if prevInfo != nil {
				prevVotes, _ := strconv.Atoi(prevInfo.Votes)
				currVotes, _ := strconv.Atoi(bp.Votes)
				require.True(prevVotes >= currVotes)
			}
			prevInfo = bp
		}
		require.Equal(test.numActiveCensusBlockProducers, numActiveBlockProducers)
	}
}
// addProducerToFactory seeds the state factory with the producer account
// (10B IOTX) and commits an empty working set so the balance is persisted.
func addProducerToFactory(sf factory.Factory) error {
	ws, err := sf.NewWorkingSet()
	if err != nil {
		return err
	}
	_, err = accountutil.LoadOrCreateAccount(
		ws,
		ta.Addrinfo["producer"].String(),
		unit.ConvertIotxToRau(10000000000),
	)
	if err != nil {
		return err
	}
	ctx := protocol.WithRunActionsCtx(context.Background(),
		protocol.RunActionsCtx{
			Producer: ta.Addrinfo["producer"],
			GasLimit: testutil.TestGasLimit,
		})
	if _, err = ws.RunActions(ctx, 0, nil); err != nil {
		return err
	}
	return sf.Commit(ws)
}
// addTestingBlocks commits four fixture blocks to the chain:
//
//	block 1: producer transfers 10 to Charlie
//	block 2: Charlie transfers 1 each to A, B, D, P; votes for himself; runs an execution against D
//	block 3: empty block
//	block 4: Charlie and Alfa each cast a vote and run an execution against D
func addTestingBlocks(bc blockchain.Blockchain) error {
	addr0 := ta.Addrinfo["producer"].String()
	priKey0 := ta.Keyinfo["producer"].PriKey
	addr1 := ta.Addrinfo["alfa"].String()
	priKey1 := ta.Keyinfo["alfa"].PriKey
	addr2 := ta.Addrinfo["bravo"].String()
	addr3 := ta.Addrinfo["charlie"].String()
	priKey3 := ta.Keyinfo["charlie"].PriKey
	addr4 := ta.Addrinfo["delta"].String()
	// Block 1
	// Producer transfer--> C
	tsf, err := testutil.SignedTransfer(addr3, priKey0, 1, big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	if err != nil {
		return err
	}
	if err := mintAndCommitBlock(bc, map[string][]action.SealedEnvelope{addr0: {tsf}}); err != nil {
		return err
	}
	// Block 2
	// Charlie transfer--> A, B, D, P
	// Charlie vote--> C
	// Charlie exec--> D
	selps := make([]action.SealedEnvelope, 0)
	for i, recipient := range []string{addr1, addr2, addr4, addr0} {
		selp, err := testutil.SignedTransfer(recipient, priKey3, uint64(i+1), big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
		if err != nil {
			return err
		}
		selps = append(selps, selp)
	}
	vote1, err := testutil.SignedVote(addr3, priKey3, 5, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	if err != nil {
		return err
	}
	execution1, err := testutil.SignedExecution(addr4, priKey3, 6,
		big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
	if err != nil {
		return err
	}
	selps = append(selps, vote1, execution1)
	if err := mintAndCommitBlock(bc, map[string][]action.SealedEnvelope{addr3: selps}); err != nil {
		return err
	}
	// Block 3
	// Empty actions
	if err := mintAndCommitBlock(bc, nil); err != nil {
		return err
	}
	// Block 4
	// Charlie vote--> C
	// Charlie exec--> D
	// Alfa vote--> A
	// Alfa exec--> D
	vote1, err = testutil.SignedVote(addr3, priKey3, 7, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	if err != nil {
		return err
	}
	vote2, err := testutil.SignedVote(addr1, priKey1, 1, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	if err != nil {
		return err
	}
	execution1, err = testutil.SignedExecution(addr4, priKey3, 8,
		big.NewInt(2), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
	if err != nil {
		return err
	}
	execution2, err := testutil.SignedExecution(addr4, priKey1, 2,
		big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
	if err != nil {
		return err
	}
	return mintAndCommitBlock(bc, map[string][]action.SealedEnvelope{
		addr3: {vote1, execution1},
		addr1: {vote2, execution2},
	})
}

// mintAndCommitBlock mints a block from actionMap, validates it, and commits
// it to the chain. Extracted from addTestingBlocks to remove the four-fold
// mint/validate/commit repetition.
func mintAndCommitBlock(bc blockchain.Blockchain, actionMap map[string][]action.SealedEnvelope) error {
	blk, err := bc.MintNewBlock(
		actionMap,
		testutil.TimestampNow(),
	)
	if err != nil {
		return err
	}
	if err := bc.ValidateBlock(blk); err != nil {
		return err
	}
	return bc.CommitBlock(blk)
}
// addActsToActPool seeds the actpool with four pending producer actions
// (two transfers, a vote, and an execution).
func addActsToActPool(ap actpool.ActPool) error {
	priKey := ta.Keyinfo["producer"].PriKey
	// Producer transfer--> A
	tsf1, err := testutil.SignedTransfer(ta.Addrinfo["alfa"].String(), priKey, 2, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	if err != nil {
		return err
	}
	// Producer vote--> P
	vote1, err := testutil.SignedVote(ta.Addrinfo["producer"].String(), priKey, 3, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	if err != nil {
		return err
	}
	// Producer transfer--> B
	tsf2, err := testutil.SignedTransfer(ta.Addrinfo["bravo"].String(), priKey, 4, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
	if err != nil {
		return err
	}
	// Producer exec--> D
	execution1, err := testutil.SignedExecution(ta.Addrinfo["delta"].String(), priKey, 5,
		big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
	if err != nil {
		return err
	}
	for _, act := range []action.SealedEnvelope{tsf1, vote1, tsf2, execution1} {
		if err := ap.Add(act); err != nil {
			return err
		}
	}
	return nil
}
// setupChain builds an in-memory blockchain with all core protocols
// (rolldpos, account, vote, execution, rewarding, life-long-delegates poll)
// registered in a fresh protocol registry, and wires the matching action
// handlers/validators into the factory and chain validator.
func setupChain(cfg config.Config) (blockchain.Blockchain, *protocol.Registry, error) {
	cfg.Chain.ProducerPrivKey = hex.EncodeToString(identityset.PrivateKey(0).Bytes())
	sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
	if err != nil {
		return nil, nil, err
	}
	// create chain
	registry := protocol.Registry{}
	bc := blockchain.NewBlockchain(
		cfg,
		blockchain.PrecreatedStateFactoryOption(sf),
		blockchain.InMemDaoOption(),
		blockchain.RegistryOption(&registry),
		blockchain.EnableExperimentalActions(),
	)
	if bc == nil {
		return nil, nil, errors.New("failed to create blockchain")
	}
	acc := account.NewProtocol()
	v := vote.NewProtocol(bc)
	evm := execution.NewProtocol(bc)
	p := poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
	rolldposProtocol := rolldpos.NewProtocol(
		genesis.Default.NumCandidateDelegates,
		genesis.Default.NumDelegates,
		genesis.Default.NumSubEpochs,
	)
	r := rewarding.NewProtocol(bc, rolldposProtocol)
	if err := registry.Register(rolldpos.ProtocolID, rolldposProtocol); err != nil {
		return nil, nil, err
	}
	if err := registry.Register(account.ProtocolID, acc); err != nil {
		return nil, nil, err
	}
	if err := registry.Register(vote.ProtocolID, v); err != nil {
		return nil, nil, err
	}
	if err := registry.Register(execution.ProtocolID, evm); err != nil {
		return nil, nil, err
	}
	if err := registry.Register(rewarding.ProtocolID, r); err != nil {
		return nil, nil, err
	}
	if err := registry.Register(poll.ProtocolID, p); err != nil {
		return nil, nil, err
	}
	// poll protocol is registered but has no action handler/validator
	sf.AddActionHandlers(acc, v, evm, r)
	bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
	bc.Validator().AddActionValidators(acc, v, evm, r)
	return bc, &registry, nil
}
// setupActPool creates an actpool bound to the given chain, with the generic
// envelope validator plus the vote and execution action validators attached.
func setupActPool(bc blockchain.Blockchain, cfg config.ActPool) (actpool.ActPool, error) {
	ap, err := actpool.NewActPool(bc, cfg, actpool.EnableExperimentalActions())
	if err != nil {
		return nil, err
	}
	ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
	ap.AddActionValidators(vote.NewProtocol(bc), execution.NewProtocol(bc))
	return ap, nil
}
// newConfig returns a default test configuration backed by throwaway temp
// files for the trie and chain databases, with the gateway plugin enabled.
func newConfig() config.Config {
	cfg := config.Default
	// NOTE(review): TempFile errors are silently ignored here; presumably a
	// failure would only surface later when the chain opens these paths.
	testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
	testTriePath := testTrieFile.Name()
	testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
	testDBPath := testDBFile.Name()
	cfg.Plugins[config.GatewayPlugin] = true
	cfg.Chain.TrieDBPath = testTriePath
	cfg.Chain.ChainDBPath = testDBPath
	cfg.Chain.EnableAsyncIndexWrite = false
	cfg.Genesis.EnableGravityChainVoting = true
	cfg.ActPool.MinGasPriceStr = "0"
	return cfg
}
// createServer builds a fully-populated API server for tests: it starts an
// in-memory chain, seeds the producer account, adds the canned test blocks and
// (optionally) an actpool pre-loaded with pending actions.
// When needActPool is false the server's ap field is left nil.
func createServer(cfg config.Config, needActPool bool) (*Server, error) {
	bc, registry, err := setupChain(cfg)
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	// Start blockchain
	if err := bc.Start(ctx); err != nil {
		return nil, err
	}
	// Create state for producer
	if err := addProducerToFactory(bc.GetFactory()); err != nil {
		return nil, err
	}
	// Add testing blocks
	if err := addTestingBlocks(bc); err != nil {
		return nil, err
	}
	var ap actpool.ActPool
	if needActPool {
		ap, err = setupActPool(bc, cfg.ActPool)
		if err != nil {
			return nil, err
		}
		// Add actions to actpool
		if err := addActsToActPool(ap); err != nil {
			return nil, err
		}
	}
	// Only the fields the API server actually reads are copied over.
	apiCfg := config.API{TpsWindow: cfg.API.TpsWindow, GasStation: cfg.API.GasStation}
	svr := &Server{
		bc:       bc,
		ap:       ap,
		cfg:      apiCfg,
		gs:       gasstation.NewGasStation(bc, apiCfg),
		registry: registry,
	}
	return svr, nil
}
| 1 | 17,260 | `testExecutionPb` is a global variable (from `gochecknoglobals`) | iotexproject-iotex-core | go |
@@ -127,17 +127,6 @@ class ImageProvider extends FileProvider
return array_merge($params, $options);
}
- /**
- * {@inheritdoc}
- */
- public function getReferenceImage(MediaInterface $media)
- {
- return sprintf('%s/%s',
- $this->generatePath($media),
- $media->getProviderReference()
- );
- }
-
/**
* {@inheritdoc}
*/ | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Provider;
use Gaufrette\Filesystem;
use Imagine\Image\ImagineInterface;
use Sonata\CoreBundle\Model\Metadata;
use Sonata\MediaBundle\CDN\CDNInterface;
use Sonata\MediaBundle\Generator\GeneratorInterface;
use Sonata\MediaBundle\Metadata\MetadataBuilderInterface;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Thumbnail\ThumbnailInterface;
use Symfony\Component\HttpFoundation\File\File;
use Symfony\Component\HttpFoundation\File\UploadedFile;
/**
 * Media provider for images. On top of the generic file handling inherited
 * from FileProvider it reads image dimensions via Imagine and builds
 * responsive <img> attributes (srcset/sizes) for templates.
 */
class ImageProvider extends FileProvider
{
    /**
     * @var ImagineInterface
     */
    protected $imagineAdapter;
    /**
     * @param string                   $name
     * @param Filesystem               $filesystem
     * @param CDNInterface             $cdn
     * @param GeneratorInterface       $pathGenerator
     * @param ThumbnailInterface       $thumbnail
     * @param array                    $allowedExtensions
     * @param array                    $allowedMimeTypes
     * @param ImagineInterface         $adapter
     * @param MetadataBuilderInterface $metadata
     */
    public function __construct($name, Filesystem $filesystem, CDNInterface $cdn, GeneratorInterface $pathGenerator, ThumbnailInterface $thumbnail, array $allowedExtensions, array $allowedMimeTypes, ImagineInterface $adapter, MetadataBuilderInterface $metadata = null)
    {
        parent::__construct($name, $filesystem, $cdn, $pathGenerator, $thumbnail, $allowedExtensions, $allowedMimeTypes, $metadata);
        $this->imagineAdapter = $adapter;
    }
    /**
     * {@inheritdoc}
     */
    public function getProviderMetadata()
    {
        return new Metadata($this->getName(), $this->getName().'.description', false, 'SonataMediaBundle', ['class' => 'fa fa-picture-o']);
    }
    /**
     * {@inheritdoc}
     *
     * Builds the attribute list for rendering an <img> tag: alt/title/src plus
     * width/height for the requested format, and (outside the admin format) a
     * srcset/sizes pair covering the formats of the media's context.
     */
    public function getHelperProperties(MediaInterface $media, $format, $options = [])
    {
        if (MediaProviderInterface::FORMAT_REFERENCE === $format) {
            // The reference format uses the stored dimensions of the original upload.
            $box = $media->getBox();
        } else {
            $resizerFormat = $this->getFormat($format);
            if (false === $resizerFormat) {
                throw new \RuntimeException(sprintf('The image format "%s" is not defined.
                        Is the format registered in your ``sonata_media`` configuration?', $format));
            }
            $box = $this->resizer->getBox($media, $resizerFormat);
        }
        $mediaWidth = $box->getWidth();
        $params = [
            'alt' => $media->getName(),
            'title' => $media->getName(),
            'src' => $this->generatePublicUrl($media, $format),
            'width' => $mediaWidth,
            'height' => $box->getHeight(),
        ];
        if (MediaProviderInterface::FORMAT_ADMIN !== $format) {
            $srcSetFormats = $this->getFormats();
            // An array 'srcset' option restricts the candidate formats; a
            // non-array 'srcset' is left alone and merged into $params below.
            if (isset($options['srcset']) && is_array($options['srcset'])) {
                $srcSetFormats = [];
                foreach ($options['srcset'] as $srcSetFormat) {
                    $formatName = $this->getFormatName($media, $srcSetFormat);
                    $srcSetFormats[$formatName] = $this->getFormat($formatName);
                }
                unset($options['srcset']);
                // Make sure the requested format is also in the srcSetFormats
                if (!isset($srcSetFormats[$format])) {
                    $srcSetFormats[$format] = $this->getFormat($format);
                }
            }
            if (!isset($options['srcset'])) {
                $srcSet = [];
                foreach ($srcSetFormats as $providerFormat => $settings) {
                    // Check if format belongs to the current media's context
                    if (0 === strpos($providerFormat, $media->getContext())) {
                        $width = $this->resizer->getBox($media, $settings)->getWidth();
                        $srcSet[] = sprintf('%s %dw', $this->generatePublicUrl($media, $providerFormat), $width);
                    }
                }
                // The reference format is not in the formats list
                $srcSet[] = sprintf(
                    '%s %dw',
                    $this->generatePublicUrl($media, MediaProviderInterface::FORMAT_REFERENCE),
                    $media->getBox()->getWidth()
                );
                $params['srcset'] = implode(', ', $srcSet);
            }
            $params['sizes'] = sprintf('(max-width: %1$dpx) 100vw, %1$dpx', $mediaWidth);
        }
        return array_merge($params, $options);
    }
    /**
     * {@inheritdoc}
     */
    public function getReferenceImage(MediaInterface $media)
    {
        return sprintf('%s/%s',
            $this->generatePath($media),
            $media->getProviderReference()
        );
    }
    /**
     * {@inheritdoc}
     *
     * Reads the image's byte size and pixel dimensions into the media object.
     * If Imagine throws a LogicException the media is flagged STATUS_ERROR
     * with zeroed size/dimensions instead of bubbling the error up.
     */
    public function updateMetadata(MediaInterface $media, $force = true)
    {
        try {
            if (!$media->getBinaryContent() instanceof \SplFileInfo) {
                // This is not optimized at all: the full reference file is copied
                // to a local temp file just so its dimensions can be read.
                $path = tempnam(sys_get_temp_dir(), 'sonata_update_metadata');
                $fileObject = new \SplFileObject($path, 'w');
                $fileObject->fwrite($this->getReferenceFile($media)->getContent());
            } else {
                $fileObject = $media->getBinaryContent();
            }
            $image = $this->imagineAdapter->open($fileObject->getPathname());
            $size = $image->getSize();
            $media->setSize($fileObject->getSize());
            $media->setWidth($size->getWidth());
            $media->setHeight($size->getHeight());
        } catch (\LogicException $e) {
            $media->setProviderStatus(MediaInterface::STATUS_ERROR);
            $media->setSize(0);
            $media->setWidth(0);
            $media->setHeight(0);
        }
    }
    /**
     * {@inheritdoc}
     */
    public function generatePublicUrl(MediaInterface $media, $format)
    {
        if (MediaProviderInterface::FORMAT_REFERENCE === $format) {
            $path = $this->getReferenceImage($media);
        } else {
            $path = $this->thumbnail->generatePublicUrl($this, $media, $format);
        }
        // if $path is already an url, no further action is required
        if (null !== parse_url($path, PHP_URL_SCHEME)) {
            return $path;
        }
        return $this->getCdn()->getPath($path, $media->getCdnIsFlushable());
    }
    /**
     * {@inheritdoc}
     */
    public function generatePrivateUrl(MediaInterface $media, $format)
    {
        return $this->thumbnail->generatePrivateUrl($this, $media, $format);
    }
    /**
     * {@inheritdoc}
     *
     * Validates the upload's extension/mime type and stores its pixel
     * dimensions; sets STATUS_ERROR if the file can't be opened as an image,
     * and returns silently for disallowed types (status left untouched).
     */
    protected function doTransform(MediaInterface $media)
    {
        parent::doTransform($media);
        if ($media->getBinaryContent() instanceof UploadedFile) {
            $fileName = $media->getBinaryContent()->getClientOriginalName();
        } elseif ($media->getBinaryContent() instanceof File) {
            $fileName = $media->getBinaryContent()->getFilename();
        } else {
            // Should not happen, FileProvider should throw an exception in that case
            return;
        }
        if (!in_array(strtolower(pathinfo($fileName, PATHINFO_EXTENSION)), $this->allowedExtensions)
            || !in_array($media->getBinaryContent()->getMimeType(), $this->allowedMimeTypes)) {
            return;
        }
        try {
            $image = $this->imagineAdapter->open($media->getBinaryContent()->getPathname());
        } catch (\RuntimeException $e) {
            $media->setProviderStatus(MediaInterface::STATUS_ERROR);
            return;
        }
        $size = $image->getSize();
        $media->setWidth($size->getWidth());
        $media->setHeight($size->getHeight());
        $media->setProviderStatus(MediaInterface::STATUS_OK);
    }
}
| 1 | 10,301 | ~Method is public and removing it would be a BC Break, you can deprecate it if you want.~ | sonata-project-SonataMediaBundle | php |
@@ -113,7 +113,7 @@ func (i *interpreter) interpretStatements(s *scope, statements []*Statement) (re
} else {
err = fmt.Errorf("%s", r)
}
- log.Debug("%s", debug.Stack())
+ log.Debug("%v:\n %s", err, debug.Stack())
}
}()
return s.interpretStatements(statements), nil // Would have panicked if there was an error | 1 | package asp
import (
"context"
"fmt"
"reflect"
"runtime/debug"
"runtime/pprof"
"strings"
"sync"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
// An interpreter holds the package-independent state about our parsing process.
type interpreter struct {
	scope       *scope            // root scope holding the global builtins
	parser      *Parser           // the parser that owns this interpreter
	subincludes map[string]pyDict // cached globals from subincluded files, keyed by path; guarded by mutex
	config      map[*core.Configuration]*pyConfig // CONFIG objects per build configuration; guarded by configMutex
	mutex       sync.RWMutex
	configMutex sync.RWMutex
	breakpointMutex sync.Mutex
	limiter   semaphore // sized from cfg Parse.NumThreads
	profiling bool      // when true, calls get wrapped in pprof labels (see callObject)
}
// newInterpreter creates and returns a new interpreter instance.
// It loads all the builtin rules at this point.
func newInterpreter(state *core.BuildState, p *Parser) *interpreter {
	s := &scope{
		ctx:    context.Background(),
		state:  state,
		locals: map[string]pyObject{},
	}
	i := &interpreter{
		scope:       s,
		parser:      p,
		subincludes: map[string]pyDict{},
		config:      map[*core.Configuration]*pyConfig{},
		limiter:     make(semaphore, state.Config.Parse.NumThreads),
		profiling:   state.Config.Profiling,
	}
	// The root scope needs its interpreter back-pointer set before singletons
	// load, since LoadSingletons reaches through it for the CONFIG object.
	s.interpreter = i
	s.LoadSingletons(state)
	return i
}
// LoadBuiltins loads a set of builtins from a file, optionally with its contents.
// Exactly one of statements / contents / the file on disk is used, in that
// order of preference.
func (i *interpreter) LoadBuiltins(filename string, contents []byte, statements []*Statement) error {
	s := i.scope.NewScope()
	// Gentle hack - attach the native code once we have loaded the correct file.
	// Needs to be after this file is loaded but before any of the others that will
	// use functions from it.
	if filename == "builtins.build_defs" {
		defer registerBuiltins(s)
	} else if filename == "misc_rules.build_defs" {
		defer registerSubincludePackage(s)
	} else if filename == "config_rules.build_defs" {
		defer setNativeCode(s, "select", selectFunc)
	}
	// Merge everything this file defined into the root scope (public names only).
	defer i.scope.SetAll(s.Freeze(), true)
	if statements != nil {
		_, err := i.interpretStatements(s, statements)
		return err
	} else if len(contents) != 0 {
		stmts, err := i.parser.ParseData(contents, filename)
		// Builtin functions are keyword-only unless specifically whitelisted.
		for _, stmt := range stmts {
			if stmt.FuncDef != nil {
				stmt.FuncDef.KeywordsOnly = !whitelistedKwargs(stmt.FuncDef.Name, filename)
				stmt.FuncDef.IsBuiltin = true
			}
		}
		return i.loadBuiltinStatements(s, stmts, err)
	}
	stmts, err := i.parser.parse(filename)
	return i.loadBuiltinStatements(s, stmts, err)
}
// loadBuiltinStatements loads statements as builtins.
// err is threaded through from the preceding parse so callers can chain the two.
func (i *interpreter) loadBuiltinStatements(s *scope, statements []*Statement, err error) error {
	if err != nil {
		return err
	}
	i.optimiseExpressions(statements)
	_, err = i.interpretStatements(s, i.parser.optimise(statements))
	return err
}
// interpretAll runs a series of statements in the context of the given package.
// The first return value is for testing only.
func (i *interpreter) interpretAll(pkg *core.Package, statements []*Statement) (s *scope, err error) {
	s = i.scope.NewPackagedScope(pkg, 1)
	// Config needs a little separate tweaking.
	// Annoyingly we'd like to not have to do this at all, but it's very hard to handle
	// mutating operations like .setdefault() otherwise.
	s.config = i.pkgConfig(pkg).Copy()
	s.Set("CONFIG", s.config)
	_, err = i.interpretStatements(s, statements)
	if err == nil {
		s.Callback = true // From here on, if anything else uses this scope, it's in a post-build callback.
	}
	return s, err
}
// interpretStatements runs a series of statements in the context of the given scope.
// Any panic raised during interpretation is recovered and converted into the
// returned error.
func (i *interpreter) interpretStatements(s *scope, statements []*Statement) (ret pyObject, err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%s", r)
			}
			// Log the recovered error together with the stack so the two can be
			// correlated in debug output; the stack alone is not identifiable.
			log.Debug("%v:\n%s", err, debug.Stack())
		}
	}()
	return s.interpretStatements(statements), nil // Would have panicked if there was an error
}
// Subinclude returns the global values corresponding to subincluding the given file.
// Results are cached per path; cache misses parse and interpret the file.
func (i *interpreter) Subinclude(path string, label core.BuildLabel, pkg *core.Package) pyDict {
	i.mutex.RLock()
	globals, present := i.subincludes[path]
	i.mutex.RUnlock()
	if present {
		return globals
	}
	// If we get here, it's not been subincluded already. Parse it now.
	// Note that there is a race here whereby it's possible for two packages to parse the same
	// subinclude simultaneously - this doesn't matter since they'll get different but equivalent
	// scopes, and sooner or later things will sort themselves out.
	stmts, err := i.parser.parse(path)
	if err != nil {
		panic(err) // We're already inside another interpreter, which will handle this for us.
	}
	stmts = i.parser.optimise(stmts)
	s := i.scope.NewScope()
	s.contextPkg = pkg
	s.subincludeLabel = &label
	// Scope needs a local version of CONFIG
	s.config = i.scope.config.Copy()
	s.Set("CONFIG", s.config)
	i.optimiseExpressions(stmts)
	s.interpretStatements(stmts)
	locals := s.Freeze()
	if s.config.overlay == nil {
		delete(locals, "CONFIG") // Config doesn't have any local modifications
	}
	i.mutex.Lock()
	defer i.mutex.Unlock()
	i.subincludes[path] = locals
	// NB: locals and s.locals refer to the same map; Freeze returns the scope's own locals.
	return s.locals
}
// getConfig returns the (lazily-created, cached) configuration object for the
// given build state. It is safe for concurrent use.
func (i *interpreter) getConfig(state *core.BuildState) *pyConfig {
	i.configMutex.RLock()
	if c, present := i.config[state.Config]; present {
		i.configMutex.RUnlock()
		return c
	}
	i.configMutex.RUnlock()
	i.configMutex.Lock()
	defer i.configMutex.Unlock()
	// Re-check under the write lock: another goroutine may have created the
	// config between our RUnlock and Lock above. Without this, two callers
	// could end up holding distinct *pyConfig instances for the same state.
	if c, present := i.config[state.Config]; present {
		return c
	}
	c := newConfig(state)
	i.config[state.Config] = c
	return c
}
// pkgConfig returns a new configuration object for the given package.
// Packages in a subrepo with their own build state get that state's config;
// everything else uses the root interpreter's state.
func (i *interpreter) pkgConfig(pkg *core.Package) *pyConfig {
	if pkg.Subrepo != nil && pkg.Subrepo.State != nil {
		return i.getConfig(pkg.Subrepo.State)
	}
	return i.getConfig(i.scope.state)
}
// optimiseExpressions implements a peephole optimiser for expressions by precalculating constants
// and identifying simple local variable lookups.
func (i *interpreter) optimiseExpressions(stmts []*Statement) {
	WalkAST(stmts, func(expr *Expression) bool {
		if constant := i.scope.Constant(expr); constant != nil {
			expr.Optimised = &OptimisedExpression{Constant: constant} // Extract constant expression
			expr.Val = nil
			return false
		} else if expr.Val != nil && expr.Val.Ident != nil && expr.Val.Call == nil && expr.Op == nil && expr.If == nil && len(expr.Val.Slices) == 0 {
			// A bare identifier with no call/operator/slice attached can be
			// resolved as a plain local lookup or a CONFIG property access.
			if expr.Val.Property == nil && len(expr.Val.Ident.Action) == 0 {
				expr.Optimised = &OptimisedExpression{Local: expr.Val.Ident.Name}
				return false
			} else if expr.Val.Ident.Name == "CONFIG" && len(expr.Val.Ident.Action) == 1 && expr.Val.Ident.Action[0].Property != nil && len(expr.Val.Ident.Action[0].Property.Action) == 0 {
				expr.Optimised = &OptimisedExpression{Config: expr.Val.Ident.Action[0].Property.Name}
				expr.Val = nil
				return false
			}
		}
		// Returning true tells WalkAST to keep descending into children.
		return true
	})
}
// A scope contains all the information about a lexical scope.
type scope struct {
	ctx         context.Context
	interpreter *interpreter
	state       *core.BuildState
	pkg         *core.Package // the package being interpreted (nil for builtins)
	parent      *scope        // lexically enclosing scope; nil at the root
	locals      pyDict        // variables defined in this scope
	config      *pyConfig     // the CONFIG object visible to this scope
	globber     *fs.Globber
	// True if this scope is for a pre- or post-build callback.
	Callback bool
	// used during subincludes
	contextPkg *core.Package
	// The label that was passed to subinclude(...)
	subincludeLabel *core.BuildLabel
}
// NewScope creates a new child scope of this one.
func (s *scope) NewScope() *scope {
	return s.NewPackagedScope(s.pkg, 0)
}
// NewPackagedScope creates a new child scope of this one pointing to the given package.
// hint is a size hint for the new set of locals.
func (s *scope) NewPackagedScope(pkg *core.Package, hint int) *scope {
	s2 := &scope{
		ctx:         s.ctx,
		interpreter: s.interpreter,
		state:       s.state,
		pkg:         pkg,
		contextPkg:  pkg,
		parent:      s,
		locals:      make(pyDict, hint),
		config:      s.config,
		Callback:    s.Callback,
	}
	// Packages in a subrepo with their own state use that instead of the parent's.
	if pkg != nil && pkg.Subrepo != nil && pkg.Subrepo.State != nil {
		s2.state = pkg.Subrepo.State
	}
	return s2
}
// Error emits an error that stops further interpretation.
// For convenience it is declared to return a pyObject but it never actually returns.
func (s *scope) Error(msg string, args ...interface{}) pyObject {
	panic(fmt.Errorf(msg, args...))
}
// Assert emits an error that stops further interpretation if the given condition is false.
func (s *scope) Assert(condition bool, msg string, args ...interface{}) {
	if !condition {
		s.Error(msg, args...)
	}
}
// NAssert is the inverse of Assert, it emits an error if the given condition is true.
func (s *scope) NAssert(condition bool, msg string, args ...interface{}) {
	if condition {
		s.Error(msg, args...)
	}
}
// Lookup looks up a variable name in this scope, walking back up its ancestor scopes as needed.
// It panics (via Error) if the variable is not defined anywhere in the chain.
func (s *scope) Lookup(name string) pyObject {
	if obj, present := s.locals[name]; present {
		return obj
	} else if s.parent != nil {
		return s.parent.Lookup(name)
	}
	return s.Error("name '%s' is not defined", name)
}
// LocalLookup looks up a variable name in the current scope.
// It does *not* walk back up parent scopes and instead returns nil if the variable could not be found.
// This is typically used for things like function arguments where we're only interested in variables
// in immediate scope.
func (s *scope) LocalLookup(name string) pyObject {
	return s.locals[name]
}
// Set sets the given variable in this scope.
func (s *scope) Set(name string, value pyObject) {
	s.locals[name] = value
}
// SetAll sets all contents of the given dict in this scope.
// Optionally it can filter to just public objects (i.e. those not prefixed with an underscore)
func (s *scope) SetAll(d pyDict, publicOnly bool) {
	for k, v := range d {
		if k == "CONFIG" {
			// Special case; need to merge config entries rather than overwriting the entire object.
			c, ok := v.(*pyFrozenConfig)
			s.Assert(ok, "incoming CONFIG isn't a config object")
			s.config.Merge(c)
		} else if !publicOnly || k[0] != '_' {
			s.locals[k] = v
		}
	}
}
// Freeze freezes the contents of this scope, preventing mutable objects from being changed.
// It returns the newly frozen set of locals (the scope's own map, not a copy).
func (s *scope) Freeze() pyDict {
	for k, v := range s.locals {
		if f, ok := v.(freezable); ok {
			s.locals[k] = f.Freeze()
		}
	}
	return s.locals
}
// LoadSingletons loads the global builtin singletons into this scope.
// state may be nil, in which case no CONFIG object is attached.
func (s *scope) LoadSingletons(state *core.BuildState) {
	s.Set("True", True)
	s.Set("False", False)
	s.Set("None", None)
	if state != nil {
		s.config = s.interpreter.getConfig(state)
		s.Set("CONFIG", s.config)
	}
}
// interpretStatements interprets a series of statements in a particular scope.
// Note that the return value is only non-nil if a return statement is encountered;
// it is not implicitly the result of the last statement or anything like that.
func (s *scope) interpretStatements(statements []*Statement) pyObject {
	var stmt *Statement
	// stmt is captured by the deferred closure so any panic is annotated with
	// the position of the statement that was executing at the time.
	defer func() {
		if r := recover(); r != nil {
			panic(AddStackFrame(stmt.Pos, r))
		}
	}()
	for _, stmt = range statements {
		if stmt.FuncDef != nil {
			s.Set(stmt.FuncDef.Name, newPyFunc(s, stmt.FuncDef))
		} else if stmt.If != nil {
			if ret := s.interpretIf(stmt.If); ret != nil {
				return ret
			}
		} else if stmt.For != nil {
			if ret := s.interpretFor(stmt.For); ret != nil {
				return ret
			}
		} else if stmt.Return != nil {
			if len(stmt.Return.Values) == 0 {
				return None
			} else if len(stmt.Return.Values) == 1 {
				return s.interpretExpression(stmt.Return.Values[0])
			}
			// Multiple return values become a list.
			return pyList(s.evaluateExpressions(stmt.Return.Values))
		} else if stmt.Ident != nil {
			s.interpretIdentStatement(stmt.Ident)
		} else if stmt.Assert != nil {
			if !s.interpretExpression(stmt.Assert.Expr).IsTruthy() {
				if stmt.Assert.Message == nil {
					s.Error("assertion failed")
				} else {
					s.Error(s.interpretExpression(stmt.Assert.Message).String())
				}
			}
		} else if stmt.Raise != nil {
			log.Warning("The raise keyword is deprecated, please use fail() instead. See https://github.com/thought-machine/please/issues/1598 for more information.")
			s.Error(s.interpretExpression(stmt.Raise).String())
		} else if stmt.Literal != nil {
			s.interpretExpression(stmt.Literal)
		} else if stmt.Continue {
			// This is definitely awkward since we need to control a for loop that's happening in a function outside this scope.
			return continueIteration
		} else if stmt.Pass {
			continue // Nothing to do...
		} else {
			s.Error("Unknown statement") // Shouldn't happen, amirite?
		}
	}
	return nil
}
// interpretIf evaluates an if/elif/else chain. The return value is non-nil
// only if the taken branch executed a return (or continue) statement.
func (s *scope) interpretIf(stmt *IfStatement) pyObject {
	if s.interpretExpression(&stmt.Condition).IsTruthy() {
		return s.interpretStatements(stmt.Statements)
	}
	for _, elif := range stmt.Elif {
		if s.interpretExpression(&elif.Condition).IsTruthy() {
			return s.interpretStatements(elif.Statements)
		}
	}
	return s.interpretStatements(stmt.ElseStatements)
}
// interpretFor evaluates a for loop, consuming the continueIteration sentinel
// that a `continue` statement in the body returns.
func (s *scope) interpretFor(stmt *ForStatement) pyObject {
	for _, li := range s.iterate(&stmt.Expr) {
		s.unpackNames(stmt.Names, li)
		if ret := s.interpretStatements(stmt.Statements); ret != nil {
			if s, ok := ret.(pySentinel); ok && s == continueIteration {
				continue
			}
			return ret
		}
	}
	return nil
}
// interpretExpression evaluates a single expression to a concrete value,
// honouring any optimisation recorded on it by optimiseExpressions.
func (s *scope) interpretExpression(expr *Expression) pyObject {
	// Check the optimised sites first
	if expr.Optimised != nil {
		if expr.Optimised.Constant != nil {
			return expr.Optimised.Constant
		} else if expr.Optimised.Local != "" {
			return s.Lookup(expr.Optimised.Local)
		}
		return s.config.Property(expr.Optimised.Config)
	}
	// Annotate any panic with this expression's position.
	defer func() {
		if r := recover(); r != nil {
			panic(AddStackFrame(expr.Pos, r))
		}
	}()
	if expr.If != nil && !s.interpretExpression(expr.If.Condition).IsTruthy() {
		return s.interpretExpression(expr.If.Else)
	}
	var obj pyObject
	if expr.Val != nil {
		obj = s.interpretValueExpression(expr.Val)
	} else if expr.UnaryOp != nil {
		obj = s.interpretValueExpression(&expr.UnaryOp.Expr)
		if expr.UnaryOp.Op == "not" {
			obj = s.negate(obj)
		} else {
			// The only other unary operator is numeric negation.
			i, ok := obj.(pyInt)
			s.Assert(ok, "Unary - can only be applied to an integer")
			obj = pyInt(-int(i))
		}
	}
	for _, op := range expr.Op {
		switch op.Op {
		case And, Or:
			// Careful here to mimic lazy-evaluation semantics (import for `x = x or []` etc)
			if obj.IsTruthy() == (op.Op == And) {
				obj = s.interpretExpression(op.Expr)
			}
		case Equal:
			obj = newPyBool(reflect.DeepEqual(obj, s.interpretExpression(op.Expr)))
		case NotEqual:
			obj = newPyBool(!reflect.DeepEqual(obj, s.interpretExpression(op.Expr)))
		case Is:
			obj = s.interpretIs(obj, op)
		case IsNot:
			obj = s.negate(s.interpretIs(obj, op))
		case In, NotIn:
			// the implementation of in is defined by the right-hand side, not the left.
			obj = s.interpretExpression(op.Expr).Operator(op.Op, obj)
		default:
			obj = obj.Operator(op.Op, s.interpretExpression(op.Expr))
		}
	}
	return obj
}
// interpretIs implements the `is` operator; it only has meaning here for
// None and booleans, everything else compares false.
func (s *scope) interpretIs(obj pyObject, op OpExpression) pyObject {
	// Is only works None or boolean types.
	expr := s.interpretExpression(op.Expr)
	switch tobj := obj.(type) {
	case pyNone:
		_, ok := expr.(pyNone)
		return newPyBool(ok)
	case pyBool:
		b, ok := expr.(pyBool)
		return newPyBool(ok && b == tobj)
	default:
		return newPyBool(false)
	}
}
// negate returns the boolean inverse of the given object's truthiness.
func (s *scope) negate(obj pyObject) pyBool {
	if obj.IsTruthy() {
		return False
	}
	return True
}
// interpretValueExpression evaluates a value expression, then applies any
// slices/indexing, property accesses or calls attached to it.
func (s *scope) interpretValueExpression(expr *ValueExpression) pyObject {
	obj := s.interpretValueExpressionPart(expr)
	for _, sl := range expr.Slices {
		if sl.Colon == "" {
			// Indexing, much simpler...
			s.Assert(sl.End == nil, "Invalid syntax")
			obj = obj.Operator(Index, s.interpretExpression(sl.Start))
		} else {
			obj = s.interpretSlice(obj, sl)
		}
	}
	if expr.Property != nil {
		obj = s.interpretIdent(obj.Property(expr.Property.Name), expr.Property)
	} else if expr.Call != nil {
		obj = s.callObject("", obj, expr.Call)
	}
	return obj
}
// interpretValueExpressionPart evaluates the core of a value expression
// (identifier, literal, list, dict, tuple or lambda) without trailing actions.
func (s *scope) interpretValueExpressionPart(expr *ValueExpression) pyObject {
	if expr.Ident != nil {
		obj := s.Lookup(expr.Ident.Name)
		if len(expr.Ident.Action) == 0 {
			return obj // fast path
		}
		return s.interpretIdent(obj, expr.Ident)
	} else if expr.String != "" {
		// Strings are surrounded by quotes to make it easier for the parser; here they come off again.
		return pyString(stringLiteral(expr.String))
	} else if expr.FString != nil {
		return s.interpretFString(expr.FString)
	} else if expr.IsInt {
		return pyInt(expr.Int)
	} else if expr.True {
		return True
	} else if expr.False {
		return False
	} else if expr.None {
		return None
	} else if expr.List != nil {
		return s.interpretList(expr.List)
	} else if expr.Dict != nil {
		return s.interpretDict(expr.Dict)
	} else if expr.Tuple != nil {
		// Parentheses can also indicate precedence; a single parenthesised expression does not create a list object.
		l := s.interpretList(expr.Tuple)
		if len(l) == 1 && expr.Tuple.Comprehension == nil {
			return l[0]
		}
		return l
	} else if expr.Lambda != nil {
		// A lambda is just an inline function definition with a single return statement.
		stmt := &Statement{}
		stmt.Return = &ReturnStatement{
			Values: []*Expression{&expr.Lambda.Expr},
		}
		return newPyFunc(s, &FuncDef{
			Name:       "<lambda>",
			Arguments:  expr.Lambda.Arguments,
			Statements: []*Statement{stmt},
		})
	}
	return None
}
// interpretFString evaluates an f-string, interpolating each variable (either
// a CONFIG property or a scope lookup) between the literal fragments.
func (s *scope) interpretFString(f *FString) pyObject {
	stringVar := func(v FStringVar) string {
		if v.Config != "" {
			return s.config.MustGet(v.Config).String()
		}
		return s.Lookup(v.Var).String()
	}
	var b strings.Builder
	// Pre-size the builder to avoid reallocation. Note that this evaluates
	// stringVar twice per variable (once for sizing, once when writing).
	size := len(f.Suffix)
	for _, v := range f.Vars {
		size += len(v.Prefix) + len(stringVar(v))
	}
	b.Grow(size)
	for _, v := range f.Vars {
		b.WriteString(v.Prefix)
		b.WriteString(stringVar(v))
	}
	b.WriteString(f.Suffix)
	return pyString(b.String())
}
// interpretSlice implements slicing (x[a:b]) for lists and strings; any other
// type is an error.
func (s *scope) interpretSlice(obj pyObject, sl *Slice) pyObject {
	start := s.interpretSliceExpression(obj, sl.Start, 0)
	switch t := obj.(type) {
	case pyList:
		end := s.interpretSliceExpression(obj, sl.End, pyInt(len(t)))
		return t[start:end]
	case pyString:
		end := s.interpretSliceExpression(obj, sl.End, pyInt(len(t)))
		return t[start:end]
	}
	s.Error("Unsliceable type %s", obj.Type())
	return nil // unreachable; Error panics
}
// interpretSliceExpression interprets one of the begin or end parts of a slice.
// expr may be null, if it is the value of def is used instead.
func (s *scope) interpretSliceExpression(obj pyObject, expr *Expression, def pyInt) pyInt {
	if expr == nil {
		return def
	}
	return pyIndex(obj, s.interpretExpression(expr), true)
}
// interpretIdent applies the chain of actions (property accesses and calls)
// attached to an identifier expression to the given object.
func (s *scope) interpretIdent(obj pyObject, expr *IdentExpr) pyObject {
	name := expr.Name
	for _, action := range expr.Action {
		if action.Property != nil {
			name = action.Property.Name
			obj = s.interpretIdent(obj.Property(name), action.Property)
		} else if action.Call != nil {
			obj = s.callObject(name, obj, action.Call)
		}
	}
	return obj
}
// interpretIdentStatement interprets a statement that begins with an identifier:
// index assignment (x[i] = v / x[i] += v), unpacking assignment (a, b = f()),
// property/call actions, plain assignment and augmented assignment. The return
// value is only non-nil for property/call actions or a bare identifier lookup.
func (s *scope) interpretIdentStatement(stmt *IdentStatement) pyObject {
	if stmt.Index != nil {
		// Need to special-case these, because types are immutable so we can't return a modifiable reference to them.
		obj := s.Lookup(stmt.Name)
		idx := s.interpretExpression(stmt.Index.Expr)
		if stmt.Index.Assign != nil {
			obj.IndexAssign(idx, s.interpretExpression(stmt.Index.Assign))
		} else {
			obj.IndexAssign(idx, obj.Operator(Index, idx).Operator(Add, s.interpretExpression(stmt.Index.AugAssign)))
		}
	} else if stmt.Unpack != nil {
		obj := s.interpretExpression(stmt.Unpack.Expr)
		l, ok := obj.(pyList)
		// Report obj's actual type: on a failed type assertion l is just a
		// zero-valued pyList, so l.Type() could never name the offending type.
		s.Assert(ok, "Cannot unpack type %s", obj.Type())
		// This is a little awkward because the first item here is the name of the ident node.
		s.Assert(len(l) == len(stmt.Unpack.Names)+1, "Wrong number of items to unpack; expected %d, got %d", len(stmt.Unpack.Names)+1, len(l))
		s.Set(stmt.Name, l[0])
		for i, name := range stmt.Unpack.Names {
			s.Set(name, l[i+1])
		}
	} else if stmt.Action != nil {
		if stmt.Action.Property != nil {
			return s.interpretIdent(s.Lookup(stmt.Name).Property(stmt.Action.Property.Name), stmt.Action.Property)
		} else if stmt.Action.Call != nil {
			return s.callObject(stmt.Name, s.Lookup(stmt.Name), stmt.Action.Call)
		} else if stmt.Action.Assign != nil {
			s.Set(stmt.Name, s.interpretExpression(stmt.Action.Assign))
		} else if stmt.Action.AugAssign != nil {
			// The only augmented assignment operation we support is +=, and it's implemented
			// exactly as x += y -> x = x + y since that matches the semantics of Go types.
			s.Set(stmt.Name, s.Lookup(stmt.Name).Operator(Add, s.interpretExpression(stmt.Action.AugAssign)))
		}
	} else {
		return s.Lookup(stmt.Name)
	}
	return nil
}
// interpretList evaluates a list literal or list comprehension.
func (s *scope) interpretList(expr *List) pyList {
	if expr.Comprehension == nil {
		return pyList(s.evaluateExpressions(expr.Values))
	}
	// Comprehensions get a child scope so their loop variables don't leak.
	cs := s.NewScope()
	l := s.iterate(expr.Comprehension.Expr)
	ret := make(pyList, 0, len(l))
	cs.evaluateComprehension(l, expr.Comprehension, func(li pyObject) {
		if len(expr.Values) == 1 {
			ret = append(ret, cs.interpretExpression(expr.Values[0]))
		} else {
			ret = append(ret, pyList(cs.evaluateExpressions(expr.Values)))
		}
	})
	return ret
}
// interpretDict evaluates a dict literal or dict comprehension.
func (s *scope) interpretDict(expr *Dict) pyObject {
	if expr.Comprehension == nil {
		d := make(pyDict, len(expr.Items))
		for _, v := range expr.Items {
			d.IndexAssign(s.interpretExpression(&v.Key), s.interpretExpression(&v.Value))
		}
		return d
	}
	// Comprehensions get a child scope so their loop variables don't leak.
	cs := s.NewScope()
	l := cs.iterate(expr.Comprehension.Expr)
	ret := make(pyDict, len(l))
	cs.evaluateComprehension(l, expr.Comprehension, func(li pyObject) {
		ret.IndexAssign(cs.interpretExpression(&expr.Items[0].Key), cs.interpretExpression(&expr.Items[0].Value))
	})
	return ret
}
// evaluateComprehension handles iterating a comprehension's loops.
// The provided callback function is called with each item to be added to the result.
func (s *scope) evaluateComprehension(l pyList, comp *Comprehension, callback func(pyObject)) {
	if comp.Second != nil {
		// Nested comprehension (for x in a for y in b): the callback receives the inner item.
		for _, li := range l {
			s.unpackNames(comp.Names, li)
			for _, li := range s.iterate(comp.Second.Expr) {
				if s.evaluateComprehensionExpression(comp, comp.Second.Names, li) {
					callback(li)
				}
			}
		}
	} else {
		for _, li := range l {
			if s.evaluateComprehensionExpression(comp, comp.Names, li) {
				callback(li)
			}
		}
	}
}
// evaluateComprehensionExpression runs an expression from a list or dict comprehension, and returns true if the caller
// should continue to use it, or false if it's been filtered out of the comprehension.
func (s *scope) evaluateComprehensionExpression(comp *Comprehension, names []string, li pyObject) bool {
	s.unpackNames(names, li)
	return comp.If == nil || s.interpretExpression(comp.If).IsTruthy()
}
// unpackNames unpacks the given object into this scope.
// A single name is bound directly; multiple names require a list of exactly
// matching length.
func (s *scope) unpackNames(names []string, obj pyObject) {
	if len(names) == 1 {
		s.Set(names[0], obj)
	} else {
		l, ok := obj.(pyList)
		s.Assert(ok, "Cannot unpack %s into %s", obj.Type(), names)
		s.Assert(len(l) == len(names), "Incorrect number of values to unpack; expected %d, got %d", len(names), len(l))
		for i, name := range names {
			s.Set(name, l[i])
		}
	}
}
// iterate returns the result of the given expression as a pyList, which is our only iterable type.
// Frozen lists are unwrapped to their underlying list; anything else errors.
func (s *scope) iterate(expr *Expression) pyList {
	o := s.interpretExpression(expr)
	l, ok := o.(pyList)
	if !ok {
		if l, ok := o.(pyFrozenList); ok {
			return l.pyList
		}
	}
	s.Assert(ok, "Non-iterable type %s; must be a list", o.Type())
	return l
}
// evaluateExpressions runs a series of Python expressions in this scope and creates a series of concrete objects from them.
func (s *scope) evaluateExpressions(exprs []*Expression) []pyObject {
	l := make(pyList, len(exprs))
	for i, v := range exprs {
		l[i] = s.interpretExpression(v)
	}
	return l
}
// stringLiteral strips the surrounding quote characters off a parsed string
// literal, yielding just its contents.
func stringLiteral(s string) string {
	unquoted := s[1:]
	return unquoted[:len(unquoted)-1]
}
// callObject attempts to call the given object
// name is only used for the error message if the object isn't callable.
func (s *scope) callObject(name string, obj pyObject, c *Call) pyObject {
	// We only allow function objects to be called, so don't bother making it part of the pyObject interface.
	f, ok := obj.(*pyFunc)
	if !ok {
		s.Error("Non-callable object '%s' (is a %s)", name, obj.Type())
	}
	if !s.interpreter.profiling {
		return f.Call(s.ctx, s, c)
	}
	// If the CPU profiler is being run, attach the name of the current function in context.
	var ret pyObject
	pprof.Do(s.ctx, pprof.Labels("asp:func", f.name), func(ctx context.Context) {
		ret = f.Call(ctx, s, c)
	})
	return ret
}
// Constant returns an object from an expression that describes a constant,
// e.g. None, "string", 42, [], etc. It returns nil if the expression cannot be determined to be constant.
func (s *scope) Constant(expr *Expression) pyObject {
	// Technically some of these might be constant (e.g. 'a,b,c'.split(',') or `1 if True else 2`).
	// That's probably unlikely to be common though - we could do a generalised constant-folding pass
	// but it's rare that people would write something of that nature in this language.
	if expr.Optimised != nil && expr.Optimised.Constant != nil {
		// Already folded on a previous pass; reuse the cached constant.
		return expr.Optimised.Constant
	} else if expr.Val == nil || len(expr.Val.Slices) != 0 || expr.Val.Property != nil || expr.Val.Call != nil || expr.Op != nil || expr.If != nil {
		// Any slice, property access, call, operator or ternary disqualifies it.
		return nil
	} else if expr.Val.True || expr.Val.False || expr.Val.None || expr.Val.IsInt || expr.Val.String != "" {
		// Simple scalar literals are always constant.
		return s.interpretValueExpression(expr.Val)
	} else if expr.Val.List != nil && expr.Val.List.Comprehension == nil {
		// Lists can be constant if all their elements are also.
		for _, v := range expr.Val.List.Values {
			if s.Constant(v) == nil {
				return nil
			}
		}
		return s.interpretValueExpression(expr.Val)
	} else if expr.Val.FString != nil && len(expr.Val.FString.Vars) == 0 {
		// An f-string with no interpolations is just its literal suffix.
		return pyString(expr.Val.FString.Suffix)
	}
	// N.B. dicts are not optimised to constants currently because they are mutable (because Go maps have
	// pointer semantics). It might be nice to be able to do that later but it is probably not critical -
	// we might also be able to do a more aggressive pass in cases where we know we're passing a constant
	// to a builtin that won't modify it (e.g. calling build_rule with a constant dict).
	return nil
}
// pkgFilename returns the filename of the current package, or the empty string if there is none.
func (s *scope) pkgFilename() string {
	if s.pkg == nil {
		return ""
	}
	return s.pkg.Filename
}
| 1 | 10,020 | this got me a little confused when reading `build.log`. The err is printed with a log.Error later on but that ends up after the stack trace. | thought-machine-please | go |
@@ -157,6 +157,7 @@ std::string FlatCompiler::GetUsageString(const char *program_name) const {
" --include-prefix Prefix this path to any generated include statements.\n"
" PATH\n"
" --keep-prefix Keep original prefix of schema include statement.\n"
+ " --keep-namespaces Keep full namespace paths of imported types in Rust.\n"
" --no-fb-import Don't include flatbuffers import statement for TypeScript.\n"
" --no-ts-reexport Don't re-export imported dependencies for TypeScript.\n"
" --short-names Use short function names for JS and TypeScript.\n" | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flatbuffers/flatc.h"
#include <list>
namespace flatbuffers {
const char *FLATC_VERSION() { return FLATBUFFERS_VERSION(); }
// Parses `contents` (read from `filename`) with `parser`. The file's own
// directory is temporarily appended to `include_directories`, followed by a
// nullptr terminator expected by Parser::Parse; both are popped again before
// returning. A parse failure is fatal via Error(); a message left in
// parser.error_ after a successful parse is reported as a warning.
void FlatCompiler::ParseFile(
    flatbuffers::Parser &parser, const std::string &filename,
    const std::string &contents,
    std::vector<const char *> &include_directories) const {
  auto local_include_directory = flatbuffers::StripFileName(filename);
  include_directories.push_back(local_include_directory.c_str());
  include_directories.push_back(nullptr);  // terminator for the C-style array
  if (!parser.Parse(contents.c_str(), &include_directories[0],
                    filename.c_str())) {
    Error(parser.error_, false, false);
  }
  if (!parser.error_.empty()) { Warn(parser.error_, false); }
  include_directories.pop_back();
  include_directories.pop_back();
}
// Deserializes a binary schema (.bfbs) from `contents` into `parser`.
// A deserialization failure is fatal via Error().
void FlatCompiler::LoadBinarySchema(flatbuffers::Parser &parser,
                                    const std::string &filename,
                                    const std::string &contents) {
  const auto *buf = reinterpret_cast<const uint8_t *>(contents.c_str());
  const bool ok = parser.Deserialize(buf, contents.size());
  if (!ok) { Error("failed to load binary schema: " + filename, false, false); }
}
// Forwards a warning message to the configured warning callback.
void FlatCompiler::Warn(const std::string &warn, bool show_exe_name) const {
  params_.warn_fn(this, warn, show_exe_name);
}
// Forwards an error message to the configured error callback.
// `usage` asks the callback to print the usage string as well.
// NOTE(review): callers in this file assume the callback does not return
// (e.g. it exits) — confirm against the installed error_fn.
void FlatCompiler::Error(const std::string &err, bool usage,
                         bool show_exe_name) const {
  params_.error_fn(this, err, usage, show_exe_name);
}
// Builds the full command-line usage/help text, listing each registered
// generator followed by the common options. Fixes in this revision:
// "--java-checkerframework" was printed with a stray space, and several
// typos ("discared", "file_indentifier", "overrided") are corrected.
std::string FlatCompiler::GetUsageString(const char *program_name) const {
  std::stringstream ss;
  ss << "Usage: " << program_name << " [OPTION]... FILE... [-- FILE...]\n";
  for (size_t i = 0; i < params_.num_generators; ++i) {
    const Generator &g = params_.generators[i];

    std::stringstream full_name;
    full_name << std::setw(16) << std::left << g.generator_opt_long;
    const char *name = g.generator_opt_short ? g.generator_opt_short : "  ";
    const char *help = g.generator_help;

    ss << "  " << full_name.str() << " " << name << "    " << help << ".\n";
  }
  // clang-format off

  // Output width
  // 12345678901234567890123456789012345678901234567890123456789012345678901234567890
  ss <<
    "  -o PATH                 Prefix PATH to all generated files.\n"
    "  -I PATH                 Search for includes in the specified path.\n"
    "  -M                      Print make rules for generated files.\n"
    "  --version               Print the version number of flatc and exit.\n"
    "  --strict-json           Strict JSON: field names must be / will be quoted,\n"
    "                          no trailing commas in tables/vectors.\n"
    "  --allow-non-utf8        Pass non-UTF-8 input through parser and emit nonstandard\n"
    "                          \\x escapes in JSON. (Default is to raise parse error on\n"
    "                          non-UTF-8 input.)\n"
    "  --natural-utf8          Output strings with UTF-8 as human-readable strings.\n"
    "                          By default, UTF-8 characters are printed as \\uXXXX escapes.\n"
    "  --defaults-json         Output fields whose value is the default when\n"
    "                          writing JSON\n"
    "  --unknown-json          Allow fields in JSON that are not defined in the\n"
    "                          schema. These fields will be discarded when generating\n"
    "                          binaries.\n"
    "  --no-prefix             Don\'t prefix enum values with the enum type in C++.\n"
    "  --scoped-enums          Use C++11 style scoped and strongly typed enums.\n"
    "                          also implies --no-prefix.\n"
    "  --gen-includes          (deprecated), this is the default behavior.\n"
    "                          If the original behavior is required (no include\n"
    "                          statements) use --no-includes.\n"
    "  --no-includes           Don\'t generate include statements for included\n"
    "                          schemas the generated file depends on (C++ / Python).\n"
    "  --gen-mutable           Generate accessors that can mutate buffers in-place.\n"
    "  --gen-onefile           Generate single output file for C# and Go.\n"
    "  --gen-name-strings      Generate type name functions for C++ and Rust.\n"
    "  --gen-object-api        Generate an additional object-based API.\n"
    "  --gen-compare           Generate operator== for object-based API types.\n"
    "  --gen-nullable          Add Clang _Nullable for C++ pointer. or @Nullable for Java\n"
    "  --java-checkerframework Add @Pure for Java.\n"
    "  --gen-generated         Add @Generated annotation for Java\n"
    "  --gen-jvmstatic         Add @JvmStatic annotation for Kotlin methods\n"
    "                          in companion object for interop from Java to Kotlin.\n"
    "  --gen-all               Generate not just code for the current schema files,\n"
    "                          but for all files it includes as well.\n"
    "                          If the language uses a single file for output (by default\n"
    "                          the case for C++ and JS), all code will end up in this one\n"
    "                          file.\n"
    "  --cpp-include           Adds an #include in generated file.\n"
    "  --cpp-ptr-type T        Set object API pointer type (default std::unique_ptr).\n"
    "  --cpp-str-type T        Set object API string type (default std::string).\n"
    "                          T::c_str(), T::length() and T::empty() must be supported.\n"
    "                          The custom type also needs to be constructible from std::string\n"
    "                          (see the --cpp-str-flex-ctor option to change this behavior).\n"
    "  --cpp-str-flex-ctor     Don't construct custom string types by passing std::string\n"
    "                          from Flatbuffers, but (char* + length).\n"
    "  --cpp-std CPP_STD       Generate a C++ code using features of selected C++ standard.\n"
    "                          Supported CPP_STD values:\n"
    "                          * 'c++0x' - generate code compatible with old compilers;\n"
    "                          * 'c++11' - use C++11 code generator (default);\n"
    "                          * 'c++17' - use C++17 features in generated code (experimental).\n"
    "  --object-prefix         Customise class prefix for C++ object-based API.\n"
    "  --object-suffix         Customise class suffix for C++ object-based API.\n"
    "                          Default value is \"T\".\n"
    "  --no-js-exports         Removes Node.js style export lines in JS.\n"
    "  --goog-js-export        Uses goog.exports* for closure compiler exporting in JS.\n"
    "  --es6-js-export         Uses ECMAScript 6 export style lines in JS.\n"
    "  --go-namespace          Generate the overridden namespace in Golang.\n"
    "  --go-import             Generate the overridden import for flatbuffers in Golang\n"
    "                          (default is \"github.com/google/flatbuffers/go\").\n"
    "  --raw-binary            Allow binaries without file_identifier to be read.\n"
    "                          This may crash flatc given a mismatched schema.\n"
    "  --size-prefixed         Input binaries are size prefixed buffers.\n"
    "  --proto                 Input is a .proto, translate to .fbs.\n"
    "  --proto-namespace-suffix Add this namespace to any flatbuffers generated\n"
    "    SUFFIX                from protobufs.\n"
    "  --oneof-union           Translate .proto oneofs to flatbuffer unions.\n"
    "  --grpc                  Generate GRPC interfaces for the specified languages.\n"
    "  --schema                Serialize schemas instead of JSON (use with -b).\n"
    "  --bfbs-comments         Add doc comments to the binary schema files.\n"
    "  --bfbs-builtins         Add builtin attributes to the binary schema files.\n"
    "  --bfbs-gen-embed        Generate code to embed the bfbs schema to the source.\n"
    "  --conform FILE          Specify a schema the following schemas should be\n"
    "                          an evolution of. Gives errors if not.\n"
    "  --conform-includes      Include path for the schema given with --conform PATH\n"
    "  --filename-suffix       The suffix appended to the generated file names.\n"
    "                          Default is '_generated'.\n"
    "  --filename-ext          The extension appended to the generated file names.\n"
    "                          Default is language-specific (e.g., '.h' for C++)\n"
    "  --include-prefix        Prefix this path to any generated include statements.\n"
    "    PATH\n"
    "  --keep-prefix           Keep original prefix of schema include statement.\n"
    "  --no-fb-import          Don't include flatbuffers import statement for TypeScript.\n"
    "  --no-ts-reexport        Don't re-export imported dependencies for TypeScript.\n"
    "  --short-names           Use short function names for JS and TypeScript.\n"
    "  --reflect-types         Add minimal type reflection to code generation.\n"
    "  --reflect-names         Add minimal type/name reflection.\n"
    "  --root-type T           Select or override the default root_type\n"
    "  --force-defaults        Emit default values in binary output from JSON\n"
    "  --force-empty           When serializing from object API representation,\n"
    "                          force strings and vectors to empty rather than null.\n"
    "  --force-empty-vectors   When serializing from object API representation,\n"
    "                          force vectors to empty rather than null.\n"
    "  --flexbuffers           Used with \"binary\" and \"json\" options, it generates\n"
    "                          data using schema-less FlexBuffers.\n"
    "FILEs may be schemas (must end in .fbs), binary schemas (must end in .bfbs),\n"
    "or JSON files (conforming to preceding schema). FILEs after the -- must be\n"
    "binary flatbuffer format files.\n"
    "Output files are named using the base file name of the input,\n"
    "and written to the current directory or the path given by -o.\n"
    "example: " << program_name << " -c -b schema1.fbs schema2.fbs data.json\n";
  // 12345678901234567890123456789012345678901234567890123456789012345678901234567890
  // clang-format on
  return ss.str();
}
// Compile is the main driver for flatc: parses the command line, then for
// each input file loads/parses it (schema, binary schema, JSON, or binary
// payload), optionally checks conformity against a reference schema, and
// invokes each enabled generator. Returns 0 on success; all failures are
// routed through Error(), which is expected not to return.
int FlatCompiler::Compile(int argc, const char **argv) {
  if (params_.generators == nullptr || params_.num_generators == 0) {
    return 0;
  }
  flatbuffers::IDLOptions opts;
  std::string output_path;
  bool any_generator = false;
  bool print_make_rules = false;
  bool raw_binary = false;
  bool schema_binary = false;
  bool grpc_enabled = false;
  std::vector<std::string> filenames;
  // std::list gives stable element addresses, so the c_str() pointers pushed
  // into the vectors below stay valid as more paths are appended.
  std::list<std::string> include_directories_storage;
  std::vector<const char *> include_directories;
  std::vector<const char *> conform_include_directories;
  std::vector<bool> generator_enabled(params_.num_generators, false);
  // Index into `filenames` of the first file after "--"; files from that
  // index on are treated as binary flatbuffer payloads, not text.
  size_t binary_files_from = std::numeric_limits<size_t>::max();
  std::string conform_to_schema;
  // Pass 1: option parsing. Anything not starting with '-' is a filename.
  for (int argi = 0; argi < argc; argi++) {
    std::string arg = argv[argi];
    if (arg[0] == '-') {
      if (filenames.size() && arg[1] != '-')
        Error("invalid option location: " + arg, true);
      if (arg == "-o") {
        if (++argi >= argc) Error("missing path following: " + arg, true);
        output_path = flatbuffers::ConCatPathFileName(
            flatbuffers::PosixPath(argv[argi]), "");
      } else if (arg == "-I") {
        if (++argi >= argc) Error("missing path following: " + arg, true);
        include_directories_storage.push_back(
            flatbuffers::PosixPath(argv[argi]));
        include_directories.push_back(
            include_directories_storage.back().c_str());
      } else if (arg == "--conform") {
        if (++argi >= argc) Error("missing path following: " + arg, true);
        conform_to_schema = flatbuffers::PosixPath(argv[argi]);
      } else if (arg == "--conform-includes") {
        if (++argi >= argc) Error("missing path following: " + arg, true);
        include_directories_storage.push_back(
            flatbuffers::PosixPath(argv[argi]));
        conform_include_directories.push_back(
            include_directories_storage.back().c_str());
      } else if (arg == "--include-prefix") {
        if (++argi >= argc) Error("missing path following: " + arg, true);
        opts.include_prefix = flatbuffers::ConCatPathFileName(
            flatbuffers::PosixPath(argv[argi]), "");
      } else if (arg == "--keep-prefix") {
        opts.keep_include_path = true;
      } else if (arg == "--strict-json") {
        opts.strict_json = true;
      } else if (arg == "--allow-non-utf8") {
        opts.allow_non_utf8 = true;
      } else if (arg == "--natural-utf8") {
        opts.natural_utf8 = true;
      } else if (arg == "--no-js-exports") {
        opts.skip_js_exports = true;
      } else if (arg == "--goog-js-export") {
        // goog and ES6 export styles are mutually exclusive; last flag wins.
        opts.use_goog_js_export_format = true;
        opts.use_ES6_js_export_format = false;
      } else if (arg == "--es6-js-export") {
        opts.use_goog_js_export_format = false;
        opts.use_ES6_js_export_format = true;
      } else if (arg == "--go-namespace") {
        if (++argi >= argc) Error("missing golang namespace" + arg, true);
        opts.go_namespace = argv[argi];
      } else if (arg == "--go-import") {
        if (++argi >= argc) Error("missing golang import" + arg, true);
        opts.go_import = argv[argi];
      } else if (arg == "--defaults-json") {
        opts.output_default_scalars_in_json = true;
      } else if (arg == "--unknown-json") {
        opts.skip_unexpected_fields_in_json = true;
      } else if (arg == "--no-prefix") {
        opts.prefixed_enums = false;
      } else if (arg == "--scoped-enums") {
        // Scoped enums imply --no-prefix (see usage text).
        opts.prefixed_enums = false;
        opts.scoped_enums = true;
      } else if (arg == "--no-union-value-namespacing") {
        // Note: this flag is not listed in GetUsageString().
        opts.union_value_namespacing = false;
      } else if (arg == "--gen-mutable") {
        opts.mutable_buffer = true;
      } else if (arg == "--gen-name-strings") {
        opts.generate_name_strings = true;
      } else if (arg == "--gen-object-api") {
        opts.generate_object_based_api = true;
      } else if (arg == "--gen-compare") {
        opts.gen_compare = true;
      } else if (arg == "--cpp-include") {
        if (++argi >= argc) Error("missing include following: " + arg, true);
        opts.cpp_includes.push_back(argv[argi]);
      } else if (arg == "--cpp-ptr-type") {
        if (++argi >= argc) Error("missing type following: " + arg, true);
        opts.cpp_object_api_pointer_type = argv[argi];
      } else if (arg == "--cpp-str-type") {
        if (++argi >= argc) Error("missing type following: " + arg, true);
        opts.cpp_object_api_string_type = argv[argi];
      } else if (arg == "--cpp-str-flex-ctor") {
        opts.cpp_object_api_string_flexible_constructor = true;
      } else if (arg == "--gen-nullable") {
        opts.gen_nullable = true;
      } else if (arg == "--java-checkerframework") {
        opts.java_checkerframework = true;
      } else if (arg == "--gen-generated") {
        opts.gen_generated = true;
      } else if (arg == "--object-prefix") {
        if (++argi >= argc) Error("missing prefix following: " + arg, true);
        opts.object_prefix = argv[argi];
      } else if (arg == "--object-suffix") {
        if (++argi >= argc) Error("missing suffix following: " + arg, true);
        opts.object_suffix = argv[argi];
      } else if (arg == "--gen-all") {
        opts.generate_all = true;
        opts.include_dependence_headers = false;
        opts.reexport_ts_modules = false;
      } else if (arg == "--gen-includes") {
        // Deprecated, remove this option some time in the future.
        Warn("warning: --gen-includes is deprecated (it is now default)\n");
      } else if (arg == "--no-includes") {
        opts.include_dependence_headers = false;
      } else if (arg == "--gen-onefile") {
        opts.one_file = true;
      } else if (arg == "--raw-binary") {
        raw_binary = true;
      } else if (arg == "--size-prefixed") {
        opts.size_prefixed = true;
      } else if (arg == "--") {  // Separator between text and binary inputs.
        binary_files_from = filenames.size();
      } else if (arg == "--proto") {
        opts.proto_mode = true;
      } else if (arg == "--proto-namespace-suffix") {
        if (++argi >= argc) Error("missing namespace suffix" + arg, true);
        opts.proto_namespace_suffix = argv[argi];
      } else if (arg == "--oneof-union") {
        opts.proto_oneof_union = true;
      } else if (arg == "--schema") {
        schema_binary = true;
      } else if (arg == "-M") {
        print_make_rules = true;
      } else if (arg == "--version") {
        printf("flatc version %s\n", FLATC_VERSION());
        exit(0);
      } else if (arg == "--grpc") {
        grpc_enabled = true;
      } else if (arg == "--bfbs-comments") {
        opts.binary_schema_comments = true;
      } else if (arg == "--bfbs-builtins") {
        opts.binary_schema_builtins = true;
      } else if (arg == "--bfbs-gen-embed") {
        opts.binary_schema_gen_embed = true;
      } else if (arg == "--no-fb-import") {
        opts.skip_flatbuffers_import = true;
      } else if (arg == "--no-ts-reexport") {
        opts.reexport_ts_modules = false;
      } else if (arg == "--short-names") {
        opts.js_ts_short_names = true;
      } else if (arg == "--reflect-types") {
        opts.mini_reflect = IDLOptions::kTypes;
      } else if (arg == "--reflect-names") {
        opts.mini_reflect = IDLOptions::kTypesAndNames;
      } else if (arg == "--root-type") {
        if (++argi >= argc) Error("missing type following: " + arg, true);
        opts.root_type = argv[argi];
      } else if (arg == "--filename-suffix") {
        if (++argi >= argc) Error("missing filename suffix: " + arg, true);
        opts.filename_suffix = argv[argi];
      } else if (arg == "--filename-ext") {
        if (++argi >= argc) Error("missing filename extension: " + arg, true);
        opts.filename_extension = argv[argi];
      } else if (arg == "--force-defaults") {
        opts.force_defaults = true;
      } else if (arg == "--force-empty") {
        opts.set_empty_strings_to_null = false;
        opts.set_empty_vectors_to_null = false;
      } else if (arg == "--force-empty-vectors") {
        opts.set_empty_vectors_to_null = false;
      } else if (arg == "--java-primitive-has-method") {
        // Note: this flag is not listed in GetUsageString().
        opts.java_primitive_has_method = true;
      } else if (arg == "--cs-gen-json-serializer") {
        // Note: this flag is not listed in GetUsageString().
        opts.cs_gen_json_serializer = true;
      } else if (arg == "--flexbuffers") {
        opts.use_flexbuffers = true;
      } else if (arg == "--gen-jvmstatic") {
        opts.gen_jvmstatic = true;
      } else if (arg == "--cpp-std") {
        if (++argi >= argc)
          Error("missing C++ standard specification" + arg, true);
        opts.cpp_std = argv[argi];
      } else {
        // Not a common option: try matching one of the registered generators
        // (e.g. --cpp / -c). Multiple generators may be enabled at once.
        for (size_t i = 0; i < params_.num_generators; ++i) {
          if (arg == params_.generators[i].generator_opt_long ||
              (params_.generators[i].generator_opt_short &&
               arg == params_.generators[i].generator_opt_short)) {
            generator_enabled[i] = true;
            any_generator = true;
            opts.lang_to_generate |= params_.generators[i].lang;
            goto found;
          }
        }
        Error("unknown commandline argument: " + arg, true);
        found:;
      }
    } else {
      filenames.push_back(flatbuffers::PosixPath(argv[argi]));
    }
  }
  // Validate the option combination before doing any work.
  if (!filenames.size()) Error("missing input files", false, true);
  if (opts.proto_mode) {
    if (any_generator)
      Error("cannot generate code directly from .proto files", true);
  } else if (!any_generator && conform_to_schema.empty()) {
    Error("no options: specify at least one generator.", true);
  }
  // Load the --conform reference schema (text or binary) if one was given.
  flatbuffers::Parser conform_parser;
  if (!conform_to_schema.empty()) {
    std::string contents;
    if (!flatbuffers::LoadFile(conform_to_schema.c_str(), true, &contents))
      Error("unable to load schema: " + conform_to_schema);

    if (flatbuffers::GetExtension(conform_to_schema) ==
        reflection::SchemaExtension()) {
      LoadBinarySchema(conform_parser, conform_to_schema, contents);
    } else {
      ParseFile(conform_parser, conform_to_schema, contents,
                conform_include_directories);
    }
  }
  // Pass 2: process each input file in order.
  std::unique_ptr<flatbuffers::Parser> parser(new flatbuffers::Parser(opts));
  for (auto file_it = filenames.begin(); file_it != filenames.end();
       ++file_it) {
    auto &filename = *file_it;
    std::string contents;
    if (!flatbuffers::LoadFile(filename.c_str(), true, &contents))
      Error("unable to load file: " + filename);

    // Files after "--" are raw binary payloads for the current schema.
    bool is_binary =
        static_cast<size_t>(file_it - filenames.begin()) >= binary_files_from;
    auto ext = flatbuffers::GetExtension(filename);
    auto is_schema = ext == "fbs" || ext == "proto";
    auto is_binary_schema = ext == reflection::SchemaExtension();
    if (is_binary) {
      parser->builder_.Clear();
      parser->builder_.PushFlatBuffer(
          reinterpret_cast<const uint8_t *>(contents.c_str()),
          contents.length());
      if (!raw_binary) {
        // Generally reading binaries that do not correspond to the schema
        // will crash, and sadly there's no way around that when the binary
        // does not contain a file identifier.
        // We'd expect that typically any binary used as a file would have
        // such an identifier, so by default we require them to match.
        if (!parser->file_identifier_.length()) {
          Error("current schema has no file_identifier: cannot test if \"" +
                filename +
                "\" matches the schema, use --raw-binary to read this file"
                " anyway.");
        } else if (!flatbuffers::BufferHasIdentifier(
                       contents.c_str(), parser->file_identifier_.c_str(),
                       opts.size_prefixed)) {
          Error("binary \"" + filename +
                "\" does not have expected file_identifier \"" +
                parser->file_identifier_ +
                "\", use --raw-binary to read this file anyway.");
        }
      }
    } else {
      // Check if file contains 0 bytes.
      if (!opts.use_flexbuffers && !is_binary_schema &&
          contents.length() != strlen(contents.c_str())) {
        Error("input file appears to be binary: " + filename, true);
      }
      if (is_schema) {
        // If we're processing multiple schemas, make sure to start each
        // one from scratch. If it depends on previous schemas it must do
        // so explicitly using an include.
        parser.reset(new flatbuffers::Parser(opts));
      }
      if (is_binary_schema) {
        LoadBinarySchema(*parser.get(), filename, contents);
      }
      if (opts.use_flexbuffers) {
        if (opts.lang_to_generate == IDLOptions::kJson) {
          parser->flex_root_ = flexbuffers::GetRoot(
              reinterpret_cast<const uint8_t *>(contents.c_str()),
              contents.size());
        } else {
          parser->flex_builder_.Clear();
          ParseFile(*parser.get(), filename, contents, include_directories);
        }
      } else {
        // NOTE(review): for a .bfbs input this branch also runs ParseFile on
        // the binary contents after LoadBinarySchema — confirm intended.
        ParseFile(*parser.get(), filename, contents, include_directories);
        if (!is_schema && !parser->builder_.GetSize()) {
          // If a file doesn't end in .fbs, it must be json/binary. Ensure we
          // didn't just parse a schema with a different extension.
          Error("input file is neither json nor a .fbs (schema) file: " +
                    filename,
                true);
        }
      }
      if ((is_schema || is_binary_schema) && !conform_to_schema.empty()) {
        auto err = parser->ConformTo(conform_parser);
        if (!err.empty()) Error("schemas don\'t conform: " + err);
      }
      if (schema_binary || opts.binary_schema_gen_embed) {
        parser->Serialize();
      }
      if (schema_binary) {
        parser->file_extension_ = reflection::SchemaExtension();
      }
    }

    std::string filebase =
        flatbuffers::StripPath(flatbuffers::StripExtension(filename));

    // Run every enabled generator (or print its make rule) for this file.
    for (size_t i = 0; i < params_.num_generators; ++i) {
      parser->opts.lang = params_.generators[i].lang;
      if (generator_enabled[i]) {
        if (!print_make_rules) {
          flatbuffers::EnsureDirExists(output_path);
          if ((!params_.generators[i].schema_only ||
               (is_schema || is_binary_schema)) &&
              !params_.generators[i].generate(*parser.get(), output_path,
                                              filebase)) {
            Error(std::string("Unable to generate ") +
                  params_.generators[i].lang_name + " for " + filebase);
          }
        } else {
          if (params_.generators[i].make_rule == nullptr) {
            Error(std::string("Cannot generate make rule for ") +
                  params_.generators[i].lang_name);
          } else {
            std::string make_rule = params_.generators[i].make_rule(
                *parser.get(), output_path, filename);
            if (!make_rule.empty())
              printf("%s\n",
                     flatbuffers::WordWrap(make_rule, 80, " ", " \\").c_str());
          }
        }
        if (grpc_enabled) {
          if (params_.generators[i].generateGRPC != nullptr) {
            if (!params_.generators[i].generateGRPC(*parser.get(), output_path,
                                                    filebase)) {
              Error(std::string("Unable to generate GRPC interface for") +
                    params_.generators[i].lang_name);
            }
          } else {
            Warn(std::string("GRPC interface generator not implemented for ") +
                 params_.generators[i].lang_name);
          }
        }
      }
    }

    if (!opts.root_type.empty()) {
      if (!parser->SetRootType(opts.root_type.c_str()))
        Error("unknown root type: " + opts.root_type);
      else if (parser->root_struct_def_->fixed)
        Error("root type must be a table");
    }

    if (opts.proto_mode) GenerateFBS(*parser.get(), output_path, filebase);

    // We do not want to generate code for the definitions in this file
    // in any files coming up next.
    parser->MarkGenerated();
  }
  return 0;
}
} // namespace flatbuffers
| 1 | 19,210 | Can we make these more explicitly for Rust only? It seems like JS and Go use flags with their name in them. | google-flatbuffers | java |
@@ -88,6 +88,9 @@ type URLOpener struct{}
// - file://localhost/c:/foo/bar
// -> Also passes "c:\foo\bar".
func (*URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
+ for param := range u.Query() {
+ return nil, fmt.Errorf("open bucket %q: invalid query parameter %q", u, param)
+ }
return OpenBucket(mungeURLPath(u.Path, os.PathSeparator), nil)
}
| 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileblob provides a blob implementation that uses the filesystem.
// Use OpenBucket to construct a *blob.Bucket.
//
// URLs
//
// For blob.OpenBucket URLs, fileblob registers for the scheme "file"; URLs
// start with "file://" like "file:///path/to/directory". For full details, see
// URLOpener.
//
// Escaping
//
// Go CDK supports all UTF-8 strings; to make this work with providers lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for fileblob:
// - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
// If os.PathSeparator != "/", it is also escaped.
// Additionally, the "/" in "../", the trailing "/" in "//", and a trailing
// "/" in key names are escaped in the same way.
//
// As
//
// fileblob exposes the following types for As:
// - Error: *os.PathError
package fileblob // import "gocloud.dev/blob/fileblob"
import (
"context"
"crypto/hmac"
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/escape"
)
const defaultPageSize = 1000
// init registers fileblob's URLOpener on the default blob URL mux so that
// blob.OpenBucket recognizes "file://" URLs.
func init() {
	blob.DefaultURLMux().RegisterBucket(Scheme, &URLOpener{})
}
// Scheme is the URL scheme fileblob registers its URLOpener under on
// blob.DefaultMux.
const Scheme = "file"
// URLOpener opens file bucket URLs like "file:///foo/bar/baz".
type URLOpener struct{}
// OpenBucketURL opens the file bucket at the URL's path. The URL's host is
// ignored. If os.PathSeparator != "/", any leading "/" from the path is dropped
// and remaining '/' characters are converted to os.PathSeparator.
// No query options are supported; any query parameter is rejected with an
// error (previously they were silently ignored, contradicting this doc).
// Examples:
//
//  - file:///a/directory
//    -> Passes "/a/directory" to OpenBucket.
//  - file://localhost/a/directory
//    -> Also passes "/a/directory".
//  - file:///c:/foo/bar
//    -> Passes "c:\foo\bar".
//  - file://localhost/c:/foo/bar
//    -> Also passes "c:\foo\bar".
func (*URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
	for param := range u.Query() {
		return nil, fmt.Errorf("open bucket %v: invalid query parameter %q", u, param)
	}
	return OpenBucket(mungeURLPath(u.Path, os.PathSeparator), nil)
}
// mungeURLPath converts a URL path into a filesystem path for the given
// separator: with '/' the path is returned untouched; otherwise the leading
// '/' is stripped and every '/' becomes the separator.
func mungeURLPath(path string, pathSeparator byte) string {
	if pathSeparator == '/' {
		return path
	}
	// TODO: use filepath.FromSlash instead; and remove the pathSeparator arg
	// from this function. Test Windows behavior by opening a bucket on Windows.
	// See #1075 for why Windows is disabled.
	trimmed := strings.TrimPrefix(path, "/")
	return strings.Replace(trimmed, "/", string(pathSeparator), -1)
}
// Options sets options for constructing a *blob.Bucket backed by fileblob.
type Options struct {
// URLSigner implements signing URLs (to allow access to a resource without
// further authorization) and verifying that a given URL is unexpired and
// contains a signature produced by the URLSigner.
// URLSigner is only required for utilizing the SignedURL API.
URLSigner URLSigner
}
// bucket implements driver.Bucket on top of a local filesystem directory.
type bucket struct {
	// dir is the cleaned root directory; all keys resolve beneath it.
	dir string
	// opts may be nil — callers pass nil from OpenBucketURL.
	opts *Options
}
// openBucket creates a driver.Bucket that reads and writes to dir.
// dir must exist and be a directory.
func openBucket(dir string, opts *Options) (driver.Bucket, error) {
	cleaned := filepath.Clean(dir)
	info, err := os.Stat(cleaned)
	switch {
	case err != nil:
		return nil, err
	case !info.IsDir():
		return nil, fmt.Errorf("%s is not a directory", cleaned)
	}
	return &bucket{dir: cleaned, opts: opts}, nil
}
// OpenBucket creates a *blob.Bucket backed by the filesystem and rooted at
// dir, which must exist. See the package documentation for an example.
func OpenBucket(dir string, opts *Options) (*blob.Bucket, error) {
	drv, err := openBucket(dir, opts)
	if err == nil {
		return blob.NewBucket(drv), nil
	}
	return nil, err
}
// escapeKey does all required escaping for UTF-8 strings to work the filesystem.
// Each matched rune is hex-escaped ("__0x<hex>__"), and '/' is then replaced
// with os.PathSeparator so keys map onto subdirectories. The predicate below
// must stay in sync with unescapeKey for round-tripping to work.
func escapeKey(s string) string {
	s = escape.HexEscape(s, func(r []rune, i int) bool {
		c := r[i]
		switch {
		// ASCII control characters are never safe in filenames.
		case c < 32:
			return true
		// We're going to replace '/' with os.PathSeparator below. In order for this
		// to be reversible, we need to escape raw os.PathSeparators.
		case os.PathSeparator != '/' && c == os.PathSeparator:
			return true
		// For "../", escape the trailing slash.
		case i > 1 && c == '/' && r[i-1] == '.' && r[i-2] == '.':
			return true
		// For "//", escape the trailing slash.
		case i > 0 && c == '/' && r[i-1] == '/':
			return true
		// Escape the trailing slash in a key.
		case c == '/' && i == len(r)-1:
			return true
		}
		return false
	})
	// Replace "/" with os.PathSeparator if needed, so that the local filesystem
	// can use subdirectories.
	if os.PathSeparator != '/' {
		s = strings.Replace(s, "/", string(os.PathSeparator), -1)
	}
	return s
}
// unescapeKey reverses escapeKey: path separators become '/' again and
// hex escapes are decoded.
func unescapeKey(s string) string {
	if os.PathSeparator != '/' {
		s = strings.Replace(s, string(os.PathSeparator), "/", -1)
	}
	return escape.HexUnescape(s)
}
// errNotImplemented is returned for operations fileblob does not support.
var errNotImplemented = errors.New("not implemented")

// ErrorCode maps provider errors to portable gcerrors codes.
func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode {
	if os.IsNotExist(err) {
		return gcerrors.NotFound
	}
	if err == errNotImplemented {
		return gcerrors.Unimplemented
	}
	return gcerrors.Unknown
}
// path returns the full filesystem path for a key, refusing keys that would
// collide with the sidecar attribute files (suffix attrsExt).
func (b *bucket) path(key string) (string, error) {
	p := filepath.Join(b.dir, escapeKey(key))
	if strings.HasSuffix(p, attrsExt) {
		return "", errAttrsExt
	}
	return p, nil
}
// forKey returns the full path, os.FileInfo, and attributes for key.
// It fails with the first error encountered: an attrsExt-colliding key,
// a missing file (os.Stat), or unreadable sidecar attributes.
func (b *bucket) forKey(key string) (string, os.FileInfo, *xattrs, error) {
	path, err := b.path(key)
	if err != nil {
		return "", nil, nil, err
	}
	info, err := os.Stat(path)
	if err != nil {
		return "", nil, nil, err
	}
	// getAttrs reads the sidecar attribute file for this path.
	xa, err := getAttrs(path)
	if err != nil {
		return "", nil, nil, err
	}
	return path, info, &xa, nil
}
// ListPaged implements driver.ListPaged.
//
// Pagination works by re-walking the whole tree on every call and skipping
// entries <= the page token (which is the last key of the previous page).
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
	var pageToken string
	if len(opts.PageToken) > 0 {
		pageToken = string(opts.PageToken)
	}
	pageSize := opts.PageSize
	if pageSize == 0 {
		pageSize = defaultPageSize
	}
	// If opts.Delimiter != "", lastPrefix contains the last "directory" key we
	// added. It is used to avoid adding it again; all files in this "directory"
	// are collapsed to the single directory entry.
	var lastPrefix string
	// Do a full recursive scan of the root directory.
	var result driver.ListPage
	err := filepath.Walk(b.dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// Couldn't read this file/directory for some reason; just skip it.
			return nil
		}
		// Skip the self-generated attribute files.
		if strings.HasSuffix(path, attrsExt) {
			return nil
		}
		// os.Walk returns the root directory; skip it.
		if path == b.dir {
			return nil
		}
		// Strip the <b.dir> prefix from path; +1 is to include the separator.
		path = path[len(b.dir)+1:]
		// Unescape the path to get the key.
		key := unescapeKey(path)
		// Skip all directories. If opts.Delimiter is set, we'll create
		// pseudo-directories later.
		// Note that returning nil means that we'll still recurse into it;
		// we're just not adding a result for the directory itself.
		if info.IsDir() {
			key += "/"
			// Avoid recursing into subdirectories if the directory name already
			// doesn't match the prefix; any files in it are guaranteed not to match.
			if len(key) > len(opts.Prefix) && !strings.HasPrefix(key, opts.Prefix) {
				return filepath.SkipDir
			}
			// Similarly, avoid recursing into subdirectories if we're making
			// "directories" and all of the files in this subdirectory are guaranteed
			// to collapse to a "directory" that we've already added.
			if lastPrefix != "" && strings.HasPrefix(key, lastPrefix) {
				return filepath.SkipDir
			}
			return nil
		}
		// Skip files/directories that don't match the Prefix.
		if !strings.HasPrefix(key, opts.Prefix) {
			return nil
		}
		var md5 []byte
		if xa, err := getAttrs(path); err == nil {
			// Note: we only have the MD5 hash for blobs that we wrote.
			// For other blobs, md5 will remain nil.
			md5 = xa.MD5
		}
		obj := &driver.ListObject{
			Key:     key,
			ModTime: info.ModTime(),
			Size:    info.Size(),
			MD5:     md5,
		}
		// If using Delimiter, collapse "directories".
		if opts.Delimiter != "" {
			// Strip the prefix, which may contain Delimiter.
			keyWithoutPrefix := key[len(opts.Prefix):]
			// See if the key still contains Delimiter.
			// If no, it's a file and we just include it.
			// If yes, it's a file in a "sub-directory" and we want to collapse
			// all files in that "sub-directory" into a single "directory" result.
			if idx := strings.Index(keyWithoutPrefix, opts.Delimiter); idx != -1 {
				prefix := opts.Prefix + keyWithoutPrefix[0:idx+len(opts.Delimiter)]
				// We've already included this "directory"; don't add it.
				if prefix == lastPrefix {
					return nil
				}
				// Update the object to be a "directory".
				obj = &driver.ListObject{
					Key:   prefix,
					IsDir: true,
				}
				lastPrefix = prefix
			}
		}
		// If there's a pageToken, skip anything before it.
		if pageToken != "" && obj.Key <= pageToken {
			return nil
		}
		// If we've already got a full page of results, set NextPageToken and stop.
		// io.EOF is used as a sentinel to abort the Walk early; it is not an error.
		if len(result.Objects) == pageSize {
			result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
			return io.EOF
		}
		result.Objects = append(result.Objects, obj)
		return nil
	})
	if err != nil && err != io.EOF {
		return nil, err
	}
	return &result, nil
}
// As implements driver.As.
// fileblob exposes no provider-specific types, so this always reports false.
func (b *bucket) As(i interface{}) bool { return false }
// ErrorAs implements driver.ErrorAs. It exposes the underlying *os.PathError,
// if any, through i (which must be of type **os.PathError).
func (b *bucket) ErrorAs(err error, i interface{}) bool {
	perr, ok := err.(*os.PathError)
	if !ok {
		return false
	}
	target, ok := i.(**os.PathError)
	if !ok {
		return false
	}
	*target = perr
	return true
}
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (driver.Attributes, error) {
	_, info, xa, err := b.forKey(key)
	if err != nil {
		return driver.Attributes{}, err
	}
	// Combine the stored extended attributes (side-car file) with the
	// filesystem-provided metadata (size, mod time).
	return driver.Attributes{
		CacheControl:       xa.CacheControl,
		ContentDisposition: xa.ContentDisposition,
		ContentEncoding:    xa.ContentEncoding,
		ContentLanguage:    xa.ContentLanguage,
		ContentType:        xa.ContentType,
		Metadata:           xa.Metadata,
		ModTime:            info.ModTime(),
		Size:               info.Size(),
		MD5:                xa.MD5,
	}, nil
}
// NewRangeReader implements driver.NewRangeReader.
//
// A negative length means "read to the end of the blob"; otherwise the
// returned reader is limited to length bytes starting at offset.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
	path, info, xa, err := b.forKey(key)
	if err != nil {
		return nil, err
	}
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	if offset > 0 {
		if _, err := f.Seek(offset, io.SeekStart); err != nil {
			// Don't leak the open file handle on the error path.
			f.Close()
			return nil, err
		}
	}
	r := io.Reader(f)
	if length >= 0 {
		r = io.LimitReader(r, length)
	}
	return &reader{
		r: r,
		c: f,
		attrs: driver.ReaderAttributes{
			ContentType: xa.ContentType,
			ModTime:     info.ModTime(),
			Size:        info.Size(),
		},
	}, nil
}
// reader streams a blob's bytes from the local file and carries its
// read-time attributes.
type reader struct {
	r     io.Reader // possibly a length-limited view over c
	c     io.Closer // the underlying *os.File
	attrs driver.ReaderAttributes
}

// Read reads from the (possibly limited) underlying reader; a nil reader
// behaves as an empty stream.
func (r *reader) Read(p []byte) (int, error) {
	if r.r == nil {
		return 0, io.EOF
	}
	return r.r.Read(p)
}

// Close closes the underlying file, if any.
func (r *reader) Close() error {
	if r.c == nil {
		return nil
	}
	return r.c.Close()
}

// Attributes returns the attributes captured when the reader was created.
func (r *reader) Attributes() driver.ReaderAttributes {
	return r.attrs
}

// As implements driver.Reader.As; no provider-specific types are exposed.
func (r *reader) As(i interface{}) bool { return false }
// NewTypedWriter implements driver.NewTypedWriter.
//
// Data is first written to a temp file in the destination directory;
// writer.Close renames it into place so the final file appears atomically.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
	path, err := b.path(key)
	if err != nil {
		return nil, err
	}
	if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
		return nil, err
	}
	f, err := ioutil.TempFile(filepath.Dir(path), "fileblob")
	if err != nil {
		return nil, err
	}
	if opts.BeforeWrite != nil {
		if err := opts.BeforeWrite(func(interface{}) bool { return false }); err != nil {
			// Clean up the temp file we just created so it isn't leaked on
			// this error path.
			f.Close()
			os.Remove(f.Name())
			return nil, err
		}
	}
	var metadata map[string]string
	if len(opts.Metadata) > 0 {
		metadata = opts.Metadata
	}
	attrs := xattrs{
		CacheControl:       opts.CacheControl,
		ContentDisposition: opts.ContentDisposition,
		ContentEncoding:    opts.ContentEncoding,
		ContentLanguage:    opts.ContentLanguage,
		ContentType:        contentType,
		Metadata:           metadata,
	}
	w := &writer{
		ctx:        ctx,
		f:          f,
		path:       path,
		attrs:      attrs,
		contentMD5: opts.ContentMD5,
		md5hash:    md5.New(),
	}
	return w, nil
}
// writer accumulates a blob into a temp file; Close renames it into place.
type writer struct {
	ctx        context.Context
	f          *os.File // temp file receiving the data
	path       string   // final destination path
	attrs      xattrs
	contentMD5 []byte
	// We compute the MD5 hash so that we can store it with the file attributes,
	// not for verification.
	md5hash hash.Hash
}

// Write tees p into both the running MD5 hash and the temp file.
func (w *writer) Write(p []byte) (n int, err error) {
	if _, err := w.md5hash.Write(p); err != nil {
		return 0, err
	}
	return w.f.Write(p)
}
// Close finalizes the write: it persists the attributes side-car file and
// atomically renames the temp file to its final path. The temp file is
// always cleaned up, even when closing it fails or the context was
// cancelled (the original code leaked it if f.Close() returned an error,
// because the cleanup defer was registered only afterwards).
func (w *writer) Close() error {
	// Always delete the temp file. On success, it will have been renamed so
	// the Remove will harmlessly fail.
	defer func() {
		_ = os.Remove(w.f.Name())
	}()
	if err := w.f.Close(); err != nil {
		return err
	}
	// Check if the write was cancelled.
	if err := w.ctx.Err(); err != nil {
		return err
	}
	// Record the MD5 of everything written, then persist the attributes file.
	w.attrs.MD5 = w.md5hash.Sum(nil)
	if err := setAttrs(w.path, w.attrs); err != nil {
		return err
	}
	// Rename the temp file to path; on failure, remove the now-orphaned
	// attributes file.
	if err := os.Rename(w.f.Name(), w.path); err != nil {
		_ = os.Remove(w.path + attrsExt)
		return err
	}
	return nil
}
// Delete implements driver.Delete. It removes both the blob file and its
// attributes side-car file (which may legitimately not exist).
func (b *bucket) Delete(ctx context.Context, key string) error {
	path, err := b.path(key)
	if err != nil {
		return err
	}
	if err := os.Remove(path); err != nil {
		return err
	}
	if err := os.Remove(path + attrsExt); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
// SignedURL implements driver.SignedURL by delegating to the bucket's
// configured Options.URLSigner.
func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) {
	signer := b.opts.URLSigner
	if signer == nil {
		return "", errors.New("sign fileblob url: bucket does not have an Options.URLSigner")
	}
	surl, err := signer.URLFromKey(ctx, key, opts)
	if err != nil {
		return "", err
	}
	return surl.String(), nil
}
// URLSigner defines an interface for creating and verifying a signed URL for
// objects in a fileblob bucket. Signed URLs are typically used for granting
// access to an otherwise-protected resource without requiring further
// authentication, and callers should take care to restrict the creation of
// signed URLs as is appropriate for their application.
type URLSigner interface {
	// URLFromKey defines how the bucket's object key will be turned
	// into a signed URL. URLFromKey must be safe to call from multiple goroutines.
	URLFromKey(ctx context.Context, key string, opts *driver.SignedURLOptions) (*url.URL, error)
	// KeyFromURL must be able to validate a URL returned from URLFromKey.
	// KeyFromURL must only return the object if the URL is
	// both unexpired and authentic. KeyFromURL must be safe to call from
	// multiple goroutines. Implementations of KeyFromURL should not modify
	// the URL argument.
	KeyFromURL(ctx context.Context, surl *url.URL) (string, error)
}
// URLSignerHMAC signs URLs by adding the object key, expiration time, and a
// hash-based message authentication code (HMAC) into the query parameters.
// Values of URLSignerHMAC with the same secret key will accept URLs produced by
// others as valid.
type URLSignerHMAC struct {
	baseURL   *url.URL
	secretKey []byte
}

// NewURLSignerHMAC creates a URLSignerHMAC. If the secret key is empty,
// then NewURLSignerHMAC panics.
func NewURLSignerHMAC(baseURL *url.URL, secretKey []byte) *URLSignerHMAC {
	if len(secretKey) == 0 {
		panic("creating URLSignerHMAC: secretKey is required")
	}
	// Keep a private copy so later mutation of the caller's URL has no effect.
	base := *baseURL
	return &URLSignerHMAC{
		baseURL:   &base,
		secretKey: secretKey,
	}
}
// URLFromKey creates a signed URL by copying the baseURL and appending the
// object key, expiry, and signature as a query params.
func (h *URLSignerHMAC) URLFromKey(ctx context.Context, key string, opts *driver.SignedURLOptions) (*url.URL, error) {
	// Work on a copy so the signer's baseURL is never mutated.
	sURL := new(url.URL)
	*sURL = *h.baseURL
	q := sURL.Query()
	q.Set("obj", key)
	// Expiry is an absolute Unix timestamp derived from the relative opts.Expiry.
	q.Set("expiry", strconv.FormatInt(time.Now().Add(opts.Expiry).Unix(), 10))
	// Only "obj" and "expiry" are covered by the signature (see getMAC).
	q.Set("signature", h.getMAC(q))
	sURL.RawQuery = q.Encode()
	return sURL, nil
}
// getMAC computes the HMAC-SHA256 over the canonical encoding of the signed
// query parameters ("obj" and "expiry") and returns it base64url-encoded
// without padding. url.Values.Encode sorts keys, so the input is canonical.
func (h *URLSignerHMAC) getMAC(q url.Values) string {
	signed := url.Values{
		"obj":    {q.Get("obj")},
		"expiry": {q.Get("expiry")},
	}
	mac := hmac.New(sha256.New, h.secretKey)
	mac.Write([]byte(signed.Encode()))
	return base64.RawURLEncoding.EncodeToString(mac.Sum(nil))
}
// KeyFromURL checks expiry and signature, and returns the object key
// only if the signed URL is both authentic and unexpired.
func (h *URLSignerHMAC) KeyFromURL(ctx context.Context, sURL *url.URL) (string, error) {
	q := sURL.Query()
	exp, err := strconv.ParseInt(q.Get("expiry"), 10, 64)
	if err != nil || time.Now().Unix() > exp {
		// Deliberately vague error: the caller can't distinguish a malformed
		// expiry from an expired one.
		return "", errors.New("retrieving blob key from URL: key cannot be retrieved")
	}
	if !h.checkMAC(q) {
		return "", errors.New("retrieving blob key from URL: key cannot be retrieved")
	}
	return q.Get("obj"), nil
}
// checkMAC reports whether the URL's "signature" query parameter matches the
// expected HMAC for its signed parameters.
func (h *URLSignerHMAC) checkMAC(q url.Values) bool {
	mac := q.Get("signature")
	expected := h.getMAC(q)
	// This compares the Base-64 encoded MACs; hmac.Equal is constant-time.
	return hmac.Equal([]byte(mac), []byte(expected))
}
| 1 | 15,065 | Should unsupported query parameters just be ignored? I think that's more common than error out. | google-go-cloud | go |
@@ -52,9 +52,9 @@ def namespace(namespace=None):
Call to set namespace of tasks declared after the call.
If called without arguments or with ``None`` as the namespace, the namespace
- is reset, which is recommended to do at the end of any file where the
- namespace is set to avoid unintentionally setting namespace on tasks outside
- of the scope of the current file.
+ is reset to :py:attr:`Register._UNSET_NAMESPACE`., which is recommended to do
+ at the end of any file where the namespace is set to avoid unintentionally
+ setting namespace on tasks outside of the scope of the current file.
The namespace of a Task can also be changed by specifying the property
``task_namespace``. This solution has the advantage that the namespace | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Task` class.
It is a central concept of Luigi and represents the state of the workflow.
See :doc:`/tasks` for an overview.
"""
try:
from itertools import imap as map
except ImportError:
pass
from contextlib import contextmanager
import logging
import traceback
import warnings
import json
import hashlib
import re
from luigi import six
from luigi import parameter
from luigi.task_register import Register
Parameter = parameter.Parameter
logger = logging.getLogger('luigi-interface')
# How many leading (sorted) parameter values appear in the readable part of a
# task_id, how long each may be, how much of the hash is kept, and which
# characters get sanitized out of the summary.
TASK_ID_INCLUDE_PARAMS = 3
TASK_ID_TRUNCATE_PARAMS = 16
TASK_ID_TRUNCATE_HASH = 10
TASK_ID_INVALID_CHAR_REGEX = re.compile(r'[^A-Za-z0-9_]')


def namespace(namespace=None):
    """
    Call to set namespace of tasks declared after the call.

    If called without arguments or with ``None`` as the namespace, the
    namespace is reset, which is recommended to do at the end of any file
    where the namespace is set, to avoid unintentionally setting namespace
    on tasks outside of the scope of the current file.

    The namespace of a Task can also be changed by specifying the property
    ``task_namespace``. This solution has the advantage that the namespace
    doesn't have to be restored.

    .. code-block:: python

        class Task2(luigi.Task):
            task_namespace = 'namespace2'
    """
    Register._default_namespace = namespace


def task_id_str(task_family, params):
    """
    Returns a canonical string used to identify a particular task

    :param task_family: The task family (class name) of the task
    :param params: a dict mapping parameter names to their serialized values
    :return: A unique, shortened identifier corresponding to the family and params
    """
    # The id is the family, the truncated values of the first few parameters
    # (sorted by name, made safe by replacing every non-word character), and
    # a short md5 of the canonicalised JSON of *all* params for uniqueness.
    canonical = json.dumps(params, separators=(',', ':'), sort_keys=True)
    digest = hashlib.md5(canonical.encode('utf-8')).hexdigest()
    first_values = (params[name] for name in sorted(params)[:TASK_ID_INCLUDE_PARAMS])
    summary = '_'.join(value[:TASK_ID_TRUNCATE_PARAMS] for value in first_values)
    summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', summary)
    return '{}_{}_{}'.format(task_family, summary, digest[:TASK_ID_TRUNCATE_HASH])
class BulkCompleteNotImplementedError(NotImplementedError):
    """This is here to trick pylint.

    pylint thinks anything raising NotImplementedError needs to be implemented
    in any subclass. bulk_complete isn't like that. This tricks pylint into
    thinking that the default implementation is a valid implementation and no
    an abstract method."""
    pass
@six.add_metaclass(Register)
class Task(object):
    """
    This is the base class of all Luigi Tasks, the base unit of work in Luigi.
    A Luigi Task describes a unit or work.

    The key methods of a Task, which must be implemented in a subclass are:

    * :py:meth:`run` - the computation done by this task.
    * :py:meth:`requires` - the list of Tasks that this Task depends on.
    * :py:meth:`output` - the output :py:class:`Target` that this Task creates.

    Each :py:class:`~luigi.Parameter` of the Task should be declared as members:

    .. code:: python

        class MyTask(luigi.Task):
            count = luigi.IntParameter()
            second_param = luigi.Parameter()

    In addition to any declared properties and methods, there are a few
    non-declared properties, which are created by the :py:class:`Register`
    metaclass:

    ``Task.task_namespace``
      optional string which is prepended to the task name for the sake of
      scheduling. If it isn't overridden in a Task, whatever was last declared
      using `luigi.namespace` will be used.
    """
    # Maps owning class -> {event name -> set of callbacks}; populated by the
    # event_handler decorator and consumed by trigger_event below.
    _event_callbacks = {}
    #: Priority of the task: the scheduler should favor available
    #: tasks with higher priority values first.
    #: See :ref:`Task.priority`
    priority = 0
    # NOTE(review): not referenced within this class; presumably read by the
    # scheduler -- confirm before relying on it.
    disabled = False
    #: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the
    #: task requires 1 unit of the scp resource.
    resources = {}
    #: Number of seconds after which to time out the run function.
    #: No timeout if set to 0.
    #: Defaults to 0 or worker-timeout value in config file
    #: Only works when using multiple workers.
    worker_timeout = None
    #: Maximum number of tasks to run together as a batch. Infinite by default
    max_batch_size = float('inf')

    @property
    def batchable(self):
        """
        True if this instance can be run as part of a batch. By default, True
        if it has any batched parameters
        """
        return bool(self.batch_param_names())

    @property
    def retry_count(self):
        """
        Override this positive integer to have different ``retry_count`` at task level
        Check :ref:`scheduler-config`
        """
        return None

    @property
    def disable_hard_timeout(self):
        """
        Override this positive integer to have different ``disable_hard_timeout`` at task level.
        Check :ref:`scheduler-config`
        """
        return None

    @property
    def disable_window_seconds(self):
        """
        Override this positive integer to have different ``disable_window_seconds`` at task level.
        Check :ref:`scheduler-config`
        """
        return None

    @property
    def owner_email(self):
        '''
        Override this to send out additional error emails to task owner, in addition to the one
        defined in the global configuration. This should return a string or a list of strings. e.g.
        '[email protected]' or ['[email protected]', '[email protected]']
        '''
        return None

    def _owner_list(self):
        """
        Turns the owner_email property into a list. This should not be overridden.
        """
        owner_email = self.owner_email
        if owner_email is None:
            return []
        elif isinstance(owner_email, six.string_types):
            # A single comma-separated string becomes a list of addresses.
            return owner_email.split(',')
        else:
            return owner_email

    @property
    def use_cmdline_section(self):
        ''' Property used by core config such as `--workers` etc.
        These will be exposed without the class as prefix.'''
        return True

    @classmethod
    def event_handler(cls, event):
        """
        Decorator for adding event handlers.
        """
        def wrapped(callback):
            cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback)
            return callback
        return wrapped

    def trigger_event(self, event, *args, **kwargs):
        """
        Trigger that calls all of the specified events associated with this class.
        """
        # Handlers registered on any class in this instance's MRO apply.
        for event_class, event_callbacks in six.iteritems(self._event_callbacks):
            if not isinstance(self, event_class):
                continue
            for callback in event_callbacks.get(event, []):
                try:
                    # callbacks are protected
                    callback(*args, **kwargs)
                except KeyboardInterrupt:
                    return
                except BaseException:
                    # A failing handler must not break the task; just log it.
                    logger.exception("Error in event callback for %r", event)

    @property
    def task_module(self):
        ''' Returns what Python module to import to get access to this class. '''
        # TODO(erikbern): we should think about a language-agnostic mechanism
        return self.__class__.__module__

    @property
    def task_family(self):
        """
        Convenience method since a property on the metaclass isn't directly accessible through the class instances.
        """
        return self.__class__.task_family

    @classmethod
    def get_params(cls):
        """
        Returns all of the Parameters for this Task.

        :return: list of ``(param_name, Parameter)`` tuples in declaration order.
        """
        # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically
        params = []
        for param_name in dir(cls):
            param_obj = getattr(cls, param_name)
            if not isinstance(param_obj, Parameter):
                continue
            params.append((param_name, param_obj))
        # The order the parameters are created matters. See Parameter class
        params.sort(key=lambda t: t[1]._counter)
        return params

    @classmethod
    def batch_param_names(cls):
        # Names of parameters that may be batched together across task instances.
        return [name for name, p in cls.get_params() if p._is_batchable()]

    @classmethod
    def get_param_names(cls, include_significant=False):
        # Names of significant parameters (optionally all parameters).
        return [name for name, p in cls.get_params() if include_significant or p.significant]

    @classmethod
    def get_param_values(cls, params, args, kwargs):
        """
        Get the values of the parameters from the args and kwargs.

        :param params: list of (param_name, Parameter).
        :param args: positional arguments
        :param kwargs: keyword arguments.
        :returns: list of `(name, value)` tuples, one for each parameter.
        """
        result = {}
        params_dict = dict(params)
        task_name = cls.task_family
        # In case any exceptions are thrown, create a helpful description of how the Task was invoked
        # TODO: should we detect non-reprable arguments? These will lead to mysterious errors
        exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs)
        # Fill in the positional arguments
        positional_params = [(n, p) for n, p in params if p.positional]
        for i, arg in enumerate(args):
            if i >= len(positional_params):
                raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args)))
            param_name, param_obj = positional_params[i]
            result[param_name] = param_obj.normalize(arg)
        # Then the keyword arguments
        for param_name, arg in six.iteritems(kwargs):
            if param_name in result:
                raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name))
            if param_name not in params_dict:
                raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name))
            result[param_name] = params_dict[param_name].normalize(arg)
        # Then use the defaults for anything not filled in
        for param_name, param_obj in params:
            if param_name not in result:
                if not param_obj.has_task_value(task_name, param_name):
                    raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name))
                result[param_name] = param_obj.task_value(task_name, param_name)

        def list_to_tuple(x):
            """ Make tuples out of lists and sets to allow hashing """
            if isinstance(x, list) or isinstance(x, set):
                return tuple(x)
            else:
                return x
        # Sort it by the correct order and make a list
        return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params]

    def __init__(self, *args, **kwargs):
        # Resolve parameter values from args/kwargs/defaults.
        params = self.get_params()
        param_values = self.get_param_values(params, args, kwargs)
        # Set all values on class instance
        for key, value in param_values:
            setattr(self, key, value)
        # Register args and kwargs as an attribute on the class. Might be useful
        self.param_args = tuple(value for key, value in param_values)
        self.param_kwargs = dict(param_values)
        # task_id is derived from significant parameters only, so two tasks
        # differing solely in insignificant params share an id.
        self.task_id = task_id_str(self.task_family, self.to_str_params(only_significant=True))
        self.__hash = hash(self.task_id)
        # Placeholders; replaced elsewhere at run time (see
        # no_unpicklable_properties) -- presumably injected by the worker.
        self.set_tracking_url = None
        self.set_status_message = None

    def initialized(self):
        """
        Returns ``True`` if the Task is initialized and ``False`` otherwise.
        """
        return hasattr(self, 'task_id')

    @classmethod
    def from_str_params(cls, params_str):
        """
        Creates an instance from a str->str hash.

        :param params_str: dict of param name -> value as string.
        """
        kwargs = {}
        for param_name, param in cls.get_params():
            if param_name in params_str:
                param_str = params_str[param_name]
                # A list of strings is parsed element-wise via _parse_list.
                if isinstance(param_str, list):
                    kwargs[param_name] = param._parse_list(param_str)
                else:
                    kwargs[param_name] = param.parse(param_str)
        return cls(**kwargs)

    def to_str_params(self, only_significant=False):
        """
        Convert all parameters to a str->str hash.
        """
        params_str = {}
        params = dict(self.get_params())
        for param_name, param_value in six.iteritems(self.param_kwargs):
            if (not only_significant) or params[param_name].significant:
                params_str[param_name] = params[param_name].serialize(param_value)
        return params_str

    def clone(self, cls=None, **kwargs):
        """
        Creates a new instance from an existing instance where some of the args have changed.

        There's at least two scenarios where this is useful (see test/clone_test.py):

        * remove a lot of boiler plate when you have recursive dependencies and lots of args
        * there's task inheritance and some logic is on the base class

        :param cls: class to instantiate; defaults to this instance's class.
        :param kwargs: parameter overrides for the new instance.
        :return: a new task instance.
        """
        if cls is None:
            cls = self.__class__
        new_k = {}
        for param_name, param_class in cls.get_params():
            if param_name in kwargs:
                new_k[param_name] = kwargs[param_name]
            elif hasattr(self, param_name):
                new_k[param_name] = getattr(self, param_name)
        return cls(**new_k)

    def __hash__(self):
        # Precomputed in __init__ from task_id.
        return self.__hash

    def __repr__(self):
        """
        Build a task representation like `MyTask(param1=1.5, param2='5')`
        """
        params = self.get_params()
        param_values = self.get_param_values(params, [], self.param_kwargs)
        # Build up task id
        repr_parts = []
        param_objs = dict(params)
        for param_name, param_value in param_values:
            # Only significant parameters are shown.
            if param_objs[param_name].significant:
                repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
        task_str = '{}({})'.format(self.task_family, ', '.join(repr_parts))
        return task_str

    def __eq__(self, other):
        # Note: compares *all* parameter values (via param_args), including
        # insignificant ones that are excluded from task_id.
        return self.__class__ == other.__class__ and self.param_args == other.param_args

    def complete(self):
        """
        If the task has any outputs, return ``True`` if all outputs exist.
        Otherwise, return ``False``.

        However, you may freely override this method with custom logic.
        """
        outputs = flatten(self.output())
        if len(outputs) == 0:
            warnings.warn(
                "Task %r without outputs has no custom complete() method" % self,
                stacklevel=2
            )
            return False
        return all(map(lambda output: output.exists(), outputs))

    @classmethod
    def bulk_complete(cls, parameter_tuples):
        """
        Returns those of parameter_tuples for which this Task is complete.

        Override (with an efficient implementation) for efficient scheduling
        with range tools. Keep the logic consistent with that of complete().
        """
        raise BulkCompleteNotImplementedError()

    def output(self):
        """
        The output that this Task produces.

        The output of the Task determines if the Task needs to be run--the task
        is considered finished iff the outputs all exist. Subclasses should
        override this method to return a single :py:class:`Target` or a list of
        :py:class:`Target` instances.

        Implementation note
          If running multiple workers, the output must be a resource that is accessible
          by all workers, such as a DFS or database. Otherwise, workers might compute
          the same output since they don't see the work done by other workers.

        See :ref:`Task.output`
        """
        return []  # default impl

    def requires(self):
        """
        The Tasks that this Task depends on.

        A Task will only run if all of the Tasks that it requires are completed.
        If your Task does not require any other Tasks, then you don't need to
        override this method. Otherwise, a Subclasses can override this method
        to return a single Task, a list of Task instances, or a dict whose
        values are Task instances.

        See :ref:`Task.requires`
        """
        return []  # default impl

    def _requires(self):
        """
        Override in "template" tasks which themselves are supposed to be
        subclassed and thus have their requires() overridden (name preserved to
        provide consistent end-user experience), yet need to introduce
        (non-input) dependencies.

        Must return an iterable which among others contains the _requires() of
        the superclass.
        """
        return flatten(self.requires())  # base impl

    def process_resources(self):
        """
        Override in "template" tasks which provide common resource functionality
        but allow subclasses to specify additional resources while preserving
        the name for consistent end-user experience.
        """
        return self.resources  # default impl

    def input(self):
        """
        Returns the outputs of the Tasks returned by :py:meth:`requires`

        See :ref:`Task.input`

        :return: a list of :py:class:`Target` objects which are specified as
                 outputs of all required Tasks.
        """
        return getpaths(self.requires())

    def deps(self):
        """
        Internal method used by the scheduler.
        Returns the flattened list of requires.
        """
        # used by scheduler
        return flatten(self._requires())

    def run(self):
        """
        The task run method, to be overridden in a subclass.

        See :ref:`Task.run`
        """
        pass  # default impl

    def on_failure(self, exception):
        """
        Override for custom error handling.

        This method gets called if an exception is raised in :py:meth:`run`.
        The returned value of this method is json encoded and sent to the scheduler
        as the `expl` argument. Its string representation will be used as the
        body of the error email sent out if any.

        Default behavior is to return a string representation of the stack trace.
        """
        traceback_string = traceback.format_exc()
        return "Runtime error:\n%s" % traceback_string

    def on_success(self):
        """
        Override for doing custom completion handling for a larger class of tasks

        This method gets called when :py:meth:`run` completes without raising any exceptions.

        The returned value is json encoded and sent to the scheduler as the `expl` argument.

        Default behavior is to send an None value"""
        pass

    @contextmanager
    def no_unpicklable_properties(self):
        """
        Remove unpicklable properties before dump task and resume them after.

        This method could be called in subtask's dump method, to ensure unpicklable
        properties won't break dump.

        This method is a context-manager which can be called as below:

        .. code-block: python

            class DummyTask(luigi):
                def _dump(self):
                    with self.no_unpicklable_properties():
                        pickle.dumps(self)
        """
        unpicklable_properties = ('set_tracking_url', 'set_status_message')
        reserved_properties = {}
        # Swap the unpicklable attributes for a plain-string placeholder...
        for property_name in unpicklable_properties:
            if hasattr(self, property_name):
                reserved_properties[property_name] = getattr(self, property_name)
                setattr(self, property_name, 'placeholder_during_pickling')
        yield
        # ...and restore them once the caller's block has finished.
        for property_name, value in six.iteritems(reserved_properties):
            setattr(self, property_name, value)
class MixinNaiveBulkComplete(object):
    """
    Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop.

    Applicable to tasks whose completeness checking is cheap.

    This doesn't exploit output location specific APIs for speed advantage, nevertheless removes redundant scheduler roundtrips.
    """
    @classmethod
    def bulk_complete(cls, parameter_tuples):
        # Each element may be positional args (list/tuple), keyword args
        # (dict), or a single parameter value; instantiate accordingly and
        # keep only those whose task reports complete().
        generated_tuples = []
        for parameter_tuple in parameter_tuples:
            if isinstance(parameter_tuple, (list, tuple)):
                if cls(*parameter_tuple).complete():
                    generated_tuples.append(parameter_tuple)
            elif isinstance(parameter_tuple, dict):
                if cls(**parameter_tuple).complete():
                    generated_tuples.append(parameter_tuple)
            else:
                if cls(parameter_tuple).complete():
                    generated_tuples.append(parameter_tuple)
        return generated_tuples
def externalize(task):
    """
    Returns an externalized version of the Task.

    Note that the given instance is mutated in place (its ``run`` is removed)
    and then returned; no copy is made.

    See :py:class:`ExternalTask`.
    """
    task.run = None
    return task
class ExternalTask(Task):
    """
    Subclass for references to external dependencies.

    An ExternalTask's does not have a `run` implementation, which signifies to
    the framework that this Task's :py:meth:`output` is generated outside of
    Luigi.
    """
    run = None  # a None run marks the task as external (never executed)
class WrapperTask(Task):
    """
    Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist.
    """
    def complete(self):
        # Complete iff every (flattened) requirement reports complete.
        return all(r.complete() for r in flatten(self.requires()))
class Config(Task):
    """
    Class for configuration. See :ref:`ConfigClasses`.
    """
    # TODO: let's refactor Task & Config so that it inherits from a common
    # ParamContainer base class
    pass
def getpaths(struct):
    """
    Maps all Tasks in a structured data object to their .output().

    :param struct: a Task, a dict, or an iterable, arbitrarily nested.
    :return: the same structure with every Task replaced by its ``output()``.
    :raises Exception: if a leaf is none of Task, dict, or iterable.
    """
    if isinstance(struct, Task):
        return struct.output()
    elif isinstance(struct, dict):
        r = {}
        for k, v in six.iteritems(struct):
            r[k] = getpaths(v)
        return r
    else:
        # Remaining case: assume r is iterable...
        try:
            s = list(struct)
        except TypeError:
            raise Exception('Cannot map %s to Task/dict/list' % str(struct))
        return [getpaths(r) for r in s]
def flatten(struct):
    """
    Creates a flat list of all items in structured output (dicts, lists, items):

    .. code-block:: python

        >>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
        ['bar', 'foo']
        >>> sorted(flatten(['foo', ['bar', 'troll']]))
        ['bar', 'foo', 'troll']
        >>> flatten('foo')
        ['foo']
        >>> flatten(42)
        [42]
    """
    if struct is None:
        return []
    if isinstance(struct, dict):
        # Dict values are flattened; keys are discarded.
        result = []
        for _, value in six.iteritems(struct):
            result += flatten(value)
        return result
    if isinstance(struct, six.string_types):
        # Strings are leaves, not iterables of characters.
        return [struct]
    try:
        iterator = iter(struct)
    except TypeError:
        # Not iterable: a scalar leaf.
        return [struct]
    result = []
    for element in iterator:
        result += flatten(element)
    return result
def flatten_output(task):
    """
    Lists all output targets by recursively walking output-less (wrapper) tasks.

    FIXME order consistently.
    """
    outputs = flatten(task.output())
    if outputs:
        return outputs
    # No outputs of its own: gather the outputs of its dependencies instead.
    for dep in flatten(task.requires()):
        outputs += flatten_output(dep)
    return outputs
| 1 | 16,359 | Actually can we avoid mentioning `Register._UNSET_NAMESPACE`. I thought of it as an implementation detail of the simpler concept of "being unset". Perhaps the docs become more natural if we remove the "If called without arguments or with ..." part and just say "you have to call this function without arguments at the end of any file it has been used. That is to ensure blah blah". Something like that. Just a suggestion. | spotify-luigi | py |
@@ -426,8 +426,16 @@ class WinWordCollectionQuicknavIterator(object):
if self.direction=="previous":
index=itemCount-(index-1)
collectionItem=items[index]
- item=self.quickNavItemClass(self.itemType,self.document,collectionItem)
- itemRange=item.rangeObj
+ try:
+ item=self.quickNavItemClass(self.itemType,self.document,collectionItem)
+ itemRange=item.rangeObj
+ except:
+ values = (self.itemType, self.direction, itemCount, index)
+ message = ("Error iterating over item with "+
+ ("type: %s, iteration direction: %s, total item count: %s, item at index: %s" % values )+
+ "\nThis could be caused by an issue with some element within or a corruption of the word document.")
+ log.debugWarning(message ,exc_info=True)
+ continue
# Skip over the item we're already on.
if not self.includeCurrent and isFirst and ((self.direction=="next" and itemRange.start<=self.rangeObj.start) or (self.direction=="previous" and itemRange.end>self.rangeObj.end)):
continue | 1 | #appModules/winword.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2016 NV Access Limited, Manish Agrawal, Derek Riemer
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import ctypes
import time
from comtypes import COMError, GUID, BSTR
import comtypes.client
import comtypes.automation
import uuid
import operator
import locale
import collections
import colorsys
import sayAllHandler
import eventHandler
import braille
import scriptHandler
import languageHandler
import ui
import NVDAHelper
import XMLFormatting
from logHandler import log
import winUser
import oleacc
import globalVars
import speech
import config
import textInfos
import textInfos.offsets
import colors
import controlTypes
import treeInterceptorHandler
import browseMode
import review
from cursorManager import CursorManager, ReviewCursorManager
from tableUtils import HeaderCellInfo, HeaderCellTracker
from . import Window
from ..behaviors import EditableTextWithoutAutoSelectDetection
#Word constants
#wdLineSpacing rules
wdLineSpaceSingle=0
wdLineSpace1pt5=1
wdLineSpaceDouble=2
wdLineSpaceAtLeast=3
wdLineSpaceExactly=4
wdLineSpaceMultiple=5
# wdMeasurementUnits
wdInches=0
wdCentimeters=1
wdMillimeters=2
wdPoints=3
wdPicas=4
wdCollapseEnd=0
wdCollapseStart=1
#Indexing
wdActiveEndAdjustedPageNumber=1
wdActiveEndPageNumber=3
wdNumberOfPagesInDocument=4
wdHorizontalPositionRelativeToPage=5
wdFirstCharacterLineNumber=10
wdWithInTable=12
wdStartOfRangeRowNumber=13
wdMaximumNumberOfRows=15
wdStartOfRangeColumnNumber=16
wdMaximumNumberOfColumns=18
#Horizontal alignment
wdAlignParagraphLeft=0
wdAlignParagraphCenter=1
wdAlignParagraphRight=2
wdAlignParagraphJustify=3
#Units
wdCharacter=1
wdWord=2
wdSentence=3
wdParagraph=4
wdLine=5
wdStory=6
wdColumn=9
wdRow=10
wdWindow=11
wdCell=12
wdCharFormat=13
wdParaFormat=14
wdTable=15
#GoTo - direction
wdGoToAbsolute=1
wdGoToRelative=2
wdGoToNext=2
wdGoToPrevious=3
#GoTo - units
wdGoToBookmark=-1
wdGoToSection=0
wdGoToPage=1
wdGoToTable=2
wdGoToLine=3
wdGoToFootnote=4
wdGoToEndnote=5
wdGoToComment=6
wdGoToField=7
wdGoToGraphic=8
wdGoToObject=9
wdGoToEquation=10
wdGoToHeading=11
wdGoToPercent=12
wdGoToSpellingError=13
wdGoToGrammaticalError=14
wdGoToProofreadingError=15
wdCommentsStory=4
wdEndnotesStory=3
wdEvenPagesFooterStory=8
wdEvenPagesHeaderStory=6
wdFirstPageFooterStory=11
wdFirstPageHeaderStory=10
wdFootnotesStory=2
wdMainTextStory=1
wdPrimaryFooterStory=9
wdPrimaryHeaderStory=7
wdTextFrameStory=5
wdFieldFormTextInput=70
wdFieldFormCheckBox=71
wdFieldFormDropDown=83
wdContentControlRichText=0
wdContentControlText=1
wdContentControlPicture=2
wdContentControlComboBox=3
wdContentControlDropdownList=4
wdContentControlBuildingBlockGallery=5
wdContentControlDate=6
wdContentControlGroup=7
wdContentControlCheckBox=8
wdNoRevision=0
wdRevisionInsert=1
wdRevisionDelete=2
wdRevisionProperty=3
wdRevisionParagraphNumber=4
wdRevisionDisplayField=5
wdRevisionReconcile=6
wdRevisionConflict=7
wdRevisionStyle=8
wdRevisionReplace=9
wdRevisionParagraphProperty=10
wdRevisionTableProperty=11
wdRevisionSectionProperty=12
wdRevisionStyleDefinition=13
wdRevisionMovedFrom=14
wdRevisionMovedTo=15
wdRevisionCellInsertion=16
wdRevisionCellDeletion=17
wdRevisionCellMerge=18
# MsoThemeColorSchemeIndex
msoThemeAccent1=5
msoThemeAccent2=6
msoThemeAccent3=7
msoThemeAccent4=8
msoThemeAccent5=9
msoThemeAccent6=10
msoThemeDark1=1
msoThemeDark2=3
msoThemeFollowedHyperlink=12
msoThemeHyperlink=11
msoThemeLight1=2
msoThemeLight2=4
# WdThemeColorIndex
wdNotThemeColor=-1
wdThemeColorAccent1=4
wdThemeColorAccent2=5
wdThemeColorAccent3=6
wdThemeColorAccent4=7
wdThemeColorAccent5=8
wdThemeColorAccent6=9
wdThemeColorBackground1=12
wdThemeColorBackground2=14
wdThemeColorHyperlink=10
wdThemeColorHyperlinkFollowed=11
wdThemeColorMainDark1=0
wdThemeColorMainDark2=2
wdThemeColorMainLight1=1
wdThemeColorMainLight2=3
wdThemeColorText1=13
wdThemeColorText2=15
# Mapping from http://www.wordarticles.com/Articles/Colours/2007.php#UIConsiderations
WdThemeColorIndexToMsoThemeColorSchemeIndex={
wdThemeColorMainDark1:msoThemeDark1,
wdThemeColorMainLight1:msoThemeLight1,
wdThemeColorMainDark2:msoThemeDark2,
wdThemeColorMainLight2:msoThemeLight2,
wdThemeColorAccent1:msoThemeAccent1,
wdThemeColorAccent2:msoThemeAccent2,
wdThemeColorAccent3:msoThemeAccent3,
wdThemeColorAccent4:msoThemeAccent4,
wdThemeColorAccent5:msoThemeAccent5,
wdThemeColorAccent6:msoThemeAccent6,
wdThemeColorHyperlink:msoThemeHyperlink,
wdThemeColorHyperlinkFollowed:msoThemeFollowedHyperlink,
wdThemeColorBackground1:msoThemeLight1,
wdThemeColorText1:msoThemeDark1,
wdThemeColorBackground2:msoThemeLight2,
wdThemeColorText2:msoThemeDark2,
}
wdRevisionTypeLabels={
# Translators: a Microsoft Word revision type (inserted content)
wdRevisionInsert:_("insertion"),
# Translators: a Microsoft Word revision type (deleted content)
wdRevisionDelete:_("deletion"),
# Translators: a Microsoft Word revision type (changed content property, e.g. font, color)
wdRevisionProperty:_("property"),
# Translators: a Microsoft Word revision type (changed paragraph number)
wdRevisionParagraphNumber:_("paragraph number"),
# Translators: a Microsoft Word revision type (display field)
wdRevisionDisplayField:_("display field"),
# Translators: a Microsoft Word revision type (reconcile)
wdRevisionReconcile:_("reconcile"),
# Translators: a Microsoft Word revision type (conflicting revision)
wdRevisionConflict:_("conflict"),
# Translators: a Microsoft Word revision type (style change)
wdRevisionStyle:_("style"),
# Translators: a Microsoft Word revision type (replaced content)
wdRevisionReplace:_("replace"),
# Translators: a Microsoft Word revision type (changed paragraph property, e.g. alignment)
wdRevisionParagraphProperty:_("paragraph property"),
# Translators: a Microsoft Word revision type (table)
wdRevisionTableProperty:_("table property"),
# Translators: a Microsoft Word revision type (section property)
wdRevisionSectionProperty:_("section property"),
# Translators: a Microsoft Word revision type (style definition)
wdRevisionStyleDefinition:_("style definition"),
# Translators: a Microsoft Word revision type (moved from)
wdRevisionMovedFrom:_("moved from"),
# Translators: a Microsoft Word revision type (moved to)
wdRevisionMovedTo:_("moved to"),
# Translators: a Microsoft Word revision type (inserted table cell)
wdRevisionCellInsertion:_("cell insertion"),
# Translators: a Microsoft Word revision type (deleted table cell)
wdRevisionCellDeletion:_("cell deletion"),
# Translators: a Microsoft Word revision type (merged table cells)
wdRevisionCellMerge:_("cell merge"),
}
storyTypeLocalizedLabels={
wdCommentsStory:_("Comments"),
wdEndnotesStory:_("Endnotes"),
wdEvenPagesFooterStory:_("Even pages footer"),
wdEvenPagesHeaderStory:_("Even pages header"),
wdFirstPageFooterStory:_("First page footer"),
wdFirstPageHeaderStory:_("First page header"),
wdFootnotesStory:_("Footnotes"),
wdPrimaryFooterStory:_("Primary footer"),
wdPrimaryHeaderStory:_("Primary header"),
wdTextFrameStory:_("Text frame"),
}
wdFieldTypesToNVDARoles={
wdFieldFormTextInput:controlTypes.ROLE_EDITABLETEXT,
wdFieldFormCheckBox:controlTypes.ROLE_CHECKBOX,
wdFieldFormDropDown:controlTypes.ROLE_COMBOBOX,
}
wdContentControlTypesToNVDARoles={
wdContentControlRichText:controlTypes.ROLE_EDITABLETEXT,
wdContentControlText:controlTypes.ROLE_EDITABLETEXT,
wdContentControlPicture:controlTypes.ROLE_GRAPHIC,
wdContentControlComboBox:controlTypes.ROLE_COMBOBOX,
wdContentControlDropdownList:controlTypes.ROLE_COMBOBOX,
wdContentControlDate:controlTypes.ROLE_EDITABLETEXT,
wdContentControlGroup:controlTypes.ROLE_GROUPING,
wdContentControlCheckBox:controlTypes.ROLE_CHECKBOX,
}
# COM interface ID of the Microsoft Word window object.
winwordWindowIid=GUID('{00020962-0000-0000-C000-000000000046}')
# Registered window message named after the in-process line-expansion helper.
# NOTE(review): presumably consumed by the NVDA in-process code — confirm.
wm_winword_expandToLine=ctypes.windll.user32.RegisterWindowMessageW(u"wm_winword_expandToLine")
# Maps NVDA textInfos unit constants to the Word unit constants accepted by
# Range methods such as Move/Expand.
NVDAUnitsToWordUnits={
	textInfos.UNIT_CHARACTER:wdCharacter,
	textInfos.UNIT_WORD:wdWord,
	textInfos.UNIT_LINE:wdLine,
	textInfos.UNIT_SENTENCE:wdSentence,
	textInfos.UNIT_PARAGRAPH:wdParagraph,
	textInfos.UNIT_TABLE:wdTable,
	textInfos.UNIT_CELL:wdCell,
	textInfos.UNIT_ROW:wdRow,
	textInfos.UNIT_COLUMN:wdColumn,
	textInfos.UNIT_STORY:wdStory,
	# Reading chunks map to sentences in Word.
	textInfos.UNIT_READINGCHUNK:wdSentence,
}
# Bit flags built from NVDA's documentFormatting configuration and passed to the
# in-process helper (nvdaInProcUtils_winword_getTextInRange) to select which
# formatting information should be fetched. Keys mirror config option names.
formatConfigFlagsMap={
	"reportFontName":1,
	"reportFontSize":2,
	"reportFontAttributes":4,
	"reportColor":8,
	"reportAlignment":16,
	"reportStyle":32,
	"reportSpellingErrors":64,
	"reportPage":128,
	"reportLineNumber":256,
	"reportTables":512,
	"reportLists":1024,
	"reportLinks":2048,
	"reportComments":4096,
	"reportHeadings":8192,
	"autoLanguageSwitching":16384,
	"reportRevisions":32768,
	"reportParagraphIndentation":65536,
	"reportLineSpacing":262144,
}
# Extra flag (not a user-visible config option) asking the helper to also
# include layout tables.
formatConfigFlag_includeLayoutTables=131072
class WordDocumentHeadingQuickNavItem(browseMode.TextInfoQuickNavItem):
	"""A quick navigation item representing a heading in a Microsoft Word document."""

	def __init__(self,nodeType,document,textInfo,level):
		# The heading's outline level, used for child/parent relationships.
		self.level=level
		super(WordDocumentHeadingQuickNavItem,self).__init__(nodeType,document,textInfo)

	def isChild(self,parent):
		# A heading is a child of another heading with a smaller (more major) outline level.
		return isinstance(parent,WordDocumentHeadingQuickNavItem) and self.level>parent.level
class WordDocumentCollectionQuickNavItem(browseMode.TextInfoQuickNavItem):
	"""
	A QuickNavItem representing an item that MS Word stores as a collection (e.g. link, table etc).
	"""
	def rangeFromCollectionItem(self,item):
		"""
		Fetches a Microsoft Word range object from a Microsoft Word item in a collection. E.g. a HyperLink object.
		Subclasses may override this when the item's interesting range is not its `range` property.
		@param item: an item from a collection (E.g. a HyperLink object).
		"""
		return item.range
	def __init__(self,itemType,document,collectionItem):
		"""
		See L{TextInfoQuickNavItem} for itemType and document argument definitions.
		@param collectionItem: an item from an MS Word collection e.g. HyperLink object.
		"""
		self.collectionItem=collectionItem
		self.rangeObj=self.rangeFromCollectionItem(collectionItem)
		# Wrap the Word range in a browse mode TextInfo for the base class.
		textInfo=BrowseModeWordDocumentTextInfo(document,None,_rangeObj=self.rangeObj)
		super(WordDocumentCollectionQuickNavItem,self).__init__(itemType,document,textInfo)
class WordDocumentCommentQuickNavItem(WordDocumentCollectionQuickNavItem):
	"""A QuickNavItem representing a comment in a Microsoft Word document."""
	@property
	def label(self):
		author=self.collectionItem.author
		date=self.collectionItem.date
		text=self.collectionItem.range.text
		# Translators: The label of a comment in the browse mode Elements List in Microsoft Word
		return _(u"comment: {text} by {author} on {date}").format(author=author,text=text,date=date)
	def rangeFromCollectionItem(self,item):
		# Use the comment's scope (the document text the comment is attached to),
		# not the comment body itself.
		return item.scope
class WordDocumentRevisionQuickNavItem(WordDocumentCollectionQuickNavItem):
	"""A QuickNavItem representing a tracked revision in a Microsoft Word document."""
	@property
	def label(self):
		revisionType=wdRevisionTypeLabels.get(self.collectionItem.type)
		author=self.collectionItem.author or ""
		date=self.collectionItem.date
		description=self.collectionItem.formatDescription or ""
		# Truncate the revised text so the label stays a manageable length.
		text=(self.collectionItem.range.text or "")[:100]
		# Translators: The label of a tracked revision in the browse mode Elements List in Microsoft Word
		return _(u"{revisionType} {description}: {text} by {author} on {date}").format(revisionType=revisionType,author=author,text=text,date=date,description=description)
class WinWordCollectionQuicknavIterator(object):
	"""
	Allows iterating over an MS Word collection (e.g. HyperLinks) emitting L{QuickNavItem} objects.
	"""
	quickNavItemClass=WordDocumentCollectionQuickNavItem #: the QuickNavItem class that should be instanciated and emitted.
	def __init__(self,itemType,document,direction,rangeObj,includeCurrent):
		"""
		See L{QuickNavItemIterator} for itemType, document and direction definitions.
		@param rangeObj: a Microsoft Word range object where the collection should be fetched from.
		@param includeCurrent: if true then any item at the initial position will be also emitted rather than just further ones.
		"""
		self.document=document
		self.itemType=itemType
		self.direction=direction if direction else "next"
		self.rangeObj=rangeObj
		self.includeCurrent=includeCurrent
	def collectionFromRange(self,rangeObj):
		"""
		Fetches a Microsoft Word collection object from a Microsoft Word range object. E.g. HyperLinks from a range.
		@param rangeObj: a Microsoft Word range object.
		@return: a Microsoft Word collection object.
		"""
		raise NotImplementedError
	def filter(self,item):
		"""
		Only allows certain items fom a collection to be emitted. E.g. a table who's borders are enabled.
		@param item: an item from a Microsoft Word collection (e.g. HyperLink object).
		@return True if this item should be allowd, false otherwise.
		@rtype: bool
		"""
		return True
	def iterate(self):
		"""
		returns a generator that emits L{QuickNavItem} objects for this collection.
		"""
		# Extend the range to the story boundary in the direction of travel so the
		# collection covers everything from the start position onwards.
		if self.direction=="next":
			self.rangeObj.moveEnd(wdStory,1)
		elif self.direction=="previous":
			self.rangeObj.collapse(wdCollapseStart)
			self.rangeObj.moveStart(wdStory,-1)
		items=self.collectionFromRange(self.rangeObj)
		itemCount=items.count
		isFirst=True
		# Word collections are 1-based.
		for index in xrange(1,itemCount+1):
			if self.direction=="previous":
				index=itemCount-(index-1)
			collectionItem=items[index]
			try:
				item=self.quickNavItemClass(self.itemType,self.document,collectionItem)
				itemRange=item.rangeObj
			except Exception:
				# Constructing the QuickNavItem (which fetches a range from the
				# collection item) can fail for some broken items, e.g. in a corrupt
				# document. Log and skip the item rather than aborting iteration.
				log.debugWarning(
					"Error iterating over item with type: %s, iteration direction: %s, total item count: %s, item at index: %s"%(self.itemType,self.direction,itemCount,index)
					+"\nThis could be caused by an issue with some element within or a corruption of the word document.",
					exc_info=True)
				continue
			# Skip over the item we're already on.
			if not self.includeCurrent and isFirst and ((self.direction=="next" and itemRange.start<=self.rangeObj.start) or (self.direction=="previous" and itemRange.end>self.rangeObj.end)):
				continue
			if not self.filter(collectionItem):
				continue
			yield item
			isFirst=False
class LinkWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
	"""Iterates over the hyperlinks in a Microsoft Word range."""
	def collectionFromRange(self,rangeObj):
		return rangeObj.hyperlinks
class CommentWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
	"""Iterates over the comments in a Microsoft Word range."""
	quickNavItemClass=WordDocumentCommentQuickNavItem
	def collectionFromRange(self,rangeObj):
		return rangeObj.comments
class RevisionWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
	"""Iterates over the tracked revisions in a Microsoft Word range."""
	quickNavItemClass=WordDocumentRevisionQuickNavItem
	def collectionFromRange(self,rangeObj):
		return rangeObj.revisions
class GraphicWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
	"""Iterates over the inline shapes (graphics) in a Microsoft Word range."""
	def collectionFromRange(self,rangeObj):
		return rangeObj.inlineShapes
	def filter(self,item):
		# Only emit inline shape types 3 and 4.
		# NOTE(review): presumably pictures — confirm against the wdInlineShapeType enumeration.
		return 2<item.type<5
class TableWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
	"""Iterates over the tables in a Microsoft Word range."""
	def collectionFromRange(self,rangeObj):
		return rangeObj.tables
	def filter(self,item):
		# Only emit tables whose borders are enabled (presumably to skip
		# borderless layout tables — confirm).
		return item.borders.enable
class WordDocumentTextInfo(textInfos.TextInfo):
	# #4852: temporary fix.
	# Force the mouse reading chunk to sentence to make it what it used to be in 2014.4.
	# We however need to fix line so it does not accidentally scroll.
	def _get_unit_mouseChunk(self):
		# When reading text under the mouse, report by sentence rather than by line.
		unit=super(WordDocumentTextInfo,self).unit_mouseChunk
		if unit==textInfos.UNIT_LINE:
			unit=textInfos.UNIT_SENTENCE
		return unit
	def copyToClipboard(self):
		# Let Word itself copy the range's content to the clipboard.
		self._rangeObj.copy()
		return True
	def find(self,text,caseSensitive=False,reverse=False):
		"""
		Search for the given text from this range using Word's Find object.
		@return: the result of Find.execute (truthy when a match was found).
		"""
		f=self._rangeObj.find
		f.text=text
		f.matchCase=caseSensitive
		f.forward=not reverse
		return f.execute()
	shouldIncludeLayoutTables=True #: layout tables should always be included (no matter the user's browse mode setting).
	def activate(self):
		"""
		Perform the default action at this position:
		interact with math content if present, otherwise follow a hyperlink.
		"""
		import mathPres
		# Math content takes precedence over links.
		mathMl=mathPres.getMathMlFromTextInfo(self)
		if mathMl:
			return mathPres.interactWithMathMl(mathMl)
		# Handle activating links.
		# It is necessary to expand to word to get a link as the link's first character is never actually in the link!
		tempRange=self._rangeObj.duplicate
		tempRange.expand(wdWord)
		links=tempRange.hyperlinks
		if links.count>0:
			# Word collections are 1-based.
			links[1].follow()
			return
	def _expandToLineAtCaret(self):
		"""
		Expand this range to the full line at its start using the in-process helper.
		Falls back to expanding by paragraph if the helper fails.
		"""
		lineStart=ctypes.c_int()
		lineEnd=ctypes.c_int()
		res=NVDAHelper.localLib.nvdaInProcUtils_winword_expandToLine(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,self._rangeObj.start,ctypes.byref(lineStart),ctypes.byref(lineEnd))
		# Treat a non-zero result, equal offsets or -1 offsets as failure.
		if res!=0 or lineStart.value==lineEnd.value or lineStart.value==-1 or lineEnd.value==-1:
			log.debugWarning("winword_expandToLine failed")
			self._rangeObj.expand(wdParagraph)
			return
		self._rangeObj.setRange(lineStart.value,lineEnd.value)
	def __init__(self,obj,position,_rangeObj=None):
		"""
		@param position: an NVDA position constant (caret, selection, first, last, all),
			a textInfos.Point, a textInfos.offsets.Offsets instance,
			or None when _rangeObj is given.
		@param _rangeObj: an existing Word range to wrap; it is duplicated so later
			mutations do not affect the caller's range.
		"""
		super(WordDocumentTextInfo,self).__init__(obj,position)
		if _rangeObj:
			self._rangeObj=_rangeObj.Duplicate
			return
		if isinstance(position,textInfos.Point):
			try:
				self._rangeObj=self.obj.WinwordDocumentObject.activeWindow.RangeFromPoint(position.x,position.y)
			except COMError:
				# Word could not provide a range at that screen point.
				raise NotImplementedError
		elif position==textInfos.POSITION_SELECTION:
			self._rangeObj=self.obj.WinwordSelectionObject.range
		elif position==textInfos.POSITION_CARET:
			# The caret is the selection collapsed to its start.
			self._rangeObj=self.obj.WinwordSelectionObject.range
			self._rangeObj.Collapse()
		elif position==textInfos.POSITION_ALL:
			self._rangeObj=self.obj.WinwordSelectionObject.range
			self._rangeObj.Expand(wdStory)
		elif position==textInfos.POSITION_FIRST:
			self._rangeObj=self.obj.WinwordSelectionObject.range
			self._rangeObj.SetRange(0,0)
		elif position==textInfos.POSITION_LAST:
			self._rangeObj=self.obj.WinwordSelectionObject.range
			self._rangeObj.endOf(wdStory)
			# Step back one character so the position is on the last character, not past it.
			self._rangeObj.move(wdCharacter,-1)
		elif isinstance(position,textInfos.offsets.Offsets):
			self._rangeObj=self.obj.WinwordSelectionObject.range
			self._rangeObj.SetRange(position.startOffset,position.endOffset)
		else:
			raise NotImplementedError("position: %s"%position)
	def getTextWithFields(self,formatConfig=None):
		"""
		Fetch the range's text plus formatting/control fields via the in-process helper,
		falling back to plain text if the helper fails.
		@param formatConfig: a documentFormatting-style dict; defaults to the user's configuration.
		"""
		if self.isCollapsed: return []
		if self.obj.ignoreFormatting:
			return [self.text]
		extraDetail=formatConfig.get('extraDetail',False) if formatConfig else False
		if not formatConfig:
			formatConfig=config.conf['documentFormatting']
			formatConfig['autoLanguageSwitching']=config.conf['speech'].get('autoLanguageSwitching',False)
		startOffset=self._rangeObj.start
		endOffset=self._rangeObj.end
		text=BSTR()
		# Pack the enabled formatting options into the bitfield the helper expects.
		formatConfigFlags=sum(y for x,y in formatConfigFlagsMap.iteritems() if formatConfig.get(x,False))
		if self.shouldIncludeLayoutTables:
			formatConfigFlags+=formatConfigFlag_includeLayoutTables
		if self.obj.ignoreEditorRevisions:
			formatConfigFlags&=~formatConfigFlagsMap['reportRevisions']
		res=NVDAHelper.localLib.nvdaInProcUtils_winword_getTextInRange(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,startOffset,endOffset,formatConfigFlags,ctypes.byref(text))
		if res or not text:
			log.debugWarning("winword_getTextInRange failed with %d"%res)
			return [self.text]
		# The helper returns XML; parse it into NVDA text/field commands and
		# normalize each field into NVDA's vocabulary.
		commandList=XMLFormatting.XMLTextParser().parse(text.value)
		for index,item in enumerate(commandList):
			if isinstance(item,textInfos.FieldCommand):
				field=item.field
				if isinstance(field,textInfos.ControlField):
					item.field=self._normalizeControlField(field)
				elif isinstance(field,textInfos.FormatField):
					item.field=self._normalizeFormatField(field,extraDetail=extraDetail)
			elif index>0 and isinstance(item,basestring) and item.isspace():
				#2047: don't expose language for whitespace as its incorrect for east-asian languages
				lastItem=commandList[index-1]
				if isinstance(lastItem,textInfos.FieldCommand) and isinstance(lastItem.field,textInfos.FormatField):
					try:
						del lastItem.field['language']
					except KeyError:
						pass
		return commandList
	def _normalizeControlField(self,field):
		"""
		Convert a raw control field from the in-process helper into NVDA's
		vocabulary: map role strings / Word field types / content control types
		to NVDA roles, and attach states, names and table coordinates.
		"""
		role=field.pop('role',None)
		if role=="heading":
			role=controlTypes.ROLE_HEADING
		elif role=="table":
			role=controlTypes.ROLE_TABLE
			field['table-rowcount']=int(field.get('table-rowcount',0))
			field['table-columncount']=int(field.get('table-columncount',0))
		elif role=="tableCell":
			role=controlTypes.ROLE_TABLECELL
			field['table-rownumber']=int(field.get('table-rownumber',0))
			field['table-columnnumber']=int(field.get('table-columnnumber',0))
		elif role=="footnote":
			role=controlTypes.ROLE_FOOTNOTE
		elif role=="endnote":
			role=controlTypes.ROLE_ENDNOTE
		elif role=="graphic":
			role=controlTypes.ROLE_GRAPHIC
		elif role=="object":
			progid=field.get("progid")
			if progid and progid.startswith("Equation.DSMT"):
				# MathType.
				role=controlTypes.ROLE_MATH
			else:
				role=controlTypes.ROLE_EMBEDDEDOBJECT
		else:
			# No explicit role string: try Word form fields, then content controls.
			fieldType=int(field.pop('wdFieldType',-1))
			if fieldType!=-1:
				role=wdFieldTypesToNVDARoles.get(fieldType,controlTypes.ROLE_UNKNOWN)
				if fieldType==wdFieldFormCheckBox and int(field.get('wdFieldResult','0'))>0:
					field['states']=set([controlTypes.STATE_CHECKED])
				elif fieldType==wdFieldFormDropDown:
					field['value']=field.get('wdFieldResult',None)
				fieldStatusText=field.pop('wdFieldStatusText',None)
				if fieldStatusText:
					field['name']=fieldStatusText
					field['alwaysReportName']=True
			else:
				# NOTE(review): wdContentControlType is fetched with get (not popped),
				# unlike wdFieldType above — confirm this asymmetry is intentional.
				fieldType=int(field.get('wdContentControlType',-1))
				if fieldType!=-1:
					role=wdContentControlTypesToNVDARoles.get(fieldType,controlTypes.ROLE_UNKNOWN)
					if role==controlTypes.ROLE_CHECKBOX:
						fieldChecked=bool(int(field.get('wdContentControlChecked','0')))
						if fieldChecked:
							field['states']=set([controlTypes.STATE_CHECKED])
					fieldTitle=field.get('wdContentControlTitle',None)
					if fieldTitle:
						field['name']=fieldTitle
						field['alwaysReportName']=True
		if role is not None: field['role']=role
		if role==controlTypes.ROLE_TABLE and field.get('longdescription'):
			field['states']=set([controlTypes.STATE_HASLONGDESC])
		# A story type marks frames such as headers/footers/footnotes.
		storyType=int(field.pop('wdStoryType',0))
		if storyType:
			name=storyTypeLocalizedLabels.get(storyType,None)
			if name:
				field['name']=name
				field['alwaysReportName']=True
				field['role']=controlTypes.ROLE_FRAME
		# Hack support for lazy fetching of row and column header text values
		class ControlField(textInfos.ControlField):
			def get(d,name,default=None):
				if name=="table-rowheadertext":
					try:
						cell=self._rangeObj.cells[1]
					except IndexError:
						log.debugWarning("no cells for table row, possibly on end of cell mark")
						return super(ControlField,d).get(name,default)
					return self.obj.fetchAssociatedHeaderCellText(cell,False)
				elif name=="table-columnheadertext":
					try:
						cell=self._rangeObj.cells[1]
					except IndexError:
						log.debugWarning("no cells for table row, possibly on end of cell mark")
						return super(ControlField,d).get(name,default)
					return self.obj.fetchAssociatedHeaderCellText(cell,True)
				else:
					return super(ControlField,d).get(name,default)
		newField=ControlField()
		newField.update(field)
		return newField
	def _normalizeFormatField(self,field,extraDetail=False):
		"""
		Convert a raw format field from the in-process helper into NVDA's
		vocabulary: line spacing, revision state, color, language and indents.
		"""
		# Popped but currently unused here — NOTE(review): confirm whether any
		# caller still needs these offsets before removing them upstream.
		_startOffset=int(field.pop('_startOffset'))
		_endOffset=int(field.pop('_endOffset'))
		lineSpacingRule=field.pop('wdLineSpacingRule',None)
		lineSpacingVal=field.pop('wdLineSpacing',None)
		if lineSpacingRule is not None:
			lineSpacingRule=int(lineSpacingRule)
			if lineSpacingRule==wdLineSpaceSingle:
				# Translators: single line spacing
				field['line-spacing']=pgettext('line spacing value',"single")
			elif lineSpacingRule==wdLineSpaceDouble:
				# Translators: double line spacing
				field['line-spacing']=pgettext('line spacing value',"double")
			elif lineSpacingRule==wdLineSpace1pt5:
				# Translators: line spacing of 1.5 lines
				field['line-spacing']=pgettext('line spacing value',"1.5 lines")
			elif lineSpacingRule==wdLineSpaceExactly:
				# Translators: exact (minimum) line spacing
				field['line-spacing']=pgettext('line spacing value',"exact")
			elif lineSpacingRule==wdLineSpaceAtLeast:
				# Translators: line spacing of at least x point
				field['line-spacing']=pgettext('line spacing value',"at least %.1f pt")%float(lineSpacingVal)
			elif lineSpacingRule==wdLineSpaceMultiple:
				# Word reports multiple spacing in points; 12 points per line.
				# Translators: line spacing of x lines
				field['line-spacing']=pgettext('line spacing value',"%.1f lines")%(float(lineSpacingVal)/12.0)
		revisionType=int(field.pop('wdRevisionType',0))
		if revisionType==wdRevisionInsert:
			field['revision-insertion']=True
		elif revisionType==wdRevisionDelete:
			field['revision-deletion']=True
		elif revisionType:
			revisionLabel=wdRevisionTypeLabels.get(revisionType,None)
			if revisionLabel:
				field['revision']=revisionLabel
		color=field.pop('color',None)
		if color is not None:
			field['color']=self.obj.winwordColorToNVDAColor(int(color))
		try:
			languageId = int(field.pop('wdLanguageId',0))
			if languageId:
				field['language']=self._getLanguageFromLcid(languageId)
		except:
			log.debugWarning("language error",exc_info=True)
			pass
		# Convert indent measurements from points to localized text; drop
		# near-zero values entirely.
		for x in ("first-line-indent","left-indent","right-indent","hanging-indent"):
			v=field.get(x)
			if not v: continue
			v=float(v)
			if abs(v)<0.001:
				v=None
			else:
				v=self.obj.getLocalizedMeasurementTextForPointSize(v)
			field[x]=v
		return field
def _getLanguageFromLcid(self, lcid):
"""
gets a normalized locale from a lcid
"""
lang = locale.windows_locale[lcid]
if lang:
return languageHandler.normalizeLanguage(lang)
	def expand(self,unit):
		"""Expand this range to fully contain the given NVDA unit."""
		if unit==textInfos.UNIT_LINE:
			try:
				# In a table but not inside any cell (presumably the end-of-row
				# mark — confirm): expanding by line is unreliable, so expand by
				# character instead.
				if self._rangeObj.tables.count>0 and self._rangeObj.cells.count==0:
					unit=textInfos.UNIT_CHARACTER
			except COMError:
				pass
		if unit==textInfos.UNIT_LINE:
			self._expandToLineAtCaret()
		elif unit==textInfos.UNIT_CHARACTER:
			self._rangeObj.moveEnd(wdCharacter,1)
		elif unit in NVDAUnitsToWordUnits:
			self._rangeObj.Expand(NVDAUnitsToWordUnits[unit])
		else:
			raise NotImplementedError("unit: %s"%unit)
def compareEndPoints(self,other,which):
if which=="startToStart":
diff=self._rangeObj.Start-other._rangeObj.Start
elif which=="startToEnd":
diff=self._rangeObj.Start-other._rangeObj.End
elif which=="endToStart":
diff=self._rangeObj.End-other._rangeObj.Start
elif which=="endToEnd":
diff=self._rangeObj.End-other._rangeObj.End
else:
raise ValueError("bad argument - which: %s"%which)
if diff<0:
diff=-1
elif diff>0:
diff=1
return diff
def setEndPoint(self,other,which):
if which=="startToStart":
self._rangeObj.Start=other._rangeObj.Start
elif which=="startToEnd":
self._rangeObj.Start=other._rangeObj.End
elif which=="endToStart":
self._rangeObj.End=other._rangeObj.Start
elif which=="endToEnd":
self._rangeObj.End=other._rangeObj.End
else:
raise ValueError("bad argument - which: %s"%which)
def _get_isCollapsed(self):
if self._rangeObj.Start==self._rangeObj.End:
return True
else:
return False
	def collapse(self,end=False):
		"""
		Collapse this range to its start (default) or its end.
		@raise RuntimeError: if collapsing to the end actually moved the end backwards.
		"""
		if end:
			oldEndOffset=self._rangeObj.end
		self._rangeObj.collapse(wdCollapseEnd if end else wdCollapseStart)
		# NOTE(review): Word can apparently move the end backwards when
		# collapsing to end in some situations; raise so callers can detect it.
		# Confirm the exact scenario this guards against.
		if end and self._rangeObj.end<oldEndOffset:
			raise RuntimeError
	def copy(self):
		# Return a new TextInfo over this range; the underlying Word range is
		# duplicated by the constructor.
		return WordDocumentTextInfo(self.obj,None,_rangeObj=self._rangeObj)
def _get_text(self):
text=self._rangeObj.text
if not text:
text=""
return text
	def _move(self,unit,direction,endPoint=None,_rangeObj=None):
		"""
		Move this range (or one of its endpoints) by the given NVDA unit.
		@param endPoint: "start", "end", or None to move the whole (collapsed) range.
		@param _rangeObj: an alternative Word range to operate on; defaults to this range.
		@return: the number of units actually moved (0 means the move failed).
		"""
		if not _rangeObj:
			_rangeObj=self._rangeObj
		if unit in NVDAUnitsToWordUnits:
			unit=NVDAUnitsToWordUnits[unit]
		else:
			raise NotImplementedError("unit: %s"%unit)
		if endPoint=="start":
			moveFunc=_rangeObj.MoveStart
		elif endPoint=="end":
			moveFunc=_rangeObj.MoveEnd
		else:
			moveFunc=_rangeObj.Move
		res=moveFunc(unit,direction)
		#units higher than character and word expand to contain the last text plus the insertion point offset in the document
		#However move from a character before will incorrectly move to this offset which makes move/expand contridictory to each other
		#Make sure that move fails if it lands on the final offset but the unit is bigger than character/word
		if direction>0 and endPoint!="end" and unit not in (wdCharacter,wdWord) and (_rangeObj.start+1)==self.obj.WinwordDocumentObject.characters.count:
			return 0
		return res
	def move(self,unit,direction,endPoint=None):
		"""
		Move by the given unit. Line movement is special-cased: it is performed
		via an in-process helper (temporarily using the selection), with
		fallbacks for tables with merged rows and for document edges.
		"""
		if unit!=textInfos.UNIT_LINE:
			return self._move(unit,direction,endPoint)
		if direction==0 or direction>1 or direction<-1:
			raise NotImplementedError("moving by line is only supported collapsed and with a count of 1 or -1")
		oldOffset=self._rangeObj.end if endPoint=="end" else self._rangeObj.start
		newOffset=ctypes.c_long()
		# Try moving by line making use of the selection temporarily
		res=NVDAHelper.localLib.nvdaInProcUtils_winword_moveByLine(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,oldOffset,1 if direction<0 else 0,ctypes.byref(newOffset))
		if res==0:
			# Helper succeeded: report the requested direction as units moved.
			res=direction
		newOffset=newOffset.value
		if direction<0 and not endPoint and newOffset==oldOffset:
			# Moving backwards by line seemed to not move.
			# Therefore fallback to moving back a character, expanding to line and collapsing to start instead.
			self.move(textInfos.UNIT_CHARACTER,-1)
			self.expand(unit)
			self.collapse()
		elif direction>0 and not endPoint and newOffset<oldOffset:
			# Moving forward by line seems to have wrapped back before the original position
			# This can happen in some tables with merged rows.
			# Try moving forward by cell, but if that fails, jump past the entire table.
			res=self.move(textInfos.UNIT_CELL,direction,endPoint)
			if res==0:
				self.expand(textInfos.UNIT_TABLE)
				self.collapse(end=True)
		else:
			# the move by line using the selection succeeded. Therefore update this TextInfo's position.
			if not endPoint:
				self._rangeObj.setRange(newOffset,newOffset)
			elif endPoint=="start":
				self._rangeObj.start=newOffset
			elif endPoint=="end":
				self._rangeObj.end=newOffset
		return res
	def _get_bookmark(self):
		# A lightweight snapshot of the position: just the character offsets.
		return textInfos.offsets.Offsets(self._rangeObj.Start,self._rangeObj.End)
	def updateCaret(self):
		# Scroll this position into view and collapse the selection to its start.
		self.obj.WinwordWindowObject.ScrollIntoView(self._rangeObj)
		self.obj.WinwordSelectionObject.SetRange(self._rangeObj.Start,self._rangeObj.Start)
	def updateSelection(self):
		# Scroll this range into view and select it in the document.
		self.obj.WinwordWindowObject.ScrollIntoView(self._rangeObj)
		self.obj.WinwordSelectionObject.SetRange(self._rangeObj.Start,self._rangeObj.End)
def getMathMl(self, field):
try:
import mathType
except:
raise LookupError("MathType not installed")
range = self._rangeObj.Duplicate
range.Start = int(field["shapeoffset"])
obj = range.InlineShapes[0].OLEFormat
try:
return mathType.getMathMl(obj)
except:
raise LookupError("Couldn't get MathML from MathType")
class WordDocumentTextInfoForTreeInterceptor(WordDocumentTextInfo):
	"""TextInfo used within browse mode which honours the user's layout table setting."""
	def _get_shouldIncludeLayoutTables(self):
		# Unlike the base class (always True), respect the user's document
		# formatting configuration.
		return config.conf['documentFormatting']['includeLayoutTables']
class BrowseModeWordDocumentTextInfo(browseMode.BrowseModeDocumentTextInfo,treeInterceptorHandler.RootProxyTextInfo):
	"""Browse mode TextInfo for Word documents, proxying to L{WordDocumentTextInfoForTreeInterceptor}."""
	def __init__(self,obj,position,_rangeObj=None):
		# Allow a WordDocument object itself to be passed as a position,
		# treating it as the caret position.
		if isinstance(position,WordDocument):
			position=textInfos.POSITION_CARET
		super(BrowseModeWordDocumentTextInfo,self).__init__(obj,position,_rangeObj=_rangeObj)
	InnerTextInfoClass=WordDocumentTextInfoForTreeInterceptor
	def _get_focusableNVDAObjectAtStart(self):
		# The document itself is the focusable object for any position.
		return self.obj.rootNVDAObject
class WordDocumentTreeInterceptor(browseMode.BrowseModeDocumentTreeInterceptor):
TextInfo=BrowseModeWordDocumentTextInfo
	def _activateLongDesc(self,controlField):
		# Present the table's long description in a browseable message window.
		longDesc=controlField.get('longdescription')
		# Translators: the title of the message dialog displaying an MS Word table description.
		ui.browseableMessage(longDesc,_("Table description"))
	def _get_isAlive(self):
		# The interceptor stays alive as long as the document window exists.
		return winUser.isWindow(self.rootNVDAObject.windowHandle)
	def __contains__(self,obj):
		# Only the root document object itself belongs to this interceptor.
		return obj==self.rootNVDAObject
	def _get_ElementsListDialog(self):
		# Defer to the ElementsListDialog class defined elsewhere in this module.
		return ElementsListDialog
	def _iterHeadings(self,nodeType,direction,rangeObj,includeCurrent):
		"""
		Generator yielding L{WordDocumentHeadingQuickNavItem}s, walking the
		document with Word's goto-heading commands.
		@param nodeType: "heading" for any level, or "headingN" to restrict to level N.
		"""
		# A nodeType such as "heading3" encodes the required outline level after "heading".
		neededLevel=int(nodeType[7:]) if len(nodeType)>7 else 0
		isFirst=True
		while True:
			if not isFirst or includeCurrent:
				level=rangeObj.paragraphs[1].outlineLevel
				# Only outline levels 1..9 count as headings here.
				if level and 0<level<10 and (not neededLevel or neededLevel==level):
					rangeObj.expand(wdParagraph)
					yield WordDocumentHeadingQuickNavItem(nodeType,self,BrowseModeWordDocumentTextInfo(self,None,_rangeObj=rangeObj),level)
			isFirst=False
			if direction=="next":
				newRangeObj=rangeObj.gotoNext(wdGoToHeading)
				# Stop when goto fails to advance: no more headings.
				if not newRangeObj or newRangeObj.start<=rangeObj.start:
					break
			elif direction=="previous":
				newRangeObj=rangeObj.gotoPrevious(wdGoToHeading)
				if not newRangeObj or newRangeObj.start>=rangeObj.start:
					break
			rangeObj=newRangeObj
def _iterNodesByType(self,nodeType,direction="next",pos=None):
if pos:
rangeObj=pos.innerTextInfo._rangeObj
else:
rangeObj=self.rootNVDAObject.WinwordDocumentObject.range(0,0)
includeCurrent=False if pos else True
if nodeType=="link":
return LinkWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType=="annotation":
comments=CommentWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
revisions=RevisionWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
return browseMode.mergeQuickNavItemIterators([comments,revisions],direction)
elif nodeType in ("table","container"):
return TableWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType=="graphic":
return GraphicWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType.startswith('heading'):
return self._iterHeadings(nodeType,direction,rangeObj,includeCurrent)
else:
raise NotImplementedError
def _activatePosition(self, info=None):
if not info:
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.activate()
def script_nextRow(self,gesture):
self.rootNVDAObject._moveInTable(row=True,forward=True)
braille.handler.handleCaretMove(self)
def script_previousRow(self,gesture):
self.rootNVDAObject._moveInTable(row=True,forward=False)
braille.handler.handleCaretMove(self)
def script_nextColumn(self,gesture):
self.rootNVDAObject._moveInTable(row=False,forward=True)
braille.handler.handleCaretMove(self)
def script_previousColumn(self,gesture):
self.rootNVDAObject._moveInTable(row=False,forward=False)
braille.handler.handleCaretMove(self)
__gestures={
"kb:tab":"trapNonCommandGesture",
"kb:shift+tab":"trapNonCommandGesture",
"kb:control+alt+upArrow": "previousRow",
"kb:control+alt+downArrow": "nextRow",
"kb:control+alt+leftArrow": "previousColumn",
"kb:control+alt+rightArrow": "nextColumn",
# We want to fall back to MS Word's real page up and page down, rather than browseMode's faked 25 lines
"kb:pageUp":None,
"kb:pageDown":None,
"kb:shift+pageUp":None,
"kb:shift+pageDown":None,
}
class WordDocument(EditableTextWithoutAutoSelectDetection, Window):
    # NVDA object for a Microsoft Word document, driven via the Word COM object model.
    treeInterceptorClass=WordDocumentTreeInterceptor
    # Browse mode is not entered automatically; the user switches to it manually.
    shouldCreateTreeInterceptor=False
    TextInfo=WordDocumentTextInfo
def winwordColorToNVDAColor(self,val):
    # Convert a raw Word color value into a human-readable color name.
    # Word encodes colors either as plain COLORREF RGB values, a special
    # "automatic" sentinel, or a packed theme-color index with intensity data.
    if val>=0:
        # normal RGB value
        return colors.RGB.fromCOLORREF(val).name
    elif (val&0xffffffff)==0xff000000:
        # Translators: the default (automatic) color in Microsoft Word
        return _("default color")
    elif ((val>>28)&0xf)==0xd and ((val>>16)&0xff)==0x00:
        # An MS word color index Plus intensity
        # Made up of MS Word Theme Color index, hsv value ratio (MS Word darker percentage) and hsv saturation ratio (MS Word lighter percentage)
        # Info: http://www.wordarticles.com/Articles/Colours/2007.php#UIConsiderations
        saturationRatio=(val&0xff)/255.0
        valueRatio=((val>>8)&0xff)/255.0
        themeColorIndex=(val>>24)&0x0f
        # Convert the MS Word theme color index to an MS Office color scheme index
        schemeColorIndex=WdThemeColorIndexToMsoThemeColorSchemeIndex[themeColorIndex]
        # Lookup the rgb value for the MS Office scheme color index based on the current theme
        colorref=self.WinwordDocumentObject.documentTheme.themeColorScheme(schemeColorIndex).rgb
        # Convert the rgb value to hsv and apply the saturation and value ratios
        rgb=tuple(x/255.0 for x in colors.RGB.fromCOLORREF(colorref))
        hsv=colorsys.rgb_to_hsv(*rgb)
        hsv=(hsv[0],hsv[1]*saturationRatio,hsv[2]*valueRatio)
        rgb=colorsys.hsv_to_rgb(*hsv)
        name=colors.RGB(rgb[0]*255,rgb[1]*255,rgb[2]*255).name
        return name
    else:
        raise ValueError("Unknown color format %x %x %x %x"%((val>>24)&0xff,(val>>16)&0xff,(val>>8)&0xff,val&0xff))
def _get_ignoreEditorRevisions(self):
    # True when Word's view is configured to hide revisions and comments,
    # in which case NVDA should not report them either.
    try:
        ignore=not self.WinwordWindowObject.view.showRevisionsAndComments
    except COMError:
        # Some document types/views do not expose this setting; default to reporting.
        log.debugWarning("showRevisionsAndComments",exc_info=True)
        ignore=False
    # Cache the result on the instance so the COM call is only made once.
    self.ignoreEditorRevisions=ignore
    return ignore
#: True if formatting should be ignored (text only) such as for spellCheck error field
ignoreFormatting=False
def __init__(self,*args,**kwargs):
    super(WordDocument,self).__init__(*args,**kwargs)
def event_caret(self):
    # Suppress caret events when the selection has not actually changed,
    # to avoid redundant speech/braille updates from Word's noisy events.
    curSelectionPos=self.makeTextInfo(textInfos.POSITION_SELECTION)
    lastSelectionPos=getattr(self,'_lastSelectionPos',None)
    self._lastSelectionPos=curSelectionPos
    if lastSelectionPos:
        if curSelectionPos._rangeObj.isEqual(lastSelectionPos._rangeObj):
            return
    super(WordDocument,self).event_caret()
def _get_role(self):
    return controlTypes.ROLE_EDITABLETEXT
def _get_states(self):
    # Word documents are always multi-line editable text.
    states=super(WordDocument,self).states
    states.add(controlTypes.STATE_MULTILINE)
    return states
def populateHeaderCellTrackerFromHeaderRows(self,headerCellTracker,table):
    # Record any rows Word itself marks as heading rows (repeat-on-each-page rows)
    # as column headers in the tracker. Only a contiguous run from the top counts.
    rows=table.rows
    numHeaderRows=0
    for rowIndex in xrange(rows.count):
        try:
            # Word collections are 1-based.
            row=rows.item(rowIndex+1)
        except COMError:
            break
        try:
            headingFormat=row.headingFormat
        except (COMError,AttributeError,NameError):
            headingFormat=0
        if headingFormat==-1: # is a header row
            numHeaderRows+=1
        else:
            # Header rows must be contiguous from the top; stop at the first non-header.
            break
    if numHeaderRows>0:
        headerCellTracker.addHeaderCellInfo(rowNumber=1,columnNumber=1,rowSpan=numHeaderRows,isColumnHeader=True,isRowHeader=False)
def populateHeaderCellTrackerFromBookmarks(self,headerCellTracker,bookmarks):
    # Record header cells marked via specially-named bookmarks
    # (the Title/ColumnTitle/RowTitle convention, also used by JAWS).
    for x in bookmarks:
        name=x.name
        lowerName=name.lower()
        isColumnHeader=isRowHeader=False
        if lowerName.startswith('title'):
            # "title" marks both a row and a column header.
            isColumnHeader=isRowHeader=True
        elif lowerName.startswith('columntitle'):
            isColumnHeader=True
        elif lowerName.startswith('rowtitle'):
            isRowHeader=True
        else:
            # Not a header bookmark; ignore.
            continue
        try:
            headerCell=x.range.cells.item(1)
        except COMError:
            # Bookmark is not inside a table cell.
            continue
        headerCellTracker.addHeaderCellInfo(rowNumber=headerCell.rowIndex,columnNumber=headerCell.columnIndex,name=name,isColumnHeader=isColumnHeader,isRowHeader=isRowHeader)
# Cache of the most recently used table and its header cell tracker,
# so repeated lookups within the same table avoid re-scanning bookmarks and rows.
_curHeaderCellTrackerTable=None
_curHeaderCellTracker=None
def getHeaderCellTrackerForTable(self,table):
    # Return a HeaderCellTracker for the given table, rebuilding it only
    # when the table differs from the cached one.
    tableRange=table.range
    if not self._curHeaderCellTrackerTable or not tableRange.isEqual(self._curHeaderCellTrackerTable.range):
        self._curHeaderCellTracker=HeaderCellTracker()
        self.populateHeaderCellTrackerFromBookmarks(self._curHeaderCellTracker,tableRange.bookmarks)
        self.populateHeaderCellTrackerFromHeaderRows(self._curHeaderCellTracker,table)
        self._curHeaderCellTrackerTable=table
    return self._curHeaderCellTracker
def setAsHeaderCell(self,cell,isColumnHeader=False,isRowHeader=False):
    # Mark the given cell as the start of column and/or row headers by adding
    # a specially-named bookmark at its range. Returns False if nothing changed.
    rowNumber=cell.rowIndex
    columnNumber=cell.columnIndex
    headerCellTracker=self.getHeaderCellTrackerForTable(cell.range.tables[1])
    oldInfo=headerCellTracker.getHeaderCellInfoAt(rowNumber,columnNumber)
    if oldInfo:
        # Upgrade an existing marker (e.g. column-only becomes column+row);
        # bail out if the requested role is already set.
        if isColumnHeader and not oldInfo.isColumnHeader:
            oldInfo.isColumnHeader=True
        elif isRowHeader and not oldInfo.isRowHeader:
            oldInfo.isRowHeader=True
        else:
            return False
        isColumnHeader=oldInfo.isColumnHeader
        isRowHeader=oldInfo.isRowHeader
    # Choose the bookmark name prefix encoding the header roles.
    if isColumnHeader and isRowHeader:
        name="Title_"
    elif isRowHeader:
        name="RowTitle_"
    elif isColumnHeader:
        name="ColumnTitle_"
    else:
        raise ValueError("One or both of isColumnHeader or isRowHeader must be True")
    # Suffix with a UUID so multiple header bookmarks never collide.
    name+=uuid.uuid4().hex
    if oldInfo:
        # Replace the old bookmark with one carrying the updated roles.
        self.WinwordDocumentObject.bookmarks[oldInfo.name].delete()
        oldInfo.name=name
    else:
        headerCellTracker.addHeaderCellInfo(rowNumber=rowNumber,columnNumber=columnNumber,name=name,isColumnHeader=isColumnHeader,isRowHeader=isRowHeader)
    self.WinwordDocumentObject.bookmarks.add(name,cell.range)
    return True
def forgetHeaderCell(self,cell,isColumnHeader=False,isRowHeader=False):
    # Remove a header role from the given cell. Returns False when there was
    # nothing to remove for the requested role(s).
    rowNumber=cell.rowIndex
    columnNumber=cell.columnIndex
    if not isColumnHeader and not isRowHeader:
        return False
    headerCellTracker=self.getHeaderCellTrackerForTable(cell.range.tables[1])
    info=headerCellTracker.getHeaderCellInfoAt(rowNumber,columnNumber)
    # Only bookmark-backed markers (those with a name) can be forgotten.
    if not info or not hasattr(info,'name'):
        return False
    if isColumnHeader and info.isColumnHeader:
        info.isColumnHeader=False
    elif isRowHeader and info.isRowHeader:
        info.isRowHeader=False
    else:
        return False
    headerCellTracker.removeHeaderCellInfo(info)
    self.WinwordDocumentObject.bookmarks(info.name).delete()
    # If the cell still has the other role, re-add it as a header with just that role.
    if info.isColumnHeader or info.isRowHeader:
        self.setAsHeaderCell(cell,isColumnHeader=info.isColumnHeader,isRowHeader=info.isRowHeader)
    return True
def fetchAssociatedHeaderCellText(self,cell,columnHeader=False):
    # Fetch the text of the header cells associated with the given table cell.
    # cell: a Word COM cell object whose headers should be located.
    # columnHeader: if True, fetch column header text; otherwise row header text.
    # Returns the space-joined header text, or None when no non-empty header exists.
    table=cell.range.tables[1]
    rowNumber=cell.rowIndex
    columnNumber=cell.columnIndex
    headerCellTracker=self.getHeaderCellTrackerForTable(table)
    for info in headerCellTracker.iterPossibleHeaderCellInfosFor(rowNumber,columnNumber,columnHeader=columnHeader):
        textList=[]
        if columnHeader:
            for headerRowNumber in xrange(info.rowNumber,info.rowNumber+info.rowSpan):
                # Walk left from the current column until a real (non-merged) cell is found.
                # Fix: previously headerCell could be unbound (NameError) or left stale
                # from an earlier iteration when every cell lookup raised COMError.
                headerCell=None
                tempColumnNumber=columnNumber
                while tempColumnNumber>=1:
                    try:
                        headerCell=table.cell(headerRowNumber,tempColumnNumber)
                    except COMError:
                        # Merged/missing cell: try the column to the left.
                        headerCell=None
                        tempColumnNumber-=1
                        continue
                    break
                if headerCell is not None:
                    textList.append(headerCell.range.text)
        else:
            for headerColumnNumber in xrange(info.columnNumber,info.columnNumber+info.colSpan):
                # Walk up from the current row until a real (non-merged) cell is found.
                headerCell=None
                tempRowNumber=rowNumber
                while tempRowNumber>=1:
                    try:
                        headerCell=table.cell(tempRowNumber,headerColumnNumber)
                    except COMError:
                        # Merged/missing cell: try the row above.
                        headerCell=None
                        tempRowNumber-=1
                        continue
                    break
                if headerCell is not None:
                    textList.append(headerCell.range.text)
        text=" ".join(textList)
        if text:
            return text
def script_setColumnHeader(self,gesture):
    # Pressed once: mark the current cell as the start of column headers.
    # Pressed twice: forget the column header marker for this cell.
    scriptCount=scriptHandler.getLastScriptRepeatCount()
    if not config.conf['documentFormatting']['reportTableHeaders']:
        # Translators: a message reported in the SetColumnHeader script for Microsoft Word.
        ui.message(_("Cannot set headers. Please enable reporting of table headers in Document Formatting Settings"))
        return
    try:
        cell=self.WinwordSelectionObject.cells[1]
    except COMError:
        # Translators: a message when trying to perform an action on a cell when not in one in Microsoft word
        ui.message(_("Not in a table cell"))
        return
    if scriptCount==0:
        if self.setAsHeaderCell(cell,isColumnHeader=True,isRowHeader=False):
            # Translators: a message reported in the SetColumnHeader script for Microsoft Word.
            ui.message(_("Set row {rowNumber} column {columnNumber} as start of column headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
        else:
            # Translators: a message reported in the SetColumnHeader script for Microsoft Word.
            ui.message(_("Already set row {rowNumber} column {columnNumber} as start of column headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
    elif scriptCount==1:
        if self.forgetHeaderCell(cell,isColumnHeader=True,isRowHeader=False):
            # Translators: a message reported in the SetColumnHeader script for Microsoft Word.
            ui.message(_("Removed row {rowNumber} column {columnNumber} from column headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
        else:
            # Translators: a message reported in the SetColumnHeader script for Microsoft Word.
            ui.message(_("Cannot find row {rowNumber} column {columnNumber} in column headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
script_setColumnHeader.__doc__=_("Pressing once will set this cell as the first column header for any cells lower and to the right of it within this table. Pressing twice will forget the current column header for this cell.")
def script_setRowHeader(self,gesture):
    # Pressed once: mark the current cell as the start of row headers.
    # Pressed twice: forget the row header marker for this cell.
    scriptCount=scriptHandler.getLastScriptRepeatCount()
    if not config.conf['documentFormatting']['reportTableHeaders']:
        # Translators: a message reported in the SetRowHeader script for Microsoft Word.
        ui.message(_("Cannot set headers. Please enable reporting of table headers in Document Formatting Settings"))
        return
    try:
        cell=self.WinwordSelectionObject.cells[1]
    except COMError:
        # Translators: a message when trying to perform an action on a cell when not in one in Microsoft word
        ui.message(_("Not in a table cell"))
        return
    if scriptCount==0:
        if self.setAsHeaderCell(cell,isColumnHeader=False,isRowHeader=True):
            # Translators: a message reported in the SetRowHeader script for Microsoft Word.
            ui.message(_("Set row {rowNumber} column {columnNumber} as start of row headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
        else:
            # Translators: a message reported in the SetRowHeader script for Microsoft Word.
            ui.message(_("Already set row {rowNumber} column {columnNumber} as start of row headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
    elif scriptCount==1:
        if self.forgetHeaderCell(cell,isColumnHeader=False,isRowHeader=True):
            # Translators: a message reported in the SetRowHeader script for Microsoft Word.
            ui.message(_("Removed row {rowNumber} column {columnNumber} from row headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
        else:
            # Translators: a message reported in the SetRowHeader script for Microsoft Word.
            ui.message(_("Cannot find row {rowNumber} column {columnNumber} in row headers").format(rowNumber=cell.rowIndex,columnNumber=cell.columnIndex))
script_setRowHeader.__doc__=_("Pressing once will set this cell as the first row header for any cells lower and to the right of it within this table. Pressing twice will forget the current row header for this cell.")
def script_reportCurrentHeaders(self,gesture):
    # Report the row and column header text for the current table cell.
    # NOTE(review): this message is not wrapped in _() and so is not translatable —
    # presumably an oversight; confirm before localizing.
    cell=self.WinwordSelectionObject.cells[1]
    rowText=self.fetchAssociatedHeaderCellText(cell,False)
    columnText=self.fetchAssociatedHeaderCellText(cell,True)
    ui.message("Row %s, column %s"%(rowText or "empty",columnText or "empty"))
def _get_WinwordVersion(self):
    # Word application version as a float (e.g. 14.0), cached on first access.
    if not hasattr(self,'_WinwordVersion'):
        self._WinwordVersion=float(self.WinwordApplicationObject.version)
    return self._WinwordVersion
def _get_documentWindowHandle(self):
    return self.windowHandle
def _get_WinwordWindowObject(self):
    # Lazily fetch and cache the Word Window COM object via the window's native object model.
    if not getattr(self,'_WinwordWindowObject',None):
        try:
            pDispatch=oleacc.AccessibleObjectFromWindow(self.documentWindowHandle,winUser.OBJID_NATIVEOM,interface=comtypes.automation.IDispatch)
        except (COMError, WindowsError):
            log.debugWarning("Could not get MS Word object model from window %s with class %s"%(self.documentWindowHandle,winUser.getClassName(self.documentWindowHandle)),exc_info=True)
            return None
        self._WinwordWindowObject=comtypes.client.dynamic.Dispatch(pDispatch)
    return self._WinwordWindowObject
def _get_WinwordDocumentObject(self):
    # Lazily fetch and cache the Word Document COM object.
    if not getattr(self,'_WinwordDocumentObject',None):
        windowObject=self.WinwordWindowObject
        if not windowObject: return None
        self._WinwordDocumentObject=windowObject.document
    return self._WinwordDocumentObject
def _get_WinwordApplicationObject(self):
    # Lazily fetch and cache the Word Application COM object.
    if not getattr(self,'_WinwordApplicationObject',None):
        self._WinwordApplicationObject=self.WinwordWindowObject.application
    return self._WinwordApplicationObject
def _get_WinwordSelectionObject(self):
    # Lazily fetch and cache the Word Selection COM object.
    if not getattr(self,'_WinwordSelectionObject',None):
        windowObject=self.WinwordWindowObject
        if not windowObject: return None
        self._WinwordSelectionObject=windowObject.selection
    return self._WinwordSelectionObject
def _WaitForValueChangeForAction(self,action,fetcher,timeout=0.15):
    # Perform an action and poll until the fetched value changes or the timeout expires.
    # action: zero-argument callable performing the action (e.g. sending a gesture).
    # fetcher: zero-argument callable returning the value to watch.
    # timeout: maximum number of seconds to keep polling.
    # Returns the most recently fetched value, whether or not it changed.
    previousValue=fetcher()
    action()
    deadline=time.time()+timeout
    currentValue=fetcher()
    while currentValue==previousValue and time.time()<deadline:
        time.sleep(0.01)
        currentValue=fetcher()
    return currentValue
# Formatting toggle scripts: each passes the gesture through to Word, waits for
# the corresponding selection attribute to change, then announces the new state.
def script_toggleBold(self,gesture):
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.bold)
    if val:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Bold on"))
    else:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Bold off"))
def script_toggleItalic(self,gesture):
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.italic)
    if val:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Italic on"))
    else:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Italic off"))
def script_toggleUnderline(self,gesture):
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.underline)
    if val:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Underline on"))
    else:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Underline off"))
def script_toggleAlignment(self,gesture):
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.paragraphFormat.alignment)
    alignmentMessages={
        # Translators: an alignment in Microsoft Word
        wdAlignParagraphLeft:_("Left aligned"),
        # Translators: an alignment in Microsoft Word
        wdAlignParagraphCenter:_("centered"),
        # Translators: an alignment in Microsoft Word
        wdAlignParagraphRight:_("Right aligned"),
        # Translators: an alignment in Microsoft Word
        wdAlignParagraphJustify:_("Justified"),
    }
    msg=alignmentMessages.get(val)
    # Unknown alignment values are silently ignored.
    if msg:
        ui.message(msg)
def script_toggleSuperscriptSubscript(self,gesture):
    # Watch both superscript and subscript flags, as the toggles interact.
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: (self.WinwordSelectionObject.font.superscript,self.WinwordSelectionObject.font.subscript))
    if val[0]:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Superscript"))
    elif val[1]:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Subscript"))
    else:
        # Translators: a message when toggling formatting in Microsoft word
        ui.message(_("Baseline"))
def script_moveParagraphDown(self,gesture):
    # After Word moves the paragraph down (alt+shift+downArrow), report the
    # paragraph it now sits below.
    oldBookmark=self.makeTextInfo(textInfos.POSITION_CARET).bookmark
    gesture.send()
    if self._hasCaretMoved(oldBookmark)[0]:
        info=self.makeTextInfo(textInfos.POSITION_SELECTION)
        info.collapse()
        # Extend backwards over the previous paragraph to fetch its text.
        info.move(textInfos.UNIT_PARAGRAPH,-1,endPoint="start")
        lastParaText=info.text.strip()
        if lastParaText:
            # Translators: a message reported when a paragraph is moved below another paragraph
            ui.message(_("Moved below %s")%lastParaText)
        else:
            # Translators: a message reported when a paragraph is moved below a blank paragraph
            ui.message(_("Moved below blank paragraph"))
def script_moveParagraphUp(self,gesture):
    # After Word moves the paragraph up (alt+shift+upArrow), report the
    # paragraph it now sits above.
    oldBookmark=self.makeTextInfo(textInfos.POSITION_CARET).bookmark
    gesture.send()
    if self._hasCaretMoved(oldBookmark)[0]:
        info=self.makeTextInfo(textInfos.POSITION_SELECTION)
        info.collapse()
        # Skip over the moved paragraph to reach the one now following it.
        info.move(textInfos.UNIT_PARAGRAPH,1)
        info.expand(textInfos.UNIT_PARAGRAPH)
        lastParaText=info.text.strip()
        if lastParaText:
            # Translators: a message reported when a paragraph is moved above another paragraph
            ui.message(_("Moved above %s")%lastParaText)
        else:
            # Translators: a message reported when a paragraph is moved above a blank paragraph
            ui.message(_("Moved above blank paragraph"))
def script_increaseDecreaseOutlineLevel(self,gesture):
    # Report the new style and outline level after a promote/demote gesture.
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.paragraphFormat.outlineLevel)
    style=self.WinwordSelectionObject.style.nameLocal
    # Translators: the message when the outline level / style is changed in Microsoft word
    ui.message(_("{styleName} style, outline level {outlineLevel}").format(styleName=style,outlineLevel=val))
def script_increaseDecreaseFontSize(self,gesture):
    # Report the new font size after a grow/shrink font gesture.
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.size)
    # Translators: a message when increasing or decreasing font size in Microsoft Word
    ui.message(_("{size:g} point font").format(size=val))
def script_caret_moveByCell(self,gesture):
    # Pass the cell-movement gesture through to Word and, if the caret lands
    # inside a table, speak the current cell and update braille.
    # (Removed the unused local `isCollapsed`, which was assigned but never read.)
    gesture.send()
    info=self.makeTextInfo(textInfos.POSITION_SELECTION)
    inTable=info._rangeObj.tables.count>0
    if inTable:
        info.expand(textInfos.UNIT_CELL)
        speech.speakTextInfo(info,reason=controlTypes.REASON_FOCUS)
        braille.handler.handleCaretMove(self)
def script_tab(self,gesture):
    # Handle tab/shift+tab: report the new table cell, selection, or — when the
    # tab merely indented text — the resulting indent position.
    gesture.send()
    info=self.makeTextInfo(textInfos.POSITION_SELECTION)
    inTable=info._rangeObj.tables.count>0
    isCollapsed=info.isCollapsed
    if inTable and isCollapsed:
        # Tab moved to a new table cell; speak the whole cell.
        info.expand(textInfos.UNIT_CELL)
        isCollapsed=False
    if not isCollapsed:
        speech.speakTextInfo(info,reason=controlTypes.REASON_FOCUS)
    braille.handler.handleCaretMove(self)
    if isCollapsed:
        # Tab inserted an indent: report the caret's horizontal position on the page.
        offset=info._rangeObj.information(wdHorizontalPositionRelativeToPage)
        msg=self.getLocalizedMeasurementTextForPointSize(offset)
        ui.message(msg)
        # If the caret is at the start of a paragraph, also read the line.
        if info._rangeObj.paragraphs[1].range.start==info._rangeObj.start:
            info.expand(textInfos.UNIT_LINE)
            speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET)
def getLocalizedMeasurementTextForPointSize(self,offset):
    # Convert a distance in points into a localized measurement string,
    # honouring Word's configured measurement unit.
    # NOTE(review): implicitly returns None for any unit not handled below — confirm
    # callers tolerate that before adding new units.
    options=self.WinwordApplicationObject.options
    useCharacterUnit=options.useCharacterUnit
    if useCharacterUnit:
        # Express the distance in multiples of the current font size.
        offset=offset/self.WinwordSelectionObject.font.size
        # Translators: a measurement in Microsoft Word
        return _("{offset:.3g} characters").format(offset=offset)
    else:
        unit=options.measurementUnit
        if unit==wdInches:
            # 72 points per inch.
            offset=offset/72.0
            # Translators: a measurement in Microsoft Word
            return _("{offset:.3g} inches").format(offset=offset)
        elif unit==wdCentimeters:
            # 28.35 points per centimeter.
            offset=offset/28.35
            # Translators: a measurement in Microsoft Word
            return _("{offset:.3g} centimeters").format(offset=offset)
        elif unit==wdMillimeters:
            # 2.835 points per millimeter.
            offset=offset/2.835
            # Translators: a measurement in Microsoft Word
            return _("{offset:.3g} millimeters").format(offset=offset)
        elif unit==wdPoints:
            # Translators: a measurement in Microsoft Word
            return _("{offset:.3g} points").format(offset=offset)
        elif unit==wdPicas:
            # 12 points per pica.
            offset=offset/12.0
            # Translators: a measurement in Microsoft Word
            # See http://support.microsoft.com/kb/76388 for details.
            return _("{offset:.3g} picas").format(offset=offset)
def script_reportCurrentComment(self,gesture):
    # Report the text of the comment at the caret, if any.
    info=self.makeTextInfo(textInfos.POSITION_CARET)
    info.expand(textInfos.UNIT_CHARACTER)
    fields=info.getTextWithFields(formatConfig={'reportComments':True})
    # Search backwards through the fields for a comment reference on this character.
    for field in reversed(fields):
        if isinstance(field,textInfos.FieldCommand) and isinstance(field.field,textInfos.FormatField):
            commentReference=field.field.get('comment')
            if commentReference:
                # The reference is a document offset; look up the comment at that range.
                offset=int(commentReference)
                range=self.WinwordDocumentObject.range(offset,offset+1)
                try:
                    text=range.comments[1].range.text
                except COMError:
                    break
                if text:
                    ui.message(text)
                    return
    # Translators: a message when there is no comment to report in Microsoft Word
    ui.message(_("No comments"))
# Translators: a description for a script
script_reportCurrentComment.__doc__=_("Reports the text of the comment where the System caret is located.")
def script_changeLineSpacing(self,gesture):
    # Report the new line spacing after a spacing gesture (control+1/2/5).
    val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda:self.WinwordSelectionObject.ParagraphFormat.LineSpacingRule)
    if val == wdLineSpaceSingle:
        # Translators: a message when switching to single line spacing in Microsoft word
        ui.message(_("Single line spacing"))
    elif val == wdLineSpaceDouble:
        # Translators: a message when switching to double line spacing in Microsoft word
        ui.message(_("Double line spacing"))
    elif val == wdLineSpace1pt5:
        # Translators: a message when switching to 1.5 line spacing in Microsoft word
        ui.message(_("1.5 line spacing"))
def _moveInTable(self,row=True,forward=True):
    # Move the caret to the adjacent cell in the current table.
    # row=True moves between rows (up/down); row=False between columns (left/right).
    # forward selects the direction along that axis. Returns True on success.
    info=self.makeTextInfo(textInfos.POSITION_CARET)
    info.expand(textInfos.UNIT_CHARACTER)
    formatConfig=config.conf['documentFormatting'].copy()
    formatConfig['reportTables']=True
    commandList=info.getTextWithFields(formatConfig)
    # The first two fields must be the table and the cell the caret is in.
    if len(commandList)<3 or commandList[1].field.get('role',None)!=controlTypes.ROLE_TABLE or commandList[2].field.get('role',None)!=controlTypes.ROLE_TABLECELL:
        # Translators: The message reported when a user attempts to use a table movement command
        # when the cursor is not withnin a table.
        ui.message(_("Not in table"))
        return False
    rowCount=commandList[1].field.get('table-rowcount',1)
    columnCount=commandList[1].field.get('table-columncount',1)
    rowNumber=commandList[2].field.get('table-rownumber',1)
    columnNumber=commandList[2].field.get('table-columnnumber',1)
    try:
        table=info._rangeObj.tables[1]
    except COMError:
        log.debugWarning("Could not get MS Word table object indicated in XML")
        ui.message(_("Not in table"))
        return False
    _cell=table.cell
    # Normalize so "thisIndex" is the axis we are moving along and
    # "otherIndex" is the fixed axis; getCell maps them back to (row,column).
    getCell=lambda thisIndex,otherIndex: _cell(thisIndex,otherIndex) if row else _cell(otherIndex,thisIndex)
    thisIndex=rowNumber if row else columnNumber
    otherIndex=columnNumber if row else rowNumber
    thisLimit=(rowCount if row else columnCount) if forward else 1
    limitOp=operator.le if forward else operator.ge
    incdecFunc=operator.add if forward else operator.sub
    foundCell=None
    curOtherIndex=otherIndex
    # Search outward along the movement axis; if a cell is missing (merged),
    # retry with progressively smaller indexes on the fixed axis.
    while curOtherIndex>0:
        curThisIndex=incdecFunc(thisIndex,1)
        while limitOp(curThisIndex,thisLimit):
            try:
                foundCell=getCell(curThisIndex,curOtherIndex).range
            except COMError:
                pass
            if foundCell: break
            curThisIndex=incdecFunc(curThisIndex,1)
        if foundCell: break
        curOtherIndex-=1
    if not foundCell:
        ui.message(_("Edge of table"))
        return False
    newInfo=WordDocumentTextInfo(self,textInfos.POSITION_CARET,_rangeObj=foundCell)
    speech.speakTextInfo(newInfo,reason=controlTypes.REASON_CARET)
    # Collapse to the cell start and move the real caret there.
    newInfo.collapse()
    newInfo.updateCaret()
    return True
# Table navigation scripts: thin wrappers around _moveInTable.
def script_nextRow(self,gesture):
    self._moveInTable(row=True,forward=True)
def script_previousRow(self,gesture):
    self._moveInTable(row=True,forward=False)
def script_nextColumn(self,gesture):
    self._moveInTable(row=False,forward=True)
def script_previousColumn(self,gesture):
    self._moveInTable(row=False,forward=False)
def script_nextParagraph(self,gesture):
    info=self.makeTextInfo(textInfos.POSITION_CARET)
    # #4375: can't use self.move here as it may check document.characters.count which can take for ever on large documents.
    info._rangeObj.move(wdParagraph,1)
    info.updateCaret()
    self._caretScriptPostMovedHelper(textInfos.UNIT_PARAGRAPH,gesture,None)
script_nextParagraph.resumeSayAllMode=sayAllHandler.CURSOR_CARET
def script_previousParagraph(self,gesture):
    info=self.makeTextInfo(textInfos.POSITION_CARET)
    # #4375: keeping symmetrical with the nextParagraph script.
    info._rangeObj.move(wdParagraph,-1)
    info.updateCaret()
    self._caretScriptPostMovedHelper(textInfos.UNIT_PARAGRAPH,gesture,None)
script_previousParagraph.resumeSayAllMode=sayAllHandler.CURSOR_CARET
__gestures = {
    # Font size changes.
    "kb:control+[":"increaseDecreaseFontSize",
    "kb:control+]":"increaseDecreaseFontSize",
    "kb:control+shift+,":"increaseDecreaseFontSize",
    "kb:control+shift+.":"increaseDecreaseFontSize",
    # Formatting toggles.
    "kb:control+b":"toggleBold",
    "kb:control+i":"toggleItalic",
    "kb:control+u":"toggleUnderline",
    "kb:control+=":"toggleSuperscriptSubscript",
    "kb:control+shift+=":"toggleSuperscriptSubscript",
    # Paragraph alignment.
    "kb:control+l":"toggleAlignment",
    "kb:control+e":"toggleAlignment",
    "kb:control+r":"toggleAlignment",
    "kb:control+j":"toggleAlignment",
    # Paragraph movement and outline level changes.
    "kb:alt+shift+downArrow":"moveParagraphDown",
    "kb:alt+shift+upArrow":"moveParagraphUp",
    "kb:alt+shift+rightArrow":"increaseDecreaseOutlineLevel",
    "kb:alt+shift+leftArrow":"increaseDecreaseOutlineLevel",
    "kb:control+shift+n":"increaseDecreaseOutlineLevel",
    "kb:control+alt+1":"increaseDecreaseOutlineLevel",
    "kb:control+alt+2":"increaseDecreaseOutlineLevel",
    "kb:control+alt+3":"increaseDecreaseOutlineLevel",
    # Line spacing.
    "kb:control+1":"changeLineSpacing",
    "kb:control+2":"changeLineSpacing",
    "kb:control+5":"changeLineSpacing",
    "kb:tab": "tab",
    "kb:shift+tab": "tab",
    # Table header marking and reporting.
    "kb:NVDA+shift+c":"setColumnHeader",
    "kb:NVDA+shift+r":"setRowHeader",
    "kb:NVDA+shift+h":"reportCurrentHeaders",
    # Table cell navigation.
    "kb:control+alt+upArrow": "previousRow",
    "kb:control+alt+downArrow": "nextRow",
    "kb:control+alt+leftArrow": "previousColumn",
    "kb:control+alt+rightArrow": "nextColumn",
    "kb:control+downArrow":"nextParagraph",
    "kb:control+upArrow":"previousParagraph",
    # Word's own cell movement keys.
    "kb:alt+home":"caret_moveByCell",
    "kb:alt+end":"caret_moveByCell",
    "kb:alt+pageUp":"caret_moveByCell",
    "kb:alt+pageDown":"caret_moveByCell",
    "kb:alt+shift+home":"caret_changeSelection",
    "kb:alt+shift+end":"caret_changeSelection",
    "kb:alt+shift+pageUp":"caret_changeSelection",
    "kb:alt+shift+pageDown":"caret_changeSelection",
    "kb:control+pageUp": "caret_moveByLine",
    "kb:control+pageDown": "caret_moveByLine",
    "kb:NVDA+alt+c":"reportCurrentComment",
}
class WordDocument_WwN(WordDocument):
    # Variant for windows of class _WwN (e.g. the Word spell-check edit fields),
    # which must locate the real document window (_WwG) to reach the object model.
    def _get_documentWindowHandle(self):
        w=NVDAHelper.localLib.findWindowWithClassInThread(self.windowThreadID,u"_WwG",True)
        if not w:
            log.debugWarning("Could not find window for class _WwG in thread.")
            # Fall back to this object's own window handle.
            w=super(WordDocument_WwN,self).documentWindowHandle
        return w
    def _get_WinwordWindowObject(self):
        # The object model fetched from _WwG belongs to the document window;
        # navigate to the active pane which corresponds to this control.
        window=super(WordDocument_WwN,self).WinwordWindowObject
        if not window: return None
        try:
            return window.application.activeWindow.activePane
        except COMError:
            log.debugWarning("Unable to get activePane")
            return window.application.windows[1].activePane
    __gestures={
        # Tab handling from WordDocument does not apply in these fields.
        "kb:tab":None,
        "kb:shift+tab":None,
    }
class ElementsListDialog(browseMode.ElementsListDialog):
    # Elements list for Word browse mode: keep the first two standard element
    # types and add an annotations category (comments and revisions).
    ELEMENT_TYPES=(browseMode.ElementsListDialog.ELEMENT_TYPES[0],browseMode.ElementsListDialog.ELEMENT_TYPES[1],
        # Translators: The label of a radio button to select the type of element
        # in the browse mode Elements List dialog.
        ("annotation", _("&Annotations")),
        )
| 1 | 18,558 | I assume it raises COMError? If so perhaps best to just catch that specifically, so as to not hide other more critical errors. | nvaccess-nvda | py |
@@ -328,6 +328,7 @@ var directives = []string{
"git", // github.com/abiosoft/caddy-git
// directives that add middleware to the stack
+ "minify", // github.com/hacdias/caddy-minify
"log",
"gzip",
"errors", | 1 | package httpserver
import (
"flag"
"fmt"
"log"
"net"
"net/url"
"strings"
"time"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyfile"
"github.com/mholt/caddy/caddytls"
)
// serverType is the name of this server type as registered with the caddy core.
const serverType = "http"

func init() {
	// Register command-line flags controlling the default site.
	flag.StringVar(&Host, "host", DefaultHost, "Default host")
	flag.StringVar(&Port, "port", DefaultPort, "Default port")
	flag.StringVar(&Root, "root", DefaultRoot, "Root path of default site")
	flag.DurationVar(&GracefulTimeout, "grace", 5*time.Second, "Maximum duration of graceful shutdown") // TODO
	flag.BoolVar(&HTTP2, "http2", true, "Use HTTP/2")
	flag.BoolVar(&QUIC, "quic", false, "Use experimental QUIC")

	// Register the HTTP server type with the caddy core, including how to
	// synthesize a default Caddyfile when none is provided.
	caddy.RegisterServerType(serverType, caddy.ServerType{
		Directives: directives,
		DefaultInput: func() caddy.Input {
			if Port == DefaultPort && Host != "" {
				// by leaving the port blank in this case we give auto HTTPS
				// a chance to set the port to 443 for us
				return caddy.CaddyfileInput{
					Contents:       []byte(fmt.Sprintf("%s\nroot %s", Host, Root)),
					ServerTypeName: serverType,
				}
			}
			return caddy.CaddyfileInput{
				Contents:       []byte(fmt.Sprintf("%s:%s\nroot %s", Host, Port, Root)),
				ServerTypeName: serverType,
			}
		},
		NewContext: newContext,
	})

	// Support the short-form Caddyfile loader and hook TLS activation and
	// config lookup into the shared caddytls machinery.
	caddy.RegisterCaddyfileLoader("short", caddy.LoaderFunc(shortCaddyfileLoader))
	caddy.RegisterParsingCallback(serverType, "tls", activateHTTPS)
	caddytls.RegisterConfigGetter(serverType, func(key string) *caddytls.Config { return GetConfig(key).TLS })
}
// contexts holds all HTTP contexts created so far, most recent first.
var contexts []*httpContext

func newContext() caddy.Context {
	context := &httpContext{keysToSiteConfigs: make(map[string]*SiteConfig)}
	// put the new context at start to allow setup of directives on new instance
	contexts = append([]*httpContext{context}, contexts...)
	return context
}
// httpContext carries per-instance state for the HTTP server type while a
// Caddyfile is being parsed and executed.
type httpContext struct {
	// keysToSiteConfigs maps an address at the top of a
	// server block (a "key") to its SiteConfig. Not all
	// SiteConfigs will be represented here, only ones
	// that appeared in the Caddyfile.
	keysToSiteConfigs map[string]*SiteConfig

	// siteConfigs is the master list of all site configs.
	siteConfigs []*SiteConfig
}
// InspectServerBlocks make sure that everything checks out before
// executing directives and otherwise prepares the directives to
// be parsed and executed.
func (h *httpContext) InspectServerBlocks(sourceFile string, serverBlocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) {
	// For each address in each server block, make a new config
	for _, sb := range serverBlocks {
		for _, key := range sb.Keys {
			// Keys are case-insensitive; normalize before checking for duplicates.
			key = strings.ToLower(key)
			if _, dup := h.keysToSiteConfigs[key]; dup {
				return serverBlocks, fmt.Errorf("duplicate site address: %s", key)
			}
			addr, err := standardizeAddress(key)
			if err != nil {
				return serverBlocks, err
			}

			// Save the config to our master list, and key it for lookups
			// The Caddyfile itself is hidden so it can never be served.
			cfg := &SiteConfig{
				Addr:        addr,
				Root:        Root,
				TLS:         &caddytls.Config{Hostname: addr.Host},
				HiddenFiles: []string{sourceFile},
			}
			h.siteConfigs = append(h.siteConfigs, cfg)
			h.keysToSiteConfigs[key] = cfg
		}
	}

	// For sites that have gzip (which gets chained in
	// before the error handler) we should ensure that the
	// errors directive also appears so error pages aren't
	// written after the gzip writer is closed.
	for _, sb := range serverBlocks {
		_, hasGzip := sb.Tokens["gzip"]
		_, hasErrors := sb.Tokens["errors"]
		if hasGzip && !hasErrors {
			sb.Tokens["errors"] = []caddyfile.Token{{Text: "errors"}}
		}
	}

	return serverBlocks, nil
}
// MakeServers uses the newly-created siteConfigs to
// create and return a list of server instances, one per
// distinct listen (bind) address.
func (h *httpContext) MakeServers() ([]caddy.Server, error) {
	// make sure TLS is disabled for explicitly-HTTP sites
	// (necessary when HTTP address shares a block containing tls)
	for _, cfg := range h.siteConfigs {
		if cfg.TLS.Enabled && (cfg.Addr.Port == "80" || cfg.Addr.Scheme == "http") {
			cfg.TLS.Enabled = false
			log.Printf("[WARNING] TLS disabled for %s", cfg.Addr)
		}
	}

	// we must map (group) each config to a bind address
	groups, err := groupSiteConfigsByListenAddr(h.siteConfigs)
	if err != nil {
		return nil, err
	}

	// then we create a server for each group
	var servers []caddy.Server
	for addr, group := range groups {
		s, err := NewServer(addr, group)
		if err != nil {
			return nil, err
		}
		servers = append(servers, s)
	}

	return servers, nil
}
// GetConfig gets a SiteConfig that is keyed by addrKey.
// It creates an empty one in the latest context if
// the key does not exist in any context, so it
// will never return nil. If no contexts exist (which
// should never happen except in tests), it creates a
// new context in which to put it.
func GetConfig(addrKey string) *SiteConfig {
	// Search all known contexts, newest first (see newContext).
	for _, context := range contexts {
		if cfg, ok := context.keysToSiteConfigs[addrKey]; ok {
			return cfg
		}
	}
	if len(contexts) == 0 {
		// this shouldn't happen except in tests
		newContext()
	}
	// NOTE(review): newContext prepends, so contexts[len(contexts)-1] is
	// the oldest context, while the doc comment above says "latest" —
	// confirm which is intended.
	cfg := &SiteConfig{Root: Root, TLS: new(caddytls.Config)}
	defaultCtx := contexts[len(contexts)-1]
	defaultCtx.siteConfigs = append(defaultCtx.siteConfigs, cfg)
	defaultCtx.keysToSiteConfigs[addrKey] = cfg
	return cfg
}
// shortCaddyfileLoader builds a Caddyfile from the program's positional
// (un-named) arguments, if any: each argument becomes one line of the
// Caddyfile, preceded by an address line composed from the default Host
// and Port values. It returns (nil, nil) when there is nothing to load
// or the server type is not "http".
func shortCaddyfileLoader(serverType string) (caddy.Input, error) {
	if serverType != "http" || flag.NArg() == 0 {
		return nil, nil
	}
	body := fmt.Sprintf("%s:%s\n%s", Host, Port, strings.Join(flag.Args(), "\n"))
	return caddy.CaddyfileInput{
		Contents:       []byte(body),
		Filepath:       "args",
		ServerTypeName: serverType,
	}, nil
}
// groupSiteConfigsByListenAddr groups site configs by their listen
// (bind) address, so sites that use the same listener can be served
// on the same server instance. The return value maps the listen
// address (what you pass into net.Listen) to the list of site configs.
// This function does NOT vet the configs to ensure they are compatible.
// Note that it mutates the passed-in configs (ListenHost, Addr.Port).
func groupSiteConfigsByListenAddr(configs []*SiteConfig) (map[string][]*SiteConfig, error) {
	groups := make(map[string][]*SiteConfig)
	for _, conf := range configs {
		if caddy.IsLoopback(conf.Addr.Host) && conf.ListenHost == "" {
			// special case: one would not expect a site served
			// at loopback to be connected to from the outside.
			conf.ListenHost = conf.Addr.Host
		}
		if conf.Addr.Port == "" {
			// fall back to the process-wide default port
			conf.Addr.Port = Port
		}
		addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(conf.ListenHost, conf.Addr.Port))
		if err != nil {
			return nil, err
		}
		// use the resolved address's canonical string as the group key
		addrstr := addr.String()
		groups[addrstr] = append(groups[addrstr], conf)
	}

	return groups, nil
}
// AddMiddleware adds a middleware to a site's middleware stack.
// Middlewares are stored in the order they are added.
func (sc *SiteConfig) AddMiddleware(m Middleware) {
	sc.middleware = append(sc.middleware, m)
}
// Address represents a site address. It contains
// the original input value, and the component
// parts of an address.
type Address struct {
	// Original is the address exactly as the user entered it;
	// the remaining fields are parsed from it.
	Original, Scheme, Host, Port, Path string
}
// String returns a human-friendly print of the address: scheme
// (inferred from the port when absent), host, a non-default port,
// and the path.
func (a Address) String() string {
	if a.Host == "" && a.Port == "" {
		return ""
	}
	scheme := a.Scheme
	if scheme == "" {
		// No explicit scheme: infer it from the port.
		if a.Port == "443" {
			scheme = "https"
		} else {
			scheme = "http"
		}
	}
	str := scheme
	if str != "" {
		str += "://"
	}
	str += a.Host
	// Print the port only when it is not implied by the scheme.
	portIsNonDefault := (scheme == "https" && a.Port != "443") ||
		(scheme == "http" && a.Port != "80")
	if a.Port != "" && portIsNonDefault {
		str += ":" + a.Port
	}
	if a.Path != "" {
		str += a.Path
	}
	return str
}
// VHost returns a sensible concatenation of Host:Port/Path from a.
// It's basically the a.Original but without the scheme.
func (a Address) VHost() string {
	// Strip a leading "scheme://" prefix from the original input, if any.
	const sep = "://"
	if idx := strings.Index(a.Original, sep); idx != -1 {
		return a.Original[idx+len(sep):]
	}
	return a.Original
}
// standardizeAddress parses an address string into a structured format with separate
// scheme, host, and port portions, as well as the original input string.
// It normalizes port names "http"/"https" to their port numbers and errors
// on repeated or convention-violating scheme/port combinations.
func standardizeAddress(str string) (Address, error) {
	input := str

	// Split input into components (prepend with // to assert host by default)
	if !strings.Contains(str, "//") {
		str = "//" + str
	}
	u, err := url.Parse(str)
	if err != nil {
		return Address{}, err
	}

	// separate host and port
	host, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		// retry with a trailing colon to handle a host with no port
		host, port, err = net.SplitHostPort(u.Host + ":")
		if err != nil {
			host = u.Host
		}
	}

	// see if we can set port based off scheme
	if port == "" {
		if u.Scheme == "http" {
			port = "80"
		} else if u.Scheme == "https" {
			port = "443"
		}
	}

	// repeated or conflicting scheme is confusing, so error
	if u.Scheme != "" && (port == "http" || port == "https") {
		return Address{}, fmt.Errorf("[%s] scheme specified twice in address", input)
	}

	// error if scheme and port combination violate convention
	if (u.Scheme == "http" && port == "443") || (u.Scheme == "https" && port == "80") {
		return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input)
	}

	// standardize http and https ports to their respective port numbers
	if port == "http" {
		u.Scheme = "http"
		port = "80"
	} else if port == "https" {
		u.Scheme = "https"
		port = "443"
	}

	// NOTE(review): err may still be non-nil here if both SplitHostPort
	// attempts failed; it is returned alongside a populated Address —
	// confirm callers expect that.
	return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err
}
// directives is the list of all directives known to exist for the
// http server type, including non-standard (3rd-party) directives.
// The ordering of this list is important; confirm the intended
// position before inserting or reordering entries.
var directives = []string{
	// primitive actions that set up the fundamental vitals of each config
	"root",
	"tls",
	"bind",

	// services/utilities, or other directives that don't necessarily inject handlers
	"startup",
	"shutdown",
	"realip", // github.com/captncraig/caddy-realip
	"git",    // github.com/abiosoft/caddy-git

	// directives that add middleware to the stack
	"log",
	"gzip",
	"errors",
	"ipfilter", // github.com/pyed/ipfilter
	"search",   // github.com/pedronasser/caddy-search
	"header",
	"cors", // github.com/captncraig/cors/caddy
	"rewrite",
	"redir",
	"ext",
	"mime",
	"basicauth",
	"jwt",    // github.com/BTBurke/caddy-jwt
	"jsonp",  // github.com/pschlump/caddy-jsonp
	"upload", // blitznote.com/src/caddy.upload
	"internal",
	"pprof",
	"expvar",
	"proxy",
	"fastcgi",
	"websocket",
	"markdown",
	"templates",
	"browse",
	"hugo",       // github.com/hacdias/caddy-hugo
	"mailout",    // github.com/SchumacherFM/mailout
	"prometheus", // github.com/miekg/caddy-prometheus
}
const (
	// DefaultHost is the default host (empty string).
	DefaultHost = ""
	// DefaultPort is the default port.
	DefaultPort = "2015"
	// DefaultRoot is the default root folder (current directory).
	DefaultRoot = "."
)
// These "soft defaults" are configurable by
// command line flags, etc.
var (
	// Root is the site root
	Root = DefaultRoot

	// Host is the site host
	Host = DefaultHost

	// Port is the site port
	Port = DefaultPort

	// GracefulTimeout is the maximum duration of a graceful shutdown.
	GracefulTimeout time.Duration

	// HTTP2 indicates whether HTTP2 is enabled or not.
	HTTP2 bool

	// QUIC indicates whether QUIC is enabled or not.
	QUIC bool
)
| 1 | 8,445 | Since minify can generate errors, it should at least go after the errors middleware. And you definitely don't want to be minifying after the gzip writer has closed. | caddyserver-caddy | go |
@@ -31,7 +31,8 @@ type Tracer struct {
// Version is the instrumentation version.
Version string
- config *config
+ config *config
+ provider *TracerProvider
}
// Start creates a span. If t is configured with a SpanRecorder its OnStart | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package oteltest // import "go.opentelemetry.io/otel/oteltest"
import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
var _ trace.Tracer = (*Tracer)(nil)
// Tracer is an OpenTelemetry Tracer implementation used for testing.
type Tracer struct {
	// Name is the instrumentation name.
	Name string
	// Version is the instrumentation version.
	Version string

	// config holds the tracer's behavior settings; Start uses its
	// SpanContextFunc and SpanRecorder.
	config *config
}
// Start creates a span. If t is configured with a SpanRecorder its OnStart
// method will be called after the created Span has been initialized.
func (t *Tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
	c := trace.NewSpanStartConfig(opts...)
	// Honor a caller-supplied start timestamp; otherwise use now.
	startTime := time.Now()
	if st := c.Timestamp(); !st.IsZero() {
		startTime = st
	}
	span := &Span{
		tracer:     t,
		startTime:  startTime,
		attributes: make(map[attribute.Key]attribute.Value),
		links:      []trace.Link{},
		spanKind:   c.SpanKind(),
	}
	if c.NewRoot() {
		// A new root span gets a zero (invalid) span context.
		span.spanContext = trace.SpanContext{}
	} else {
		// Derive a span context from config, inheriting the trace ID and
		// parent span ID from any valid span context already in ctx.
		span.spanContext = t.config.SpanContextFunc(ctx)
		if current := trace.SpanContextFromContext(ctx); current.IsValid() {
			span.spanContext = span.spanContext.WithTraceID(current.TraceID())
			span.parentSpanID = current.SpanID()
		}
	}
	for _, link := range c.Links() {
		for i, sl := range span.links {
			if sl.SpanContext.SpanID() == link.SpanContext.SpanID() &&
				sl.SpanContext.TraceID() == link.SpanContext.TraceID() &&
				sl.SpanContext.TraceFlags() == link.SpanContext.TraceFlags() &&
				sl.SpanContext.TraceState().String() == link.SpanContext.TraceState().String() {
				// A matching link already exists: overwrite its attributes.
				span.links[i].Attributes = link.Attributes
				break
			}
		}
		// NOTE(review): the link is appended even when a matching one was
		// found and updated above, so duplicates can accumulate — confirm
		// this is intended.
		span.links = append(span.links, link)
	}

	span.SetName(name)
	span.SetAttributes(c.Attributes()...)

	if t.config.SpanRecorder != nil {
		t.config.SpanRecorder.OnStart(span)
	}
	return trace.ContextWithSpan(ctx, span), span
}
| 1 | 15,750 | nit: we could remove the `config` field as it is a duplicate of the `provider.config` field. | open-telemetry-opentelemetry-go | go |
@@ -112,7 +112,8 @@ class ForsetiServerInstaller(ForsetiInstaller):
# Create firewall rule to open only port tcp:50051
# within the internal network (ip-ranges - 10.128.0.0/9)
gcloud.create_firewall_rule(
- self.format_firewall_rule_name('forseti-server-allow-grpc'),
+ self.format_firewall_rule_name(
+ 'forseti-server-allow-grpc-internal'),
[self.gcp_service_account],
constants.FirewallRuleAction.ALLOW,
['tcp:50051'], | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti Server installer."""
from __future__ import print_function
import random
from configs.server_config import ServerConfig
from forseti_installer import ForsetiInstaller
from util import constants
from util import files
from util import gcloud
from util import utils
class ForsetiServerInstaller(ForsetiInstaller):
    """Forseti server installer"""

    # pylint: disable=too-many-instance-attributes
    # Having ten variables is reasonable in this case.

    # Class-level defaults; instances overwrite these during the
    # pre-flight question phases below.
    gsuite_service_account = None
    has_roles_script = False
    setup_explain = True
    enable_write_access = False
    resource_root_id = None
    access_target = None
    target_id = None

    def __init__(self, **kwargs):
        """Init

        Args:
            kwargs (dict): The kwargs.
        """
        super(ForsetiServerInstaller, self).__init__()
        self.config = ServerConfig(**kwargs)

    def preflight_checks(self):
        """Pre-flight checks for server instance"""
        super(ForsetiServerInstaller, self).preflight_checks()
        self.determine_access_target()
        self.should_enable_write_access()
        self.format_gsuite_service_acct_id()
        self.should_grant_access()
        gcloud.enable_apis(self.config.dry_run)
        self.gsuite_service_account = gcloud.create_or_reuse_service_acct(
            'gsuite_service_account',
            self.gsuite_service_account,
            self.config.advanced_mode,
            self.config.dry_run)
        self.get_email_settings()

    def deploy(self, deployment_tpl_path, conf_file_path, bucket_name):
        """Deploy Forseti using the deployment template.
        Grant access to service account.

        Args:
            deployment_tpl_path (str): Deployment template path
            conf_file_path (str): Configuration file path
            bucket_name (str): Name of the GCS bucket

        Returns:
            bool: Whether or not the deployment was successful
            str: Deployment name
        """
        success, deployment_name = super(ForsetiServerInstaller, self).deploy(
            deployment_tpl_path, conf_file_path, bucket_name)

        if success:
            self.has_roles_script = gcloud.grant_server_svc_acct_roles(
                self.enable_write_access,
                self.access_target,
                self.target_id,
                self.project_id,
                self.gsuite_service_account,
                self.gcp_service_account,
                self.user_can_grant_roles)

            # Copy the rule directory to the GCS bucket
            files.copy_file_to_destination(
                constants.RULES_DIR_PATH, bucket_name,
                is_directory=True, dry_run=self.config.dry_run)

            instance_name = '{}-vm'.format(deployment_name)
            self.wait_until_vm_initialized(instance_name)

            # Create firewall rule to block out all the ingress traffic
            gcloud.create_firewall_rule(
                self.format_firewall_rule_name('forseti-server-deny-all'),
                [self.gcp_service_account],
                constants.FirewallRuleAction.DENY,
                ['icmp', 'udp', 'tcp'],
                constants.FirewallRuleDirection.INGRESS,
                1)

            # Create firewall rule to open only port tcp:50051
            # within the internal network (ip-ranges - 10.128.0.0/9)
            gcloud.create_firewall_rule(
                self.format_firewall_rule_name('forseti-server-allow-grpc'),
                [self.gcp_service_account],
                constants.FirewallRuleAction.ALLOW,
                ['tcp:50051'],
                constants.FirewallRuleDirection.INGRESS,
                0,
                '10.128.0.0/9')

        return success, deployment_name

    def format_firewall_rule_name(self, rule_name):
        """Format firewall rule name.

        Args:
            rule_name (str): Name of the firewall rule

        Returns:
            str: Firewall rule name, made unique with the install's
                datetimestamp suffix
        """
        return '{}-{}'.format(rule_name, self.config.datetimestamp)

    def should_grant_access(self):
        """Inform user that they need IAM access to grant Forseti access."""
        utils.print_banner('Current IAM access')
        choice = None

        # In basic (non-advanced) mode, assume yes without prompting.
        if not self.config.advanced_mode:
            choice = 'y'

        while choice != 'y' and choice != 'n':
            choice = raw_input(constants.QUESTION_ACCESS_TO_GRANT_ROLES.format(
                self.resource_root_id)).strip().lower()

        if choice == 'y':
            self.user_can_grant_roles = True
            print('Will attempt to grant roles on the target %s.' %
                  self.resource_root_id)
        else:
            self.user_can_grant_roles = False
            print('Will NOT attempt to grant roles on the target %s.' %
                  self.resource_root_id)

    def get_deployment_values(self):
        """Get deployment values

        Returns:
            dict: A dictionary of values needed to generate
                the forseti deployment template
        """
        bucket_name = self.generate_bucket_name()
        return {
            'CLOUDSQL_REGION': self.config.cloudsql_region,
            'CLOUDSQL_INSTANCE_NAME': self.config.cloudsql_instance,
            'SCANNER_BUCKET': bucket_name[len('gs://'):],
            'BUCKET_LOCATION': self.config.bucket_location,
            'GCP_SERVER_SERVICE_ACCOUNT': self.gcp_service_account,
            'GSUITE_SERVICE_ACCOUNT': self.gsuite_service_account,
            'BRANCH_OR_RELEASE': 'branch-name: "{}"'.format(self.branch),
            'GSUITE_ADMIN_EMAIL': self.config.gsuite_superadmin_email,
            'ROOT_RESOURCE_ID': self.resource_root_id,
            # Randomize the cron minute to spread load across installs.
            'rand_minute': random.randint(0, 59)
        }

    def get_configuration_values(self):
        """Get configuration values

        Returns:
            dict: A dictionary of values needed to generate
                the forseti configuration file
        """
        bucket_name = self.generate_bucket_name()
        return {
            'EMAIL_RECIPIENT': self.config.notification_recipient_email,
            'EMAIL_SENDER': self.config.notification_sender_email,
            'SENDGRID_API_KEY': self.config.sendgrid_api_key,
            'SCANNER_BUCKET': bucket_name[len('gs://'):],
            'DOMAIN_SUPER_ADMIN_EMAIL': self.config.gsuite_superadmin_email
        }

    def determine_access_target(self):
        """Determine where to enable Forseti access.

        Either org, folder, or project level.
        """
        utils.print_banner('Forseti access target')

        # In basic mode, default to the first resource type (organization).
        if not self.config.advanced_mode:
            self.access_target = constants.RESOURCE_TYPES[0]
            self.target_id = self.organization_id

        while not self.target_id:
            if self.setup_explain:
                # If user wants to setup Explain, they must setup
                # access on an organization.
                choice_index = 1
            else:
                try:
                    print(constants.MESSAGE_FORSETI_CONFIGURATION_ACCESS_LEVEL)
                    for (i, choice) in enumerate(constants.RESOURCE_TYPES):
                        print('[%s] %s' % (i+1, choice))
                    choice_input = raw_input(
                        constants.QUESTION_FORSETI_CONFIGURATION_ACCESS_LEVEL
                    ).strip()
                    choice_index = int(choice_input)
                except ValueError:
                    print('Invalid choice, try again.')
                    continue

            if choice_index and choice_index <= len(constants.RESOURCE_TYPES):
                self.access_target = constants.RESOURCE_TYPES[choice_index-1]
                if self.access_target == 'organization':
                    self.target_id = gcloud.choose_organization()
                elif self.access_target == 'folder':
                    self.target_id = gcloud.choose_folder(self.organization_id)
                else:
                    self.target_id = gcloud.choose_project()

        self.resource_root_id = utils.format_resource_id(
            '%ss' % self.access_target, self.target_id)

        print('Forseti will be granted access to: %s' %
              self.resource_root_id)

    def get_email_settings(self):
        """Ask user for specific setup values."""
        if not self.config.sendgrid_api_key:
            # Ask for SendGrid API Key
            print(constants.MESSAGE_ASK_SENDGRID_API_KEY)
            self.config.sendgrid_api_key = raw_input(
                constants.QUESTION_SENDGRID_API_KEY).strip()
        if self.config.sendgrid_api_key:
            self.config.notification_sender_email = (
                constants.NOTIFICATION_SENDER_EMAIL)

            # Ask for notification recipient email
            if not self.config.notification_recipient_email:
                self.config.notification_recipient_email = raw_input(
                    constants.QUESTION_NOTIFICATION_RECIPIENT_EMAIL).strip()

        if not self.config.gsuite_superadmin_email:
            # Ask for G Suite super admin email
            print(constants.MESSAGE_ASK_GSUITE_SUPERADMIN_EMAIL)
            self.config.gsuite_superadmin_email = raw_input(
                constants.QUESTION_GSUITE_SUPERADMIN_EMAIL).strip()

    def format_gsuite_service_acct_id(self):
        """Format the gsuite service account id"""
        self.gsuite_service_account = utils.format_service_acct_id(
            'gsuite',
            'reader',
            self.config.timestamp,
            self.project_id)

    def format_gcp_service_acct_id(self):
        """Format the service account ids."""
        # The modifier reflects whether write access was enabled.
        modifier = 'reader'
        if self.enable_write_access:
            modifier = 'readwrite'

        self.gcp_service_account = utils.format_service_acct_id(
            'gcp',
            modifier,
            self.config.timestamp,
            self.project_id)

    def should_enable_write_access(self):
        """Ask if user wants to enable write access for Forseti."""
        utils.print_banner('Enable Forseti write access')
        choice = None
        # In basic (non-advanced) mode, assume yes without prompting.
        if not self.config.advanced_mode:
            choice = 'y'

        while choice != 'y' and choice != 'n':
            choice = raw_input(
                constants.QUESTION_ENABLE_WRITE_ACCESS).strip().lower()

        if choice == 'y':
            self.enable_write_access = True
            print('Forseti will have write access on %s.' %
                  self.resource_root_id)

    def post_install_instructions(self, deploy_success, deployment_name,
                                  deployment_tpl_path, forseti_conf_path,
                                  bucket_name):
        """Print post-installation instructions.

        Prints the base instructions from the parent installer, then
        server-specific notes (role-grant script, skipped email setup,
        and G Suite data collection).

        Args:
            deploy_success (bool): Whether or not the deployment succeeded
            deployment_name (str): Name of the deployment
            deployment_tpl_path (str): Deployment template path
            forseti_conf_path (str): Forseti configuration file path
            bucket_name (str): Name of the GCS bucket
        """
        super(ForsetiServerInstaller, self).post_install_instructions(
            deploy_success, deployment_name,
            deployment_tpl_path, forseti_conf_path,
            bucket_name)
        if self.has_roles_script:
            print(constants.MESSAGE_HAS_ROLE_SCRIPT.format(
                self.resource_root_id))

        if not self.config.sendgrid_api_key:
            print(constants.MESSAGE_SKIP_EMAIL)

        if self.config.gsuite_superadmin_email:
            print(constants.MESSAGE_GSUITE_DATA_COLLECTION.format(
                self.project_id,
                self.organization_id,
                self.gsuite_service_account))
        else:
            print(constants.MESSAGE_ENABLE_GSUITE_GROUP)
| 1 | 29,053 | Nit: from the Internet. | forseti-security-forseti-security | py |
@@ -7,11 +7,15 @@
package hdwallet
import (
+ "bytes"
"fmt"
"io/ioutil"
+ ecrypt "github.com/ethereum/go-ethereum/crypto"
+ "github.com/iotexproject/go-pkgs/crypto"
+ "github.com/iotexproject/iotex-address/address"
+ hdwallet "github.com/miguelmota/go-ethereum-hdwallet"
"github.com/spf13/cobra"
-
"github.com/tyler-smith/go-bip39"
"github.com/iotexproject/iotex-core/ioctl/config" | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package hdwallet
import (
"fmt"
"io/ioutil"
"github.com/spf13/cobra"
"github.com/tyler-smith/go-bip39"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/util/fileutil"
)
// Multi-language support
var (
	// createByMnemonicCmdShorts maps each supported UI language to the
	// short description of the "create" command.
	createByMnemonicCmdShorts = map[config.Language]string{
		config.English: "create hdwallet using mnemonic",
		config.Chinese: "通过助记词创建新钱包",
	}
	// createByMnemonicCmdUses maps each supported UI language to the
	// usage string of the "create" command.
	createByMnemonicCmdUses = map[config.Language]string{
		config.English: "create",
		config.Chinese: "create 创建",
	}
)
// hdwalletCreateCmd represents the hdwallet create command
var hdwalletCreateCmd = &cobra.Command{
	Use:   config.TranslateInLang(createByMnemonicCmdUses, config.UILanguage),
	Short: config.TranslateInLang(createByMnemonicCmdShorts, config.UILanguage),
	Args:  cobra.ExactArgs(0),
	RunE: func(cmd *cobra.Command, args []string) error {
		// Suppress cobra's usage output for runtime errors.
		cmd.SilenceUsage = true
		err := hdwalletCreate()
		return output.PrintError(err)
	},
}
// hdwalletCreate creates a new HD wallet: it prompts for a password
// (twice, for confirmation), generates a BIP-39 mnemonic, encrypts the
// mnemonic (with an appended SHA-256 checksum) using the SHA-256 hash
// of the password as the key, and writes the ciphertext to
// hdWalletConfigFile. It is a no-op if the wallet file already exists.
func hdwalletCreate() error {
	if fileutil.FileExists(hdWalletConfigFile) {
		output.PrintResult("already created hdwallet, if you forgot password,use delete/import command.")
		return nil
	}

	output.PrintQuery("Set password\n")
	password, err := util.ReadSecretFromStdin()
	if err != nil {
		return output.NewError(output.InputError, "failed to get password", err)
	}
	output.PrintQuery("Enter password again\n")
	passwordAgain, err := util.ReadSecretFromStdin()
	if err != nil {
		return output.NewError(output.InputError, "failed to get password", err)
	}
	if password != passwordAgain {
		return output.NewError(output.ValidationError, ErrPasswdNotMatch.Error(), nil)
	}

	// Generate the BIP-39 mnemonic; surface (previously ignored) errors.
	entropy, err := bip39.NewEntropy(128)
	if err != nil {
		return output.NewError(output.RuntimeError, "failed to generate entropy", err)
	}
	mnemonic, err := bip39.NewMnemonic(entropy)
	if err != nil {
		return output.NewError(output.RuntimeError, "failed to generate mnemonic", err)
	}

	// Append a checksum of the mnemonic so decryption can be verified,
	// then encrypt with the SHA-256 hash of the password as the key.
	enctxt := append([]byte(mnemonic), util.HashSHA256([]byte(mnemonic))...)
	enckey := util.HashSHA256([]byte(password))
	out, err := util.Encrypt(enctxt, enckey)
	if err != nil {
		// Propagate the underlying error (was silently dropped before).
		return output.NewError(output.ValidationError, "failed to encrypt mnemonic", err)
	}

	if err := ioutil.WriteFile(hdWalletConfigFile, out, 0600); err != nil {
		return output.NewError(output.WriteFileError,
			fmt.Sprintf("failed to write to config file %s", hdWalletConfigFile), err)
	}

	output.PrintResult(fmt.Sprintf("Mnemonic phrase: %s\n"+
		"It is used to recover your wallet in case you forgot the password. Write them down and store it in a safe place.", mnemonic))
	return nil
}
| 1 | 22,766 | this is an internal package; move it to the bottom and run 'make fmt' | iotexproject-iotex-core | go |
@@ -0,0 +1,14 @@
+// Copyright (c) .NET Foundation. All rights reserved.
+// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
+
+using System.Threading;
+
+namespace Microsoft.AspNetCore.Connections.Features
+{
+ public interface IConnectionLifetimeNotificationFeature
+ {
+ CancellationToken ConnectionClosing { get; set; }
+
+ void Close();
+ }
+} | 1 | 1 | 16,391 | Why is this better than ApplicationStopping? | aspnet-KestrelHttpServer | .cs |
|
@@ -70,8 +70,8 @@ type ChallengeSpec struct {
// +optional
Wildcard bool `json:"wildcard"`
- // Type is the type of ACME challenge this resource represents, e.g. "dns01"
- // or "http01".
+ // Type is the type of ACME challenge this resource represents.
+ // One of "http-01" or "dns-01".
Type ACMEChallengeType `json:"type"`
// Token is the ACME challenge token for this challenge. | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Challenge is a type to represent a Challenge request with an ACME server
// +k8s:openapi-gen=true
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state"
// +kubebuilder:printcolumn:name="Domain",type="string",JSONPath=".spec.dnsName"
// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC."
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=challenges
type Challenge struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	// Spec is the desired state of the Challenge resource.
	Spec ChallengeSpec `json:"spec,omitempty"`
	// Status is the most recently observed status of the Challenge.
	Status ChallengeStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ChallengeList is a list of Challenges
type ChallengeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`

	// Items is the list of Challenge resources.
	Items []Challenge `json:"items"`
}
// ChallengeSpec describes the desired state of a Challenge resource.
type ChallengeSpec struct {
	// URL is the URL of the ACME Challenge resource for this challenge.
	// This can be used to lookup details about the status of this challenge.
	URL string `json:"url"`

	// AuthzURL is the URL to the ACME Authorization resource that this
	// challenge is a part of.
	AuthzURL string `json:"authzURL"`

	// DNSName is the identifier that this challenge is for, e.g. example.com.
	// If the requested DNSName is a 'wildcard', this field MUST be set to the
	// non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`.
	DNSName string `json:"dnsName"`

	// Wildcard will be true if this challenge is for a wildcard identifier,
	// for example '*.example.com'.
	// +optional
	Wildcard bool `json:"wildcard"`

	// Type is the type of ACME challenge this resource represents.
	// One of "http-01" or "dns-01".
	Type ACMEChallengeType `json:"type"`

	// Token is the ACME challenge token for this challenge.
	// This is the raw value returned from the ACME server.
	Token string `json:"token"`

	// Key is the ACME challenge key for this challenge
	// For HTTP01 challenges, this is the value that must be responded with to
	// complete the HTTP01 challenge in the format:
	// `<private key JWK thumbprint>.<key from acme server for challenge>`.
	// For DNS01 challenges, this is the base64 encoded SHA256 sum of the
	// `<private key JWK thumbprint>.<key from acme server for challenge>`
	// text that must be set as the TXT record content.
	Key string `json:"key"`

	// Solver contains the domain solving configuration that should be used to
	// solve this challenge resource.
	Solver ACMEChallengeSolver `json:"solver"`

	// IssuerRef references a properly configured ACME-type Issuer which should
	// be used to create this Challenge.
	// If the Issuer does not exist, processing will be retried.
	// If the Issuer is not an 'ACME' Issuer, an error will be returned and the
	// Challenge will be marked as failed.
	IssuerRef cmmeta.ObjectReference `json:"issuerRef"`
}
// ChallengeStatus contains the observed status of a Challenge resource.
type ChallengeStatus struct {
	// Processing is used to denote whether this challenge should be processed
	// or not.
	// This field will only be set to true by the 'scheduling' component.
	// It will only be set to false by the 'challenges' controller, after the
	// challenge has reached a final state or timed out.
	// If this field is set to false, the challenge controller will not take
	// any more action.
	// +optional
	Processing bool `json:"processing"`

	// Presented will be set to true if the challenge values for this challenge
	// are currently 'presented'.
	// This *does not* imply the self check is passing. Only that the values
	// have been 'submitted' for the appropriate challenge mechanism (i.e. the
	// DNS01 TXT record has been presented, or the HTTP01 configuration has been
	// configured).
	// +optional
	Presented bool `json:"presented"`

	// Reason contains human readable information on why the Challenge is in the
	// current state.
	// +optional
	Reason string `json:"reason"`

	// State contains the current 'state' of the challenge.
	// If not set, the state of the challenge is unknown.
	// +optional
	State State `json:"state,omitempty"`
}
| 1 | 22,450 | Maybe worth expanding that these 2 are supported by cert-manager but other values exist | jetstack-cert-manager | go |
@@ -169,7 +169,7 @@ class MediaExtension extends \Twig_Extension
$options = array_merge($defaultOptions, $options);
- $options['src'] = $provider->generatePublicUrl($media, $format);
+ $options = $provider->getHelperProperties($media, $format, $options);
return $this->render($provider->getTemplate('helper_thumbnail'), array(
'media' => $media, | 1 | <?php
/*
* This file is part of sonata-project.
*
* (c) 2010 Thomas Rabaix
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Twig\Extension;
use Sonata\CoreBundle\Model\ManagerInterface;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Provider\Pool;
use Sonata\MediaBundle\Twig\TokenParser\MediaTokenParser;
use Sonata\MediaBundle\Twig\TokenParser\PathTokenParser;
use Sonata\MediaBundle\Twig\TokenParser\ThumbnailTokenParser;
class MediaExtension extends \Twig_Extension
{
/**
* @var Pool
*/
protected $mediaService;
/**
* @var array
*/
protected $resources = array();
/**
* @var ManagerInterface
*/
protected $mediaManager;
/**
* @var \Twig_Environment
*/
protected $environment;
    /**
     * @param Pool             $mediaService pool of media providers
     * @param ManagerInterface $mediaManager manager used to look up media by id
     */
    public function __construct(Pool $mediaService, ManagerInterface $mediaManager)
    {
        $this->mediaService = $mediaService;
        $this->mediaManager = $mediaManager;
    }
/**
* {@inheritdoc}
*/
public function getTokenParsers()
{
return array(
new MediaTokenParser($this->getName()),
new ThumbnailTokenParser($this->getName()),
new PathTokenParser($this->getName()),
);
}
    /**
     * {@inheritdoc}
     *
     * Stores the Twig environment for later use by render().
     */
    public function initRuntime(\Twig_Environment $environment)
    {
        $this->environment = $environment;
    }
    /**
     * {@inheritdoc}
     */
    public function getName()
    {
        return 'sonata_media';
    }
    /**
     * Renders the media helper template for the given media and format.
     *
     * @param MediaInterface|mixed $media   a media entity, or an id to look up
     * @param string               $format
     * @param array                $options
     *
     * @return string the rendered helper, or an empty string when the
     *                media cannot be resolved
     */
    public function media($media = null, $format, $options = array())
    {
        $media = $this->getMedia($media);

        if (!$media) {
            return '';
        }

        $provider = $this
            ->getMediaService()
            ->getProvider($media->getProviderName());

        // Let the provider normalize the format name and build the
        // template options for the helper.
        $format = $provider->getFormatName($media, $format);
        $options = $provider->getHelperProperties($media, $format, $options);

        return $this->render($provider->getTemplate('helper_view'), array(
            'media' => $media,
            'format' => $format,
            'options' => $options,
        ));
    }
/**
* @param mixed $media
*
* @return MediaInterface|null|bool
*/
private function getMedia($media)
{
if (!$media instanceof MediaInterface && strlen($media) > 0) {
$media = $this->mediaManager->findOneBy(array(
'id' => $media,
));
}
if (!$media instanceof MediaInterface) {
return false;
}
if ($media->getProviderStatus() !== MediaInterface::STATUS_OK) {
return false;
}
return $media;
}
/**
* Returns the thumbnail for the provided media.
*
* @param MediaInterface $media
* @param string $format
* @param array $options
*
* @return string
*/
public function thumbnail($media = null, $format, $options = array())
{
$media = $this->getMedia($media);
if (!$media) {
return '';
}
$provider = $this->getMediaService()
->getProvider($media->getProviderName());
$format = $provider->getFormatName($media, $format);
$format_definition = $provider->getFormat($format);
// build option
$defaultOptions = array(
'title' => $media->getName(),
);
if ($format_definition['width']) {
$defaultOptions['width'] = $format_definition['width'];
}
if ($format_definition['height']) {
$defaultOptions['height'] = $format_definition['height'];
}
$options = array_merge($defaultOptions, $options);
$options['src'] = $provider->generatePublicUrl($media, $format);
return $this->render($provider->getTemplate('helper_thumbnail'), array(
'media' => $media,
'options' => $options,
));
}
/**
* @param string $template
* @param array $parameters
*
* @return mixed
*/
public function render($template, array $parameters = array())
{
if (!isset($this->resources[$template])) {
$this->resources[$template] = $this->environment->loadTemplate($template);
}
return $this->resources[$template]->render($parameters);
}
/**
* @param MediaInterface $media
* @param string $format
*
* @return string
*/
public function path($media = null, $format)
{
$media = $this->getMedia($media);
if (!$media) {
return '';
}
$provider = $this->getMediaService()
->getProvider($media->getProviderName());
$format = $provider->getFormatName($media, $format);
return $provider->generatePublicUrl($media, $format);
}
/**
* @return Pool
*/
public function getMediaService()
{
return $this->mediaService;
}
}
| 1 | 7,322 | Why was this merged? It should have raised some questions IMO @core23 @OskarStark . It's already in 3 releases now, so we can't revert it can we? How can we fix this? Please have a look at #1065 | sonata-project-SonataMediaBundle | php |
@@ -125,6 +125,10 @@ public class ProcessBesuNodeRunner implements BesuNodeRunner {
params.add("--rpc-http-authentication-credentials-file");
params.add(node.jsonRpcConfiguration().getAuthenticationCredentialsFile());
}
+ if (node.jsonRpcConfiguration().getAuthenticationPublicKeyFile() != null) {
+ params.add("--rpc-http-authentication-public-key-file");
+ params.add(node.jsonRpcConfiguration().getAuthenticationPublicKeyFile().getAbsolutePath());
+ }
}
if (node.wsRpcEnabled()) { | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.dsl.node;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.hyperledger.besu.cli.options.NetworkingOptions;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.tests.acceptance.dsl.StaticNodesUtils;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.ProcessBuilder.Redirect;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.awaitility.Awaitility;
public class ProcessBesuNodeRunner implements BesuNodeRunner {
private final Logger LOG = LogManager.getLogger();
private final Logger PROCESS_LOG = LogManager.getLogger("org.hyperledger.besu.SubProcessLog");
private final Map<String, Process> besuProcesses = new HashMap<>();
private final ExecutorService outputProcessorExecutor = Executors.newCachedThreadPool();
ProcessBesuNodeRunner() {
Runtime.getRuntime().addShutdownHook(new Thread(this::shutdown));
}
@Override
public void startNode(final BesuNode node) {
final Path dataDir = node.homeDirectory();
final List<String> params = new ArrayList<>();
params.add("build/install/besu/bin/besu");
params.add("--data-path");
params.add(dataDir.toAbsolutePath().toString());
if (node.isDevMode()) {
params.add("--network");
params.add("DEV");
}
params.add("--discovery-enabled");
params.add(Boolean.toString(node.isDiscoveryEnabled()));
params.add("--p2p-host");
params.add(node.p2pListenHost());
params.add("--p2p-port");
params.add("0");
if (node.getMiningParameters().isMiningEnabled()) {
params.add("--miner-enabled");
params.add("--miner-coinbase");
params.add(node.getMiningParameters().getCoinbase().get().toString());
}
if (node.getPrivacyParameters().isEnabled()) {
params.add("--privacy-enabled");
params.add("--privacy-url");
params.add(node.getPrivacyParameters().getEnclaveUri().toString());
params.add("--privacy-public-key-file");
params.add(node.getPrivacyParameters().getEnclavePublicKeyFile().getAbsolutePath());
params.add("--privacy-precompiled-address");
params.add(String.valueOf(node.getPrivacyParameters().getPrivacyAddress()));
params.add("--privacy-marker-transaction-signing-key-file");
params.add(node.homeDirectory().resolve("key").toString());
}
params.add("--bootnodes");
if (!node.getBootnodes().isEmpty()) {
params.add(node.getBootnodes().stream().map(URI::toString).collect(Collectors.joining(",")));
}
if (node.hasStaticNodes()) {
createStaticNodes(node);
}
if (node.isJsonRpcEnabled()) {
params.add("--rpc-http-enabled");
params.add("--rpc-http-host");
params.add(node.jsonRpcListenHost().get());
params.add("--rpc-http-port");
params.add(node.jsonRpcListenPort().map(Object::toString).get());
params.add("--rpc-http-api");
params.add(apiList(node.jsonRpcConfiguration().getRpcApis()));
if (node.jsonRpcConfiguration().isAuthenticationEnabled()) {
params.add("--rpc-http-authentication-enabled");
}
if (node.jsonRpcConfiguration().getAuthenticationCredentialsFile() != null) {
params.add("--rpc-http-authentication-credentials-file");
params.add(node.jsonRpcConfiguration().getAuthenticationCredentialsFile());
}
}
if (node.wsRpcEnabled()) {
params.add("--rpc-ws-enabled");
params.add("--rpc-ws-host");
params.add(node.wsRpcListenHost().get());
params.add("--rpc-ws-port");
params.add(node.wsRpcListenPort().map(Object::toString).get());
params.add("--rpc-ws-api");
params.add(apiList(node.webSocketConfiguration().getRpcApis()));
if (node.webSocketConfiguration().isAuthenticationEnabled()) {
params.add("--rpc-ws-authentication-enabled");
}
if (node.webSocketConfiguration().getAuthenticationCredentialsFile() != null) {
params.add("--rpc-ws-authentication-credentials-file");
params.add(node.webSocketConfiguration().getAuthenticationCredentialsFile());
}
}
if (node.isMetricsEnabled()) {
final MetricsConfiguration metricsConfiguration = node.getMetricsConfiguration();
params.add("--metrics-enabled");
params.add("--metrics-host");
params.add(metricsConfiguration.getHost());
params.add("--metrics-port");
params.add(Integer.toString(metricsConfiguration.getPort()));
for (final MetricCategory category : metricsConfiguration.getMetricCategories()) {
params.add("--metrics-category");
params.add(((Enum<?>) category).name());
}
if (metricsConfiguration.isPushEnabled()) {
params.add("--metrics-push-enabled");
params.add("--metrics-push-host");
params.add(metricsConfiguration.getPushHost());
params.add("--metrics-push-port");
params.add(Integer.toString(metricsConfiguration.getPushPort()));
params.add("--metrics-push-interval");
params.add(Integer.toString(metricsConfiguration.getPushInterval()));
params.add("--metrics-push-prometheus-job");
params.add(metricsConfiguration.getPrometheusJob());
}
}
node.getGenesisConfig()
.ifPresent(
genesis -> {
final Path genesisFile = createGenesisFile(node, genesis);
params.add("--genesis-file");
params.add(genesisFile.toAbsolutePath().toString());
});
if (!node.isP2pEnabled()) {
params.add("--p2p-enabled");
params.add("false");
} else {
final List<String> networkConfigParams =
NetworkingOptions.fromConfig(node.getNetworkingConfiguration()).getCLIOptions();
params.addAll(networkConfigParams);
}
if (node.isRevertReasonEnabled()) {
params.add("--revert-reason-enabled");
}
node.getPermissioningConfiguration()
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(
permissioningConfiguration -> {
if (permissioningConfiguration.isNodeWhitelistEnabled()) {
params.add("--permissions-nodes-config-file-enabled");
}
if (permissioningConfiguration.getNodePermissioningConfigFilePath() != null) {
params.add("--permissions-nodes-config-file");
params.add(permissioningConfiguration.getNodePermissioningConfigFilePath());
}
if (permissioningConfiguration.isAccountWhitelistEnabled()) {
params.add("--permissions-accounts-config-file-enabled");
}
if (permissioningConfiguration.getAccountPermissioningConfigFilePath() != null) {
params.add("--permissions-accounts-config-file");
params.add(permissioningConfiguration.getAccountPermissioningConfigFilePath());
}
});
node.getPermissioningConfiguration()
.flatMap(PermissioningConfiguration::getSmartContractConfig)
.ifPresent(
permissioningConfiguration -> {
if (permissioningConfiguration.isSmartContractNodeWhitelistEnabled()) {
params.add("--permissions-nodes-contract-enabled");
}
if (permissioningConfiguration.getNodeSmartContractAddress() != null) {
params.add("--permissions-nodes-contract-address");
params.add(permissioningConfiguration.getNodeSmartContractAddress().toString());
}
if (permissioningConfiguration.isSmartContractAccountWhitelistEnabled()) {
params.add("--permissions-accounts-contract-enabled");
}
if (permissioningConfiguration.getAccountSmartContractAddress() != null) {
params.add("--permissions-accounts-contract-address");
params.add(permissioningConfiguration.getAccountSmartContractAddress().toString());
}
});
params.addAll(node.getExtraCLIOptions());
params.add("--key-value-storage");
params.add("rocksdb");
LOG.info("Creating besu process with params {}", params);
final ProcessBuilder processBuilder =
new ProcessBuilder(params)
.directory(new File(System.getProperty("user.dir")).getParentFile().getParentFile())
.redirectErrorStream(true)
.redirectInput(Redirect.INHERIT);
if (!node.getPlugins().isEmpty()) {
processBuilder
.environment()
.put(
"BESU_OPTS",
"-Dbesu.plugins.dir=" + dataDir.resolve("plugins").toAbsolutePath().toString());
}
try {
final Process process = processBuilder.start();
outputProcessorExecutor.execute(() -> printOutput(node, process));
besuProcesses.put(node.getName(), process);
} catch (final IOException e) {
LOG.error("Error starting BesuNode process", e);
}
waitForPortsFile(dataDir);
}
private void printOutput(final BesuNode node, final Process process) {
try (final BufferedReader in =
new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8))) {
String line = in.readLine();
while (line != null) {
PROCESS_LOG.info("{}: {}", node.getName(), line);
line = in.readLine();
}
} catch (final IOException e) {
LOG.error("Failed to read output from process", e);
}
}
private Path createGenesisFile(final BesuNode node, final String genesisConfig) {
try {
final Path genesisFile = Files.createTempFile(node.homeDirectory(), "genesis", "");
genesisFile.toFile().deleteOnExit();
Files.write(genesisFile, genesisConfig.getBytes(UTF_8));
return genesisFile;
} catch (final IOException e) {
throw new IllegalStateException(e);
}
}
private void createStaticNodes(final BesuNode node) {
StaticNodesUtils.createStaticNodesFile(node.homeDirectory(), node.getStaticNodes());
}
private String apiList(final Collection<RpcApi> rpcApis) {
return rpcApis.stream().map(RpcApis::getValue).collect(Collectors.joining(","));
}
@Override
public void stopNode(final BesuNode node) {
node.stop();
if (besuProcesses.containsKey(node.getName())) {
final Process process = besuProcesses.get(node.getName());
killBesuProcess(node.getName(), process);
}
}
@Override
public synchronized void shutdown() {
final HashMap<String, Process> localMap = new HashMap<>(besuProcesses);
localMap.forEach(this::killBesuProcess);
outputProcessorExecutor.shutdown();
try {
if (!outputProcessorExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
LOG.error("Output processor executor did not shutdown cleanly.");
}
} catch (final InterruptedException e) {
LOG.error("Interrupted while already shutting down", e);
Thread.currentThread().interrupt();
}
}
@Override
public boolean isActive(final String nodeName) {
final Process process = besuProcesses.get(nodeName);
return process != null && process.isAlive();
}
private void killBesuProcess(final String name, final Process process) {
LOG.info("Killing " + name + " process");
Awaitility.waitAtMost(30, TimeUnit.SECONDS)
.until(
() -> {
if (process.isAlive()) {
process.destroy();
besuProcesses.remove(name);
return false;
} else {
besuProcesses.remove(name);
return true;
}
});
}
}
| 1 | 20,408 | What if `node.jsonRpcConfiguration().getAuthenticationPublicKeyFile()` is empty string, would that cause a problem here? | hyperledger-besu | java |
@@ -57,6 +57,12 @@ type CertificateSpec struct {
// Organization is the organization to be used on the Certificate
Organization []string `json:"organization,omitempty"`
+ // Certificate default Duration
+ Duration metav1.Duration `json:"duration,omitempty"`
+
+ // Certificate renew before expiration duration
+ RenewBefore metav1.Duration `json:"renewBefore,omitempty"`
+
// DNSNames is a list of subject alt names to be used on the Certificate
DNSNames []string `json:"dnsNames,omitempty"`
| 1 | /*
Copyright 2018 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=certificates
// Certificate is a type to represent a Certificate from ACME
type Certificate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CertificateSpec `json:"spec,omitempty"`
Status CertificateStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CertificateList is a list of Certificates
type CertificateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Certificate `json:"items"`
}
type KeyAlgorithm string
const (
RSAKeyAlgorithm KeyAlgorithm = "rsa"
ECDSAKeyAlgorithm KeyAlgorithm = "ecdsa"
)
// CertificateSpec defines the desired state of Certificate
type CertificateSpec struct {
// CommonName is a common name to be used on the Certificate
CommonName string `json:"commonName,omitempty"`
// Organization is the organization to be used on the Certificate
Organization []string `json:"organization,omitempty"`
// DNSNames is a list of subject alt names to be used on the Certificate
DNSNames []string `json:"dnsNames,omitempty"`
// SecretName is the name of the secret resource to store this secret in
SecretName string `json:"secretName"`
// IssuerRef is a reference to the issuer for this certificate.
// If the 'kind' field is not set, or set to 'Issuer', an Issuer resource
// with the given name in the same namespace as the Certificate will be used.
// If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the
// provided name will be used.
// The 'name' field in this stanza is required at all times.
IssuerRef ObjectReference `json:"issuerRef"`
// IsCA will mark this Certificate as valid for signing.
// This implies that the 'signing' usage is set
IsCA bool `json:"isCA,omitempty"`
// ACME contains configuration specific to ACME Certificates.
// Notably, this contains details on how the domain names listed on this
// Certificate resource should be 'solved', i.e. mapping HTTP01 and DNS01
// providers to DNS names.
ACME *ACMECertificateConfig `json:"acme,omitempty"`
// KeySize is the key bit size of the corresponding private key for this certificate.
// If provided, value must be between 2048 and 8192 inclusive when KeyAlgorithm is
// empty or is set to "rsa", and value must be one of (256, 384, 521) when
// KeyAlgorithm is set to "ecdsa".
KeySize int `json:"keySize,omitempty"`
// KeyAlgorithm is the private key algorithm of the corresponding private key
// for this certificate. If provided, allowed values are either "rsa" or "ecdsa"
// If KeyAlgorithm is specified and KeySize is not provided,
// key size of 256 will be used for "ecdsa" key algorithm and
// key size of 2048 will be used for "rsa" key algorithm.
KeyAlgorithm KeyAlgorithm `json:"keyAlgorithm,omitempty"`
}
// ACMECertificateConfig contains the configuration for the ACME certificate provider
type ACMECertificateConfig struct {
Config []DomainSolverConfig `json:"config"`
}
// CertificateStatus defines the observed state of Certificate
type CertificateStatus struct {
Conditions []CertificateCondition `json:"conditions,omitempty"`
LastFailureTime *metav1.Time `json:"lastFailureTime,omitempty"`
}
// CertificateCondition contains condition information for an Certificate.
type CertificateCondition struct {
// Type of the condition, currently ('Ready').
Type CertificateConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// CertificateConditionType represents an Certificate condition value.
type CertificateConditionType string
const (
// CertificateConditionReady represents the fact that a given Certificate condition
// is in ready state.
CertificateConditionReady CertificateConditionType = "Ready"
// CertificateConditionValidationFailed is used to indicate whether a
// validation for a Certificate has failed.
// This is currently used by the ACME issuer to track when the last
// validation was attempted.
CertificateConditionValidationFailed CertificateConditionType = "ValidateFailed"
)
| 1 | 13,832 | The `omitempty` struct tag does not do anything for non-pointer structs. I think we may need to consider making both of these fields pointers, so that they are excluded from output when not set, and also to make it easier to compare to the zero value. That said, I'm happy to merge this now and open an issue to verify the behaviour is as we want ahead of cutting v0.6.0. | jetstack-cert-manager | go |
@@ -120,7 +120,7 @@ public interface GenericToken<T extends GenericToken<T>> {
+ ") must come before " + to + " (at " + to.getStartInDocument() + ")"
);
}
- return IteratorUtil.generate(from, t -> t == to ? null : t.getNext());
+ return IteratorUtil.generate(from, t -> t.equals(to) ? null : t.getNext());
}
| 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.Iterator;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.internal.util.IteratorUtil;
import net.sourceforge.pmd.lang.ast.impl.javacc.JavaccToken;
/**
* Represents a language-independent token such as constants, values language reserved keywords, or comments.
*/
public interface GenericToken<T extends GenericToken<T>> {
/**
* Obtain the next generic token according to the input stream which generated the instance of this token.
*
* @return the next generic token if it exists; null if it does not exist
*/
T getNext();
/**
* Obtain a comment-type token which, according to the input stream which generated the instance of this token,
* precedes this instance token and succeeds the previous generic token (if there is any).
*
* @return the comment-type token if it exists; null if it does not exist
*/
T getPreviousComment();
/**
* Returns the token's text.
*/
String getImage();
/**
* Returns true if this token is an end-of-file token. This is the
* last token of token sequences that have been fully lexed.
*/
boolean isEof();
// TODO these default implementations are here for compatibility because
// the functionality is only used in pmd-java for now, though it could
// be ported. I prefer doing this as changing all the GenericToken in
// pmd-java to JavaccToken
/** Inclusive start offset in the source file text. */
default int getStartInDocument() {
return -1;
}
/** Exclusive end offset in the source file text. */
default int getEndInDocument() {
return -1;
}
/**
* Gets the line where the token's region begins
*
* @return a non-negative integer containing the begin line
*/
int getBeginLine();
/**
* Gets the line where the token's region ends
*
* @return a non-negative integer containing the end line
*/
int getEndLine();
/**
* Gets the column offset from the start of the begin line where the token's region begins
*
* @return a non-negative integer containing the begin column
*/
int getBeginColumn();
/**
* Gets the column offset from the start of the end line where the token's region ends
*
* @return a non-negative integer containing the begin column
*/
int getEndColumn();
/**
* Returns true if this token is implicit, ie was inserted artificially
* and has a zero-length image.
*/
default boolean isImplicit() {
return false;
}
/**
* Returns an iterator that enumerates all (non-special) tokens
* between the two tokens (bounds included).
*
* @param from First token to yield (inclusive)
* @param to Last token to yield (inclusive)
*
* @return An iterator
*
* @throws IllegalArgumentException If the first token does not come before the other token
*/
static Iterator<JavaccToken> range(JavaccToken from, JavaccToken to) {
if (from.getStartInDocument() > to.getStartInDocument()) {
throw new IllegalArgumentException(
from + " (at " + from.getStartInDocument()
+ ") must come before " + to + " (at " + to.getStartInDocument() + ")"
);
}
return IteratorUtil.generate(from, t -> t == to ? null : t.getNext());
}
/**
* Returns an iterable that enumerates all special tokens belonging
* to the given token.
*
* @param from Token from which to start, note that the returned iterable
* does not contain that token
*
* @return An iterator, possibly empty, not containing the parameter
*
* @throws NullPointerException If the parameter s null
*/
static Iterable<JavaccToken> previousSpecials(JavaccToken from) {
return () -> IteratorUtil.generate(from.getPreviousComment(), JavaccToken::getPreviousComment);
}
/**
* Gets a unique integer representing the kind of token this is.
* The semantics of this kind depend on the language.
*
* <p><strong>Note:</strong> This is an experimental API.
*
* <p>The returned constants can be looked up in the language's "*ParserConstants",
* e.g. CppParserConstants or JavaParserConstants. These constants are considered
* internal API and may change at any time when the language's grammar is changed.
*/
@Experimental
int getKind();
}
| 1 | 19,282 | I think this is should absolutely be `==`, as the interface cannot control the implementation of equals (and it's part of the contract of the enclosing function). Can we add this interface to the exceptions of the rule? | pmd-pmd | java |
@@ -22,11 +22,12 @@ type ns struct {
ipt iptables.Interface // interface to iptables
ips ipset.Interface // interface to ipset
- name string // k8s Namespace name
- nodeName string // my node name
- namespace *coreapi.Namespace // k8s Namespace object
- pods map[types.UID]*coreapi.Pod // k8s Pod objects by UID
- policies map[types.UID]interface{} // k8s NetworkPolicy objects by UID
+ name string // k8s Namespace name
+ nodeName string // my node name
+ namespaceUID types.UID // k8s Namespace UID
+ namespaceLabels map[string]string // k8s Namespace labels
+ pods map[types.UID]*coreapi.Pod // k8s Pod objects by UID
+ policies map[types.UID]interface{} // k8s NetworkPolicy objects by UID
uid types.UID // surrogate UID to own allPods selector
allPods *selectorSpec // hash:ip ipset of all pod IPs in this namespace | 1 | package npc
import (
"errors"
"fmt"
coreapi "k8s.io/api/core/v1"
extnapi "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"github.com/weaveworks/weave/common"
"github.com/weaveworks/weave/net/ipset"
"github.com/weaveworks/weave/npc/iptables"
)
// errInvalidNetworkPolicyObjType indicates a NetworkPolicy object of an
// unexpected concrete type was passed to a handler.
var errInvalidNetworkPolicyObjType = errors.New("invalid NetworkPolicy object type")
// ns holds the NetworkPolicy-relevant state of one Kubernetes namespace on
// this node: the pods and policies seen so far, and the ipset/iptables
// state derived from them.
type ns struct {
	ipt iptables.Interface // interface to iptables
	ips ipset.Interface    // interface to ipset

	name     string                     // k8s Namespace name
	nodeName string                     // my node name
	// k8s Namespace object; may be nil until the Namespace add event
	// arrives (empty() explicitly checks for nil).
	namespace *coreapi.Namespace
	pods      map[types.UID]*coreapi.Pod // k8s Pod objects by UID
	policies  map[types.UID]interface{}  // k8s NetworkPolicy objects by UID

	uid     types.UID     // surrogate UID to own allPods selector
	allPods *selectorSpec // hash:ip ipset of all pod IPs in this namespace

	// stores IP addrs of pods which are not selected by any target podSelector of
	// any netpol; used as a target in the WEAVE-NPC-{INGRESS,EGRESS}-DEFAULT
	// iptables chains.
	ingressDefaultAllowIPSet ipset.Name
	egressDefaultAllowIPSet  ipset.Name

	nsSelectors             *selectorSet // reference to global selectorSet that is shared across the `ns`. Used to represent all pods in the matching namespaces
	podSelectors            *selectorSet // used to represent the matching pods in namespace respresented by this `ns`
	namespacedPodsSelectors *selectorSet // reference to global selectorSet that is shared across the `ns`. Used to represent matching pods in matching namespace
	ipBlocks                *ipBlockSet
	rules                   *ruleSet
}
// newNS constructs the per-namespace state for the given namespace name.
// It provisions the catch-all "all pods in this namespace" selector and
// creates the ingress/egress default-allow ipsets, returning an error if
// any of those steps fail.
func newNS(name, nodeName string, ipt iptables.Interface, ips ipset.Interface, nsSelectors *selectorSet, namespacedPodsSelectors *selectorSet, namespaceObj *coreapi.Namespace) (*ns, error) {
	// An empty label selector matches every pod; this backs the hash:ip
	// ipset of all pod IPs in the namespace.
	everyPod, err := newSelectorSpec(&metav1.LabelSelector{}, nil, nil, name, ipset.HashIP)
	if err != nil {
		return nil, err
	}

	n := &ns{
		ipt:                     ipt,
		ips:                     ips,
		name:                    name,
		namespace:               namespaceObj,
		nodeName:                nodeName,
		pods:                    make(map[types.UID]*coreapi.Pod),
		policies:                make(map[types.UID]interface{}),
		uid:                     uuid.NewUUID(), // surrogate owner of the allPods selector
		allPods:                 everyPod,
		nsSelectors:             nsSelectors,
		namespacedPodsSelectors: namespacedPodsSelectors,
		ipBlocks:                newIPBlockSet(ips),
		rules:                   newRuleSet(ipt),
	}
	n.podSelectors = newSelectorSet(ips, n.onNewPodSelector, n.onNewTargetPodSelector, n.onDestroyTargetPodSelector)

	// Create the default-allow ipsets (ingress first, then egress) that
	// hold IPs of pods not targeted by any network policy.
	n.ingressDefaultAllowIPSet = ipset.Name(IpsetNamePrefix + shortName("ingress-default-allow:"+name))
	if err := ips.Create(n.ingressDefaultAllowIPSet, ipset.HashIP); err != nil {
		return nil, err
	}
	n.egressDefaultAllowIPSet = ipset.Name(IpsetNamePrefix + shortName("egress-default-allow:"+name))
	if err := ips.Create(n.egressDefaultAllowIPSet, ipset.HashIP); err != nil {
		return nil, err
	}

	if err := n.podSelectors.provision(n.uid, nil, map[string]*selectorSpec{n.allPods.key: n.allPods}); err != nil {
		return nil, err
	}

	return n, nil
}
// empty reports whether this ns holds no state at all: no pods, no
// policies, and no Namespace object.
func (ns *ns) empty() bool {
	if len(ns.pods) > 0 || len(ns.policies) > 0 {
		return false
	}
	return ns.namespace == nil
}
// destroy deprovisions the surrogate-owned allPods selector, releasing the
// "all pods in this namespace" ipset.
func (ns *ns) destroy() error {
	current := map[string]*selectorSpec{ns.allPods.key: ns.allPods}
	return ns.podSelectors.deprovision(ns.uid, current, nil)
}
// onNewPodSelector seeds a freshly provisioned pod selector with the IPs
// of all existing pods in this namespace whose labels it matches.
func (ns *ns) onNewPodSelector(selector *selector) error {
	for _, pod := range ns.pods {
		// Skip pods without an IP and pods the selector does not match.
		if !hasIP(pod) || !selector.matchesPodSelector(pod.ObjectMeta.Labels) {
			continue
		}
		if err := selector.addEntry(pod.ObjectMeta.UID, pod.Status.PodIP, podComment(pod)); err != nil {
			return err
		}
	}
	return nil
}
// onNewTargetPodSelector runs when a selector first becomes the target of
// a network policy of the given direction (policyType). Pods matched by
// the new target selector are now subject to that policy, so their IPs
// are removed from the namespace's default-allow ipset for that direction.
//
// Fixes: the previous local variable `ipset` shadowed the imported ipset
// package; renamed, and the loop-invariant set-name lookup is hoisted out
// of the loop.
func (ns *ns) onNewTargetPodSelector(selector *selector, policyType policyType) error {
	defaultAllowSet := ns.defaultAllowIPSetName(policyType)
	for _, pod := range ns.pods {
		if !hasIP(pod) || !selector.matchesPodSelector(pod.ObjectMeta.Labels) {
			continue
		}
		// The pod is now targeted by a policy, so it no longer gets the
		// default-allow treatment for this direction.
		if err := ns.ips.DelEntry(pod.ObjectMeta.UID, defaultAllowSet, pod.Status.PodIP); err != nil {
			return err
		}
	}
	return nil
}
// onDestroyTargetPodSelector runs when a target selector is deprovisioned;
// pods it matched may now be policy-free and, if so, are returned to the
// default-allow ipset for the given direction.
func (ns *ns) onDestroyTargetPodSelector(selector *selector, policyType policyType) error {
	for _, pod := range ns.pods {
		if !hasIP(pod) || !selector.matchesPodSelector(pod.ObjectMeta.Labels) {
			continue
		}
		if err := ns.addToDefaultAllowIfNoMatching(pod, policyType); err != nil {
			return err
		}
	}
	return nil
}
// addToDefaultAllowIfNoMatching adds the pod's IP to the default-allow
// ipset for the given policy direction, but only if no remaining target
// selector of that direction still matches the pod's labels.
//
// Fixes: the previous local variable `ipset` shadowed the imported ipset
// package; renamed. Also replaced the `found` flag with an early return.
//
// TODO(mp) optimize (avoid iterating over selectors) by ref counting IP addrs.
func (ns *ns) addToDefaultAllowIfNoMatching(pod *coreapi.Pod, policyType policyType) error {
	for _, s := range ns.podSelectors.entries {
		if ns.podSelectors.targetSelectorExist(s, policyType) && s.matchesPodSelector(pod.ObjectMeta.Labels) {
			// Still targeted by some policy; keep it out of default-allow.
			return nil
		}
	}
	defaultAllowSet := ns.defaultAllowIPSetName(policyType)
	return ns.ips.AddEntry(pod.ObjectMeta.UID, defaultAllowSet, pod.Status.PodIP, podComment(pod))
}
// checkLocalPod reports whether the given pod is scheduled on this node
// (its Spec.NodeName equals ns.nodeName).
//
// Fixes: collapsed the `if cond { return false }; return true` pattern
// into a direct boolean return.
func (ns *ns) checkLocalPod(obj *coreapi.Pod) bool {
	return obj.Spec.NodeName == ns.nodeName
}
// addPod records a new pod in this namespace and wires its IP into the
// relevant ipsets: every matching pod selector, the per-direction
// default-allow sets (when no target selector claims the pod), and the
// matching namespaced-pod selectors.
func (ns *ns) addPod(obj *coreapi.Pod) error {
	ns.pods[obj.ObjectMeta.UID] = obj
	if !hasIP(obj) {
		// Nothing to program until the pod has been assigned an IP.
		return nil
	}

	uid, ip, comment := obj.ObjectMeta.UID, obj.Status.PodIP, podComment(obj)

	matchedIngress, matchedEgress, err := ns.podSelectors.addToMatchingPodSelector(uid, obj.ObjectMeta.Labels, ip, comment)
	if err != nil {
		return err
	}

	// Pods not covered by any target selector fall back to default-allow.
	if !matchedIngress {
		if err := ns.ips.AddEntry(uid, ns.ingressDefaultAllowIPSet, ip, comment); err != nil {
			return err
		}
	}
	if !matchedEgress {
		if err := ns.ips.AddEntry(uid, ns.egressDefaultAllowIPSet, ip, comment); err != nil {
			return err
		}
	}

	// NOTE(review): this dereferences ns.namespace, which empty() suggests
	// may be nil before the Namespace add event arrives — confirm addPod
	// cannot be called in that window.
	return ns.namespacedPodsSelectors.addToMatchingNamespacedPodSelector(uid, obj.ObjectMeta.Labels, ns.namespace.ObjectMeta.Labels, ip, comment)
}
// updatePod reconciles ipset membership for a pod update. Four transitions
// are handled: no-IP -> no-IP (nothing to do), IP -> no-IP (full removal),
// no-IP -> IP (full addition, mirroring addPod), and IP -> IP (selective
// re-evaluation when the IP or the labels changed). The exact ordering of
// the ipset operations below is deliberate; do not reorder.
func (ns *ns) updatePod(oldObj, newObj *coreapi.Pod) error {
	delete(ns.pods, oldObj.ObjectMeta.UID)
	ns.pods[newObj.ObjectMeta.UID] = newObj
	if !hasIP(oldObj) && !hasIP(newObj) {
		return nil
	}
	// Pod lost its IP (e.g. terminated): purge it from all ipsets.
	if hasIP(oldObj) && !hasIP(newObj) {
		if err := ns.ips.DelEntry(oldObj.ObjectMeta.UID, ns.ingressDefaultAllowIPSet, oldObj.Status.PodIP); err != nil {
			return err
		}
		if err := ns.ips.DelEntry(oldObj.ObjectMeta.UID, ns.egressDefaultAllowIPSet, oldObj.Status.PodIP); err != nil {
			return err
		}
		if err := ns.namespacedPodsSelectors.delFromMatchingNamespacedPodSelector(oldObj.ObjectMeta.UID, oldObj.ObjectMeta.Labels, ns.namespace.ObjectMeta.Labels, oldObj.Status.PodIP); err != nil {
			return err
		}
		return ns.podSelectors.delFromMatchingPodSelector(oldObj.ObjectMeta.UID, oldObj.ObjectMeta.Labels, oldObj.Status.PodIP)
	}
	// Pod gained an IP: same bookkeeping as addPod.
	if !hasIP(oldObj) && hasIP(newObj) {
		foundIngress, foundEgress, err := ns.podSelectors.addToMatchingPodSelector(newObj.ObjectMeta.UID, newObj.ObjectMeta.Labels, newObj.Status.PodIP, podComment(newObj))
		if err != nil {
			return err
		}
		// No matching target selector => pod belongs in default-allow.
		if !foundIngress {
			if err := ns.ips.AddEntry(newObj.ObjectMeta.UID, ns.ingressDefaultAllowIPSet, newObj.Status.PodIP, podComment(newObj)); err != nil {
				return err
			}
		}
		if !foundEgress {
			if err := ns.ips.AddEntry(newObj.ObjectMeta.UID, ns.egressDefaultAllowIPSet, newObj.Status.PodIP, podComment(newObj)); err != nil {
				return err
			}
		}
		err = ns.namespacedPodsSelectors.addToMatchingNamespacedPodSelector(newObj.ObjectMeta.UID, newObj.ObjectMeta.Labels, ns.namespace.ObjectMeta.Labels, newObj.Status.PodIP, podComment(newObj))
		if err != nil {
			return err
		}
		return nil
	}
	// Pod kept an IP but it changed: swap the entry in default-allow sets.
	if oldObj.Status.PodIP != newObj.Status.PodIP {
		if err := ns.updateDefaultAllowIPSetEntry(oldObj, newObj, ns.ingressDefaultAllowIPSet); err != nil {
			return err
		}
		if err := ns.updateDefaultAllowIPSetEntry(oldObj, newObj, ns.egressDefaultAllowIPSet); err != nil {
			return err
		}
	}
	// Labels or IP changed: re-evaluate every selector's membership.
	if !equals(oldObj.ObjectMeta.Labels, newObj.ObjectMeta.Labels) ||
		oldObj.Status.PodIP != newObj.Status.PodIP {
		for _, ps := range ns.podSelectors.entries {
			oldMatch := ps.matchesPodSelector(oldObj.ObjectMeta.Labels)
			newMatch := ps.matchesPodSelector(newObj.ObjectMeta.Labels)
			// Membership and IP both unchanged for this selector: skip.
			if oldMatch == newMatch && oldObj.Status.PodIP == newObj.Status.PodIP {
				continue
			}
			if oldMatch {
				if err := ps.delEntry(oldObj.ObjectMeta.UID, oldObj.Status.PodIP); err != nil {
					return err
				}
			}
			if newMatch {
				if err := ps.addEntry(newObj.ObjectMeta.UID, newObj.Status.PodIP, podComment(newObj)); err != nil {
					return err
				}
			}
			// Keep default-allow consistent with the new selector membership
			// for both policy directions.
			if err := ns.addOrRemoveToDefaultAllowIPSet(ps, oldObj, newObj, oldMatch, newMatch, policyTypeIngress); err != nil {
				return err
			}
			if err := ns.addOrRemoveToDefaultAllowIPSet(ps, oldObj, newObj, oldMatch, newMatch, policyTypeEgress); err != nil {
				return err
			}
		}
		for _, ps := range ns.namespacedPodsSelectors.entries {
			oldMatch := ps.matchesNamespacedPodSelector(oldObj.ObjectMeta.Labels, ns.namespace.ObjectMeta.Labels)
			newMatch := ps.matchesNamespacedPodSelector(newObj.ObjectMeta.Labels, ns.namespace.ObjectMeta.Labels)
			if oldMatch == newMatch && oldObj.Status.PodIP == newObj.Status.PodIP {
				continue
			}
			if oldMatch {
				if err := ps.delEntry(oldObj.ObjectMeta.UID, oldObj.Status.PodIP); err != nil {
					return err
				}
			}
			if newMatch {
				if err := ps.addEntry(newObj.ObjectMeta.UID, newObj.Status.PodIP, podComment(newObj)); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// addOrRemoveToDefaultAllowIPSet keeps the default-allow ipset consistent
// when a pod's membership in a *target* selector flips during an update:
// newly matched pods leave default-allow, pods that stopped matching may
// rejoin it (if no other target selector covers them).
func (ns *ns) addOrRemoveToDefaultAllowIPSet(ps *selector, oldObj, newObj *coreapi.Pod, oldMatch, newMatch bool, policyType policyType) error {
	ipset := ns.defaultAllowIPSetName(policyType)
	// Only target selectors govern default-allow membership.
	if ns.podSelectors.targetSelectorExist(ps, policyType) {
		switch {
		case !oldMatch && newMatch:
			// NOTE(review): deletes using oldObj's UID/IP — presumably equal to
			// newObj's when only labels changed; confirm for IP-change updates.
			if err := ns.ips.DelEntry(oldObj.ObjectMeta.UID, ipset, oldObj.Status.PodIP); err != nil {
				return err
			}
		case oldMatch && !newMatch:
			if err := ns.addToDefaultAllowIfNoMatching(newObj, policyType); err != nil {
				return err
			}
		}
	}
	return nil
}
// deletePod forgets a pod and removes its IP from every ipset it may occupy:
// both per-direction default-allow sets and any selector-managed sets.
func (ns *ns) deletePod(obj *coreapi.Pod) error {
	delete(ns.pods, obj.ObjectMeta.UID)
	if !hasIP(obj) {
		// No IP was ever registered, so no ipset cleanup is needed.
		return nil
	}
	for _, set := range []ipset.Name{ns.ingressDefaultAllowIPSet, ns.egressDefaultAllowIPSet} {
		if err := ns.ips.DelEntry(obj.ObjectMeta.UID, set, obj.Status.PodIP); err != nil {
			return err
		}
	}
	if err := ns.podSelectors.delFromMatchingPodSelector(obj.ObjectMeta.UID, obj.ObjectMeta.Labels, obj.Status.PodIP); err != nil {
		return err
	}
	return ns.namespacedPodsSelectors.delFromMatchingNamespacedPodSelector(obj.ObjectMeta.UID, obj.ObjectMeta.Labels, ns.namespace.ObjectMeta.Labels, obj.Status.PodIP)
}
// addNetworkPolicy analyses a policy object and provisions every resource
// it needs (selectors, ipBlocks, then rules) in dependency order.
func (ns *ns) addNetworkPolicy(obj interface{}) error {
	// Analyse policy, determine which rules and ipsets are required
	uid, rules, nsSelectors, podSelectors, namespacedPodsSelectors, ipBlocks, err := ns.analyse(obj)
	if err != nil {
		return err
	}
	// Provision required resources in dependency order: selectors and
	// ipBlocks must exist before the rules that reference them.
	steps := []func() error{
		func() error { return ns.nsSelectors.provision(uid, nil, nsSelectors) },
		func() error { return ns.podSelectors.provision(uid, nil, podSelectors) },
		func() error { return ns.namespacedPodsSelectors.provision(uid, nil, namespacedPodsSelectors) },
		func() error { return ns.ipBlocks.provision(uid, nil, ipBlocks) },
		func() error { return ns.rules.provision(uid, nil, rules) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// updateNetworkPolicy reconciles resources when a policy changes. It
// analyses both versions, then deprovisions no-longer-needed resources
// (rules first, since rules reference the other resources) and provisions
// newly required ones (rules last). This strict ordering avoids dangling
// references in iptables/ipsets; do not reorder the calls.
func (ns *ns) updateNetworkPolicy(oldObj, newObj interface{}) error {
	// Analyse the old and the new policy so we can determine differences
	oldUID, oldRules, oldNsSelectors, oldPodSelectors, oldNamespacedPodsSelectors, oldIPBlocks, err := ns.analyse(oldObj)
	if err != nil {
		return err
	}
	newUID, newRules, newNsSelectors, newPodSelectors, newNamespacedPodsSelectors, newIPBlocks, err := ns.analyse(newObj)
	if err != nil {
		return err
	}
	// Re-key the cached policy object (the UID may have changed).
	delete(ns.policies, oldUID)
	ns.policies[newUID] = newObj
	// Deprovision unused and provision newly required resources in dependency order
	if err := ns.rules.deprovision(oldUID, oldRules, newRules); err != nil {
		return err
	}
	if err := ns.nsSelectors.deprovision(oldUID, oldNsSelectors, newNsSelectors); err != nil {
		return err
	}
	if err := ns.podSelectors.deprovision(oldUID, oldPodSelectors, newPodSelectors); err != nil {
		return err
	}
	if err := ns.namespacedPodsSelectors.deprovision(oldUID, oldNamespacedPodsSelectors, newNamespacedPodsSelectors); err != nil {
		return err
	}
	if err := ns.ipBlocks.deprovision(oldUID, oldIPBlocks, newIPBlocks); err != nil {
		return err
	}
	// NOTE(review): provisioning below uses oldUID, not newUID — presumably
	// the UID is stable across updates of the same policy; confirm.
	if err := ns.nsSelectors.provision(oldUID, oldNsSelectors, newNsSelectors); err != nil {
		return err
	}
	if err := ns.podSelectors.provision(oldUID, oldPodSelectors, newPodSelectors); err != nil {
		return err
	}
	if err := ns.namespacedPodsSelectors.provision(oldUID, oldNamespacedPodsSelectors, newNamespacedPodsSelectors); err != nil {
		return err
	}
	if err := ns.ipBlocks.provision(oldUID, oldIPBlocks, newIPBlocks); err != nil {
		return err
	}
	return ns.rules.provision(oldUID, oldRules, newRules)
}
// deleteNetworkPolicy analyses a policy being removed and deprovisions all
// of its resources, rules first, so nothing references a selector or
// ipBlock that has already been torn down.
func (ns *ns) deleteNetworkPolicy(obj interface{}) error {
	// Analyse network policy to free resources
	uid, rules, nsSelectors, podSelectors, namespacedPodsSelectors, ipBlocks, err := ns.analyse(obj)
	if err != nil {
		return err
	}
	delete(ns.policies, uid)
	// Deprovision unused resources in dependency order.
	steps := []func() error{
		func() error { return ns.rules.deprovision(uid, rules, nil) },
		func() error { return ns.nsSelectors.deprovision(uid, nsSelectors, nil) },
		func() error { return ns.podSelectors.deprovision(uid, podSelectors, nil) },
		func() error { return ns.namespacedPodsSelectors.deprovision(uid, namespacedPodsSelectors, nil) },
		func() error { return ns.ipBlocks.deprovision(uid, ipBlocks, nil) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// updateDefaultAllowIPSetEntry swaps a pod's entry in the given
// default-allow ipset when its IP changed, but only if the pod was
// actually present in that set.
func (ns *ns) updateDefaultAllowIPSetEntry(oldObj, newObj *coreapi.Pod, ipsetName ipset.Name) error {
	// Instead of iterating over all selectors we check whether old pod IP
	// has been inserted into default-allow ipset to decide whether the IP
	// in the ipset has to be updated.
	if ns.ips.Exist(oldObj.ObjectMeta.UID, ipsetName, oldObj.Status.PodIP) {
		if err := ns.ips.DelEntry(oldObj.ObjectMeta.UID, ipsetName, oldObj.Status.PodIP); err != nil {
			return err
		}
		if err := ns.ips.AddEntry(newObj.ObjectMeta.UID, ipsetName, newObj.Status.PodIP, podComment(newObj)); err != nil {
			return err
		}
	}
	return nil
}
// bypassRules builds the iptables rule arguments, keyed by chain name,
// that let traffic for default-allowed pods bypass policy evaluation:
// one ACCEPT rule for ingress (destination match) and a mark+RETURN pair
// for egress (source match).
func bypassRules(namespace string, ingress, egress ipset.Name) map[string][][]string {
	return map[string][][]string{
		DefaultChain: {
			{"-m", "set", "--match-set", string(ingress), "dst", "-j", "ACCEPT",
				"-m", "comment", "--comment", "DefaultAllow ingress isolation for namespace: " + namespace},
		},
		EgressDefaultChain: {
			// Mark first, then RETURN so the packet skips the rest of the chain.
			{"-m", "set", "--match-set", string(egress), "src", "-j", EgressMarkChain,
				"-m", "comment", "--comment", "DefaultAllow egress isolation for namespace: " + namespace},
			{"-m", "set", "--match-set", string(egress), "src", "-j", "RETURN",
				"-m", "comment", "--comment", "DefaultAllow egress isolation for namespace: " + namespace},
		},
	}
}
// ensureBypassRules appends the namespace's default-allow bypass rules to
// the filter table, chain by chain.
func (ns *ns) ensureBypassRules() error {
	ruleset := bypassRules(ns.name, ns.ingressDefaultAllowIPSet, ns.egressDefaultAllowIPSet)
	for chain, chainRules := range ruleset {
		for _, rule := range chainRules {
			common.Log.Debugf("adding rule for DefaultAllow in namespace: %s, chain: %s, %s", ns.name, chain, rule)
			if err := ns.ipt.Append(TableFilter, chain, rule...); err != nil {
				return err
			}
		}
	}
	return nil
}
// deleteBypassRules removes the namespace's default-allow bypass rules
// from the filter table; the exact mirror of ensureBypassRules.
func (ns *ns) deleteBypassRules() error {
	ruleset := bypassRules(ns.name, ns.ingressDefaultAllowIPSet, ns.egressDefaultAllowIPSet)
	for chain, chainRules := range ruleset {
		for _, rule := range chainRules {
			common.Log.Debugf("removing rule for DefaultAllow in namespace: %s, chain: %s, %s", ns.name, chain, rule)
			if err := ns.ipt.Delete(TableFilter, chain, rule...); err != nil {
				return err
			}
		}
	}
	return nil
}
// addNamespace records the namespace object, installs its policy-bypass
// iptables rules, and registers its all-pods ipset with every matching
// namespace selector.
func (ns *ns) addNamespace(obj *coreapi.Namespace) error {
	ns.namespace = obj
	// Insert a rule to bypass policies
	if err := ns.ensureBypassRules(); err != nil {
		return err
	}
	// Add namespace ipset to matching namespace selectors.
	// (Returns directly instead of staging the error in a redundant variable.)
	return ns.nsSelectors.addToMatchingNamespaceSelector(obj.ObjectMeta.UID, obj.ObjectMeta.Labels, string(ns.allPods.ipsetName), namespaceComment(ns))
}
// updateNamespace refreshes the cached namespace object and, when its
// labels changed, moves the namespace's all-pods ipset in or out of each
// namespace selector whose match flipped.
func (ns *ns) updateNamespace(oldObj, newObj *coreapi.Namespace) error {
	ns.namespace = newObj
	// Re-evaluate namespace selector membership if labels have changed
	if !equals(oldObj.ObjectMeta.Labels, newObj.ObjectMeta.Labels) {
		for _, selector := range ns.nsSelectors.entries {
			oldMatch := selector.matchesNamespaceSelector(oldObj.ObjectMeta.Labels)
			newMatch := selector.matchesNamespaceSelector(newObj.ObjectMeta.Labels)
			// Membership unchanged for this selector: nothing to do.
			if oldMatch == newMatch {
				continue
			}
			if oldMatch {
				if err := selector.delEntry(ns.namespace.ObjectMeta.UID, string(ns.allPods.ipsetName)); err != nil {
					return err
				}
			}
			if newMatch {
				if err := selector.addEntry(ns.namespace.ObjectMeta.UID, string(ns.allPods.ipsetName), namespaceComment(ns)); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// deleteNamespace tears down per-namespace state: bypass iptables rules,
// membership in namespace selectors, and both default-allow ipsets.
func (ns *ns) deleteNamespace(obj *coreapi.Namespace) error {
	// NOTE(review): ns.namespace is nilled here but other methods (e.g.
	// deletePod) dereference ns.namespace.ObjectMeta — confirm no events
	// for this namespace can arrive after deletion.
	ns.namespace = nil
	// Remove bypass rule
	if err := ns.deleteBypassRules(); err != nil {
		return err
	}
	// Remove namespace ipset from any matching namespace selectors
	err := ns.nsSelectors.delFromMatchingNamespaceSelector(obj.ObjectMeta.UID, obj.ObjectMeta.Labels, string(ns.allPods.ipsetName))
	if err != nil {
		return err
	}
	// Destroy the now-unreferenced default-allow ipsets.
	if err := ns.ips.Destroy(ns.ingressDefaultAllowIPSet); err != nil {
		return err
	}
	if err := ns.ips.Destroy(ns.egressDefaultAllowIPSet); err != nil {
		return err
	}
	return nil
}
// hasIP reports whether the pod currently has a usable pod IP: it is not
// terminated (Succeeded/Failed), has a non-empty PodIP, and does not share
// the host network namespace.
func hasIP(pod *coreapi.Pod) bool {
	if pod.Spec.HostNetwork {
		return false
	}
	if pod.Status.Phase == "Succeeded" || pod.Status.Phase == "Failed" {
		return false
	}
	return len(pod.Status.PodIP) > 0
}
// equals reports whether two string maps contain exactly the same
// key/value pairs. nil and empty maps compare equal.
func equals(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for key, want := range a {
		// Use the comma-ok form: the original `b[ak] != av` treated a
		// missing key as "", so maps with different keys but empty values
		// wrongly compared equal.
		if got, ok := b[key]; !ok || got != want {
			return false
		}
	}
	return true
}
// namespaceComment returns the ipset-entry comment for a namespace.
func namespaceComment(namespace *ns) string {
	return "namespace: " + namespace.name
}
// podComment returns the ipset-entry comment identifying a pod.
func podComment(pod *coreapi.Pod) string {
	return fmt.Sprintf("namespace: %s, pod: %s", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
}
// defaultAllowIPSetName returns the default-allow ipset for the given
// policy direction (ingress unless pt is egress).
func (ns *ns) defaultAllowIPSetName(pt policyType) ipset.Name {
	// Renamed the local from `ipset` to `name`: the old name shadowed the
	// imported ipset package inside this function.
	name := ns.ingressDefaultAllowIPSet
	if pt == policyTypeEgress {
		name = ns.egressDefaultAllowIPSet
	}
	return name
}
// analyse extracts the policy UID, caches the policy object, and delegates
// to analysePolicy to compute the rules, selectors and ipBlocks the policy
// requires.
func (ns *ns) analyse(obj interface{}) (
	uid types.UID,
	rules map[string]*ruleSpec,
	nsSelectors, podSelectors, namespacedPodsSelectors map[string]*selectorSpec,
	ipBlocks map[string]*ipBlockSpec,
	err error) {
	switch p := obj.(type) {
	case *extnapi.NetworkPolicy:
		uid = p.ObjectMeta.UID
	case *networkingv1.NetworkPolicy:
		uid = p.ObjectMeta.UID
	default:
		err = errInvalidNetworkPolicyObjType
		return
	}
	ns.policies[uid] = obj
	// Analyse policy, determine which rules and ipsets are required
	// NOTE(review): this assertion panics for *extnapi.NetworkPolicy even
	// though the switch above accepts it — presumably callers only pass the
	// networking/v1 type here; confirm or convert the extnapi case.
	rules, nsSelectors, podSelectors, namespacedPodsSelectors, ipBlocks, err = ns.analysePolicy(obj.(*networkingv1.NetworkPolicy))
	if err != nil {
		return
	}
	return
}
| 1 | 16,046 | It looks like the UID is the only other thing that we use from `namespace`, so I suggest to copy that out and lose `namespace`, so we don't have to worry about setting it to nil. | weaveworks-weave | go |
@@ -17,6 +17,7 @@ const (
GithubV1ProviderName = "GitHubV1"
CodeCommitProviderName = "CodeCommit"
BitbucketProviderName = "Bitbucket"
+ DefaultImage = "aws/codebuild/amazonlinux2-x86_64-standard:3.0"
pipelineManifestPath = "cicd/pipeline.yml"
) | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"errors"
"fmt"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/fatih/structs"
"gopkg.in/yaml.v3"
)
const (
GithubProviderName = "GitHub"
GithubV1ProviderName = "GitHubV1"
CodeCommitProviderName = "CodeCommit"
BitbucketProviderName = "Bitbucket"
pipelineManifestPath = "cicd/pipeline.yml"
)
// Provider defines a source of the artifacts
// that will be built and deployed via a pipeline
type Provider interface {
	fmt.Stringer
	// Name returns the provider identifier used in the pipeline manifest.
	Name() string
	// Properties returns the provider-specific configuration as a generic map.
	Properties() map[string]interface{}
}
// githubV1Provider adapts GitHubV1Properties to the Provider interface.
type githubV1Provider struct {
	properties *GitHubV1Properties
}
// Name returns the GitHubV1 provider identifier.
func (p *githubV1Provider) Name() string {
	return GithubV1ProviderName
}
// String returns the display name.
// NOTE(review): returns GithubProviderName ("GitHub"), not the V1 name —
// presumably intentional for user-facing output; confirm.
func (p *githubV1Provider) String() string {
	return GithubProviderName
}
// Properties returns the configuration as a map keyed by the structs tags.
func (p *githubV1Provider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// githubProvider adapts GitHubProperties (v2, CodeStar-based) to Provider.
type githubProvider struct {
	properties *GitHubProperties
}
// Name returns the GitHub provider identifier.
func (p *githubProvider) Name() string {
	return GithubProviderName
}
// String returns the display name.
func (p *githubProvider) String() string {
	return GithubProviderName
}
// Properties returns the configuration as a map keyed by the structs tags.
func (p *githubProvider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// codecommitProvider adapts CodeCommitProperties to the Provider interface.
type codecommitProvider struct {
	properties *CodeCommitProperties
}
// Name returns the CodeCommit provider identifier.
func (p *codecommitProvider) Name() string {
	return CodeCommitProviderName
}
// String returns the display name.
func (p *codecommitProvider) String() string {
	return CodeCommitProviderName
}
// Properties returns the configuration as a map keyed by the structs tags.
func (p *codecommitProvider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// bitbucketProvider adapts BitbucketProperties to the Provider interface.
type bitbucketProvider struct {
	properties *BitbucketProperties
}
// Name returns the Bitbucket provider identifier.
func (p *bitbucketProvider) Name() string {
	return BitbucketProviderName
}
// String returns the display name.
func (p *bitbucketProvider) String() string {
	return BitbucketProviderName
}
// Properties returns the configuration as a map keyed by the structs tags.
func (p *bitbucketProvider) Properties() map[string]interface{} {
	return structs.Map(p.properties)
}
// GitHubV1Properties contain information for configuring a Githubv1
// source provider.
type GitHubV1Properties struct {
	// use tag from https://godoc.org/github.com/fatih/structs#example-Map--Tags
	// to specify the name of the field in the output properties
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
	// GithubSecretIdKeyName names the secret holding the personal access token.
	GithubSecretIdKeyName string `structs:"access_token_secret" yaml:"access_token_secret"`
}
// GitHubProperties contains information for configuring a GitHubv2
// source provider.
type GitHubProperties struct {
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
}
// BitbucketProperties contains information for configuring a Bitbucket
// source provider.
type BitbucketProperties struct {
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
}
// CodeCommitProperties contains information for configuring a CodeCommit
// source provider.
type CodeCommitProperties struct {
	RepositoryURL string `structs:"repository" yaml:"repository"`
	Branch string `structs:"branch" yaml:"branch"`
}
// NewProvider creates a source provider based on the dynamic type of the
// provider-specific configuration passed in; unknown types yield
// ErrUnknownProvider.
func NewProvider(configs interface{}) (Provider, error) {
	switch props := configs.(type) {
	case *GitHubV1Properties:
		return &githubV1Provider{properties: props}, nil
	case *GitHubProperties:
		return &githubProvider{properties: props}, nil
	case *CodeCommitProperties:
		return &codecommitProvider{properties: props}, nil
	case *BitbucketProperties:
		return &bitbucketProvider{properties: props}, nil
	default:
		return nil, &ErrUnknownProvider{unknownProviderProperties: props}
	}
}
// PipelineSchemaMajorVersion is the major version number
// of the pipeline manifest schema
type PipelineSchemaMajorVersion int
const (
// Ver1 is the current schema major version of the pipeline.yml file.
Ver1 PipelineSchemaMajorVersion = iota + 1
)
// PipelineManifest contains information that defines the relationship
// and deployment ordering of your environments.
type PipelineManifest struct {
	// Name of the pipeline
	Name string `yaml:"name"`
	// Version is the manifest schema major version.
	Version PipelineSchemaMajorVersion `yaml:"version"`
	// Source describes where the artifacts to build come from.
	Source *Source `yaml:"source"`
	// Stages lists deployment stages in order.
	Stages []PipelineStage `yaml:"stages"`
	// parser renders the manifest template; not serialized.
	parser template.Parser
}
// Source defines the source of the artifacts to be built and deployed.
type Source struct {
	// ProviderName is one of the *ProviderName constants.
	ProviderName string `yaml:"provider"`
	// Properties holds provider-specific configuration (see *Properties types).
	Properties map[string]interface{} `yaml:"properties"`
}
// PipelineStage represents a stage in the pipeline manifest
type PipelineStage struct {
	Name string `yaml:"name"`
	// RequiresApproval inserts a manual approval action before deployment.
	RequiresApproval bool `yaml:"requires_approval,omitempty"`
	// TestCommands are run after the deployment of this stage.
	TestCommands []string `yaml:"test_commands,omitempty"`
}
// NewPipelineManifest returns a pipeline manifest object for the given
// provider and stages; at least one stage is required.
func NewPipelineManifest(pipelineName string, provider Provider, stages []PipelineStage) (*PipelineManifest, error) {
	// TODO: #221 Do more validations
	if len(stages) == 0 {
		return nil, fmt.Errorf("a pipeline %s can not be created without a deployment stage",
			pipelineName)
	}
	source := &Source{
		ProviderName: provider.Name(),
		Properties:   provider.Properties(),
	}
	manifest := &PipelineManifest{
		Name:    pipelineName,
		Version: Ver1,
		Source:  source,
		Stages:  stages,
		parser:  template.New(),
	}
	return manifest, nil
}
// MarshalBinary serializes the pipeline manifest object into byte array that
// represents the pipeline.yml document.
func (m *PipelineManifest) MarshalBinary() ([]byte, error) {
	rendered, err := m.parser.Parse(pipelineManifestPath, *m)
	if err != nil {
		return nil, err
	}
	return rendered.Bytes(), nil
}
// UnmarshalPipeline deserializes the YAML input stream into a pipeline
// manifest object. It returns an error if any issue occurs during
// deserialization or the YAML input contains invalid fields.
func UnmarshalPipeline(in []byte) (*PipelineManifest, error) {
	var pm PipelineManifest
	if err := yaml.Unmarshal(in, &pm); err != nil {
		return nil, err
	}
	version, err := validateVersion(&pm)
	if err != nil {
		return nil, err
	}
	// TODO: #221 Do more validations
	if version == Ver1 {
		return &pm, nil
	}
	// we should never reach here, this is just to make the compiler happy
	return nil, errors.New("unexpected error occurs while unmarshalling pipeline.yml")
}
// IsCodeStarConnection indicates to the manifest if this source requires a CSC connection.
func (s Source) IsCodeStarConnection() bool {
	switch s.ProviderName {
	// GitHub (v2) and Bitbucket both authenticate via CodeStar Connections.
	case GithubProviderName, BitbucketProviderName:
		return true
	default:
		return false
	}
}
// validateVersion checks that the manifest declares a supported schema
// major version, returning it or an ErrInvalidPipelineManifestVersion.
func validateVersion(pm *PipelineManifest) (PipelineSchemaMajorVersion, error) {
	switch pm.Version {
	case Ver1:
		return Ver1, nil
	default:
		return pm.Version,
			&ErrInvalidPipelineManifestVersion{
				invalidVersion: pm.Version,
			}
	}
}
| 1 | 16,954 | Can we define this constant in the`deploy` pkg instead? this would allow us to keep it private | aws-copilot-cli | go |
@@ -251,7 +251,7 @@ def reporter():
@pytest.fixture
def init_linter(linter: PyLinter) -> PyLinter:
linter.open()
- linter.set_current_module("toto")
+ linter.set_current_module("toto", "mydir/toto")
linter.file_state = FileState("toto")
return linter
| 1 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 Kevin Jing Qiu <[email protected]>
# Copyright (c) 2012 Anthony VEREZ <[email protected]>
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2018, 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Noam Yorav-Raphael <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2017-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2017, 2021 Ville Skyttä <[email protected]>
# Copyright (c) 2017 Craig Citro <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2018, 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2018 Matus Valo <[email protected]>
# Copyright (c) 2018 Scott Worley <[email protected]>
# Copyright (c) 2018 Randall Leeds <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Reverb C <[email protected]>
# Copyright (c) 2019 Janne Rönkkö <[email protected]>
# Copyright (c) 2019 Trevor Bekolay <[email protected]>
# Copyright (c) 2019 Andres Perez Hortal <[email protected]>
# Copyright (c) 2019 Ashley Whetter <[email protected]>
# Copyright (c) 2020 Martin Vielsmaier <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Michal Vasilek <[email protected]>
# Copyright (c) 2021 Eisuke Kawashima <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Andreas Finkler <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# pylint: disable=redefined-outer-name
import os
import re
import sys
import tempfile
from contextlib import contextmanager
from importlib import reload
from io import StringIO
from os import chdir, getcwd
from os.path import abspath, basename, dirname, isdir, join, sep
from shutil import rmtree
from typing import Iterable, Iterator, List, Optional, Tuple
import platformdirs
import pytest
from pytest import CaptureFixture
from pylint import checkers, config, exceptions, interfaces, lint, testutils
from pylint.checkers.utils import check_messages
from pylint.constants import (
MSG_STATE_CONFIDENCE,
MSG_STATE_SCOPE_CONFIG,
MSG_STATE_SCOPE_MODULE,
OLD_DEFAULT_PYLINT_HOME,
)
from pylint.exceptions import InvalidMessageError
from pylint.lint import ArgumentPreprocessingError, PyLinter, Run, preprocess_options
from pylint.message import Message
from pylint.reporters import text
from pylint.typing import MessageLocationTuple
from pylint.utils import FileState, print_full_documentation, tokenize_module
# Name of the environment variable holding the user's home directory.
# The original code nested an ``os.name == "nt"`` check inside the
# ``os.name == "java"`` (Jython) branch; that inner check was dead code
# (os.name is "java" there), so Jython always resolved to "HOME".
if os.name == "java":
    HOME = "HOME"
elif sys.platform == "win32":
    HOME = "USERPROFILE"
else:
    HOME = "HOME"
@contextmanager
def fake_home() -> Iterator:
    """Temporarily point the HOME env variable at a scratch directory."""
    scratch = tempfile.mkdtemp("fake-home")
    saved_home = os.environ.get(HOME)
    try:
        os.environ[HOME] = scratch
        yield
    finally:
        # Undo every side effect even if the body raised: drop any PYLINTRC
        # set inside the block, restore HOME, and delete the scratch dir.
        os.environ.pop("PYLINTRC", "")
        if saved_home is None:
            del os.environ[HOME]
        else:
            os.environ[HOME] = saved_home
        rmtree(scratch, ignore_errors=True)
def remove(file):
    """Best-effort removal of *file*; a missing file is not an error."""
    try:
        os.remove(file)
    except OSError:
        # Cleanup helper: ignore "does not exist" and permission races.
        pass
HERE = abspath(dirname(__file__))
INPUT_DIR = join(HERE, "..", "input")
REGRTEST_DATA_DIR = join(HERE, "..", "regrtest_data")
DATA_DIR = join(HERE, "..", "data")
@contextmanager
def tempdir() -> Iterator[str]:
    """Yield a fresh temporary directory while chdir'd into it.

    The previous working directory is restored and the directory deleted
    on exit. This is supposed to be used with a *with* statement.
    """
    scratch = tempfile.mkdtemp()
    previous_dir = getcwd()
    chdir(scratch)
    # Resolve the real path (e.g. /tmp -> /private/tmp on macOS) so path
    # comparisons inside the block behave consistently.
    resolved = abspath(".")
    try:
        yield resolved
    finally:
        chdir(previous_dir)
        rmtree(resolved)
def create_files(paths: List[str], chroot: str = ".") -> None:
    """Create the directories and files listed in *paths* under *chroot*.

    Entries whose basename is empty (i.e. ending with a path separator)
    are treated as directories; every other entry becomes an empty file,
    with its parent directories created as needed.

    :param list paths: list of relative paths to files or directories
    :param str chroot: the root directory in which paths will be created
    """
    directories, files = set(), set()
    for relative in paths:
        full = join(chroot, relative)
        if basename(full) == "":
            # Trailing separator: a directory entry.
            directories.add(full)
        else:
            directories.add(dirname(full))
            files.add(full)
    for directory in directories:
        if not isdir(directory):
            os.makedirs(directory)
    for file_path in files:
        # Touch the file (truncating any existing content).
        with open(file_path, "w", encoding="utf-8"):
            pass
@pytest.fixture
def fake_path() -> Iterator[Iterable[str]]:
    """Replace sys.path with a dummy value for the test, then restore it."""
    original = list(sys.path)
    dummy: Iterable[str] = ["1", "2", "3"]
    sys.path[:] = dummy
    yield dummy
    sys.path[:] = original
def test_no_args(fake_path: List[str]) -> None:
    """fix_import_path with no module arguments must leave sys.path untouched.

    Annotation fixed: the fake_path fixture yields strings, not ints.
    """
    with lint.fix_import_path([]):
        assert sys.path == fake_path
    assert sys.path == fake_path
@pytest.mark.parametrize(
    "case", [["a/b/"], ["a/b"], ["a/b/__init__.py"], ["a/"], ["a"]]
)
def test_one_arg(fake_path: List[str], case: List[str]) -> None:
    """fix_import_path must prepend the package root for a single argument
    and restore sys.path afterwards."""
    with tempdir() as chroot:
        create_files(["a/b/__init__.py"])
        # The top-level package directory's parent ("a") gets prepended.
        expected = [join(chroot, "a")] + fake_path
        assert sys.path == fake_path
        with lint.fix_import_path(case):
            assert sys.path == expected
        assert sys.path == fake_path
@pytest.mark.parametrize(
    "case",
    [
        ["a/b", "a/c"],
        ["a/c/", "a/b/"],
        ["a/b/__init__.py", "a/c/__init__.py"],
        ["a", "a/c/__init__.py"],
    ],
)
def test_two_similar_args(fake_path: List[str], case: List[str]) -> None:
    """Two arguments under the same package root must add that root once."""
    with tempdir() as chroot:
        create_files(["a/b/__init__.py", "a/c/__init__.py"])
        # Both sub-packages share the root "a"; only it is prepended.
        expected = [join(chroot, "a")] + fake_path
        assert sys.path == fake_path
        with lint.fix_import_path(case):
            assert sys.path == expected
        assert sys.path == fake_path
@pytest.mark.parametrize(
    "case",
    [
        ["a/b/c/__init__.py", "a/d/__init__.py", "a/e/f.py"],
        ["a/b/c", "a", "a/e"],
        ["a/b/c", "a", "a/b/c", "a/e", "a"],
    ],
)
def test_more_args(fake_path: List[str], case: List[str]) -> None:
    """Multiple (possibly duplicated) arguments must yield a de-duplicated
    list of package roots prepended to sys.path, restored afterwards."""
    with tempdir() as chroot:
        create_files(["a/b/c/__init__.py", "a/d/__init__.py", "a/e/f.py"])
        # Expected roots: "a/b" (parent of package c), "a", and "a/e".
        expected = [
            join(chroot, suffix)
            for suffix in (sep.join(("a", "b")), "a", sep.join(("a", "e")))
        ] + fake_path
        assert sys.path == fake_path
        with lint.fix_import_path(case):
            assert sys.path == expected
        assert sys.path == fake_path
@pytest.fixture(scope="module")
def disable():
    """Message categories disabled for the module-scoped linter ("I" = info)."""
    return ["I"]
@pytest.fixture(scope="module")
def reporter():
    """Reporter class used by the module-scoped linter fixture."""
    return testutils.GenericTestReporter
@pytest.fixture
def init_linter(linter: PyLinter) -> PyLinter:
    """Return the shared linter opened and focused on a dummy module "toto"."""
    linter.open()
    linter.set_current_module("toto")
    linter.file_state = FileState("toto")
    return linter
def test_pylint_visit_method_taken_in_account(linter: PyLinter) -> None:
    """A custom checker's visit_* method must be picked up during a check run."""
    class CustomChecker(checkers.BaseChecker):
        __implements__ = interfaces.IAstroidChecker
        name = "custom"
        msgs = {"W9999": ("", "custom", "")}
        @check_messages("custom")
        def visit_class(self, _):
            pass
    linter.register_checker(CustomChecker(linter))
    linter.open()
    out = StringIO()
    linter.set_reporter(text.TextReporter(out))
    # The check must not raise even though "abc" is not a real module path.
    linter.check(["abc"])
def test_enable_message(init_linter: PyLinter) -> None:
    """Disabling/enabling single message ids must honor package/module scope."""
    linter = init_linter
    assert linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("W0102")
    linter.disable("W0101", scope="package")
    linter.disable("W0102", scope="module", line=1)
    assert not linter.is_message_enabled("W0101")
    assert not linter.is_message_enabled("W0102", 1)
    # Switching modules: package-scoped disable persists, module-scoped resets.
    linter.set_current_module("tutu")
    assert not linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("W0102")
    linter.enable("W0101", scope="package")
    linter.enable("W0102", scope="module", line=1)
    assert linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("W0102", 1)
def test_enable_message_category(init_linter: PyLinter) -> None:
    """Disabling/enabling whole categories ("W", "C") must honor scopes too."""
    linter = init_linter
    assert linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("C0202")
    linter.disable("W", scope="package")
    linter.disable("C", scope="module", line=1)
    assert not linter.is_message_enabled("W0101")
    # Module-scoped disable only applies at the given line.
    assert linter.is_message_enabled("C0202")
    assert not linter.is_message_enabled("C0202", line=1)
    linter.set_current_module("tutu")
    assert not linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("C0202")
    linter.enable("W", scope="package")
    linter.enable("C", scope="module", line=1)
    assert linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("C0202")
    assert linter.is_message_enabled("C0202", line=1)
def test_message_state_scope(init_linter: PyLinter) -> None:
    """_get_message_state_scope must report where a message's state came from:
    configuration, module-level pragma, or confidence filtering."""
    class FakeConfig:
        # Only HIGH-confidence messages are emitted with this config.
        confidence = ["HIGH"]
    linter = init_linter
    linter.disable("C0202")
    assert MSG_STATE_SCOPE_CONFIG == linter._get_message_state_scope("C0202")
    linter.disable("W0101", scope="module", line=3)
    assert MSG_STATE_SCOPE_CONFIG == linter._get_message_state_scope("C0202")
    assert MSG_STATE_SCOPE_MODULE == linter._get_message_state_scope("W0101", 3)
    linter.enable("W0102", scope="module", line=3)
    assert MSG_STATE_SCOPE_MODULE == linter._get_message_state_scope("W0102", 3)
    linter.config = FakeConfig()
    # INFERENCE confidence is filtered out by the HIGH-only config.
    assert MSG_STATE_CONFIDENCE == linter._get_message_state_scope(
        "this-is-bad", confidence=interfaces.INFERENCE
    )
def test_enable_message_block(init_linter: PyLinter) -> None:
    """Block-level enable/disable pragmas in a real source file must toggle
    messages per-line, and the suppression mapping must point each
    suppressed line at the closest previous disable comment."""
    linter = init_linter
    # NOTE(review): init_linter already called open(); this second call looks
    # redundant — confirm it is harmless.
    linter.open()
    filepath = join(REGRTEST_DATA_DIR, "func_block_disable_msg.py")
    linter.set_current_module("func_block_disable_msg")
    astroid = linter.get_ast(filepath, "func_block_disable_msg")
    # Process the pragma comments, then fold them into block line ranges.
    linter.process_tokens(tokenize_module(astroid))
    fs = linter.file_state
    fs.collect_block_lines(linter.msgs_store, astroid)
    # global (module level)
    assert linter.is_message_enabled("W0613")
    assert linter.is_message_enabled("E1101")
    # meth1
    assert linter.is_message_enabled("W0613", 13)
    # meth2
    assert not linter.is_message_enabled("W0613", 18)
    # meth3
    assert not linter.is_message_enabled("E1101", 24)
    assert linter.is_message_enabled("E1101", 26)
    # meth4
    assert not linter.is_message_enabled("E1101", 32)
    assert linter.is_message_enabled("E1101", 36)
    # meth5
    assert not linter.is_message_enabled("E1101", 42)
    assert not linter.is_message_enabled("E1101", 43)
    assert linter.is_message_enabled("E1101", 46)
    assert not linter.is_message_enabled("E1101", 49)
    assert not linter.is_message_enabled("E1101", 51)
    # meth6
    assert not linter.is_message_enabled("E1101", 57)
    assert linter.is_message_enabled("E1101", 61)
    assert not linter.is_message_enabled("E1101", 64)
    assert not linter.is_message_enabled("E1101", 66)
    assert linter.is_message_enabled("E0602", 57)
    assert linter.is_message_enabled("E0602", 61)
    assert not linter.is_message_enabled("E0602", 62)
    assert linter.is_message_enabled("E0602", 64)
    assert linter.is_message_enabled("E0602", 66)
    # meth7
    assert not linter.is_message_enabled("E1101", 70)
    assert linter.is_message_enabled("E1101", 72)
    assert linter.is_message_enabled("E1101", 75)
    assert linter.is_message_enabled("E1101", 77)
    # _suppression_mapping maps (msgid, line) -> line of the disable pragma.
    fs = linter.file_state
    assert fs._suppression_mapping["W0613", 18] == 17
    assert fs._suppression_mapping["E1101", 33] == 30
    assert ("E1101", 46) not in fs._suppression_mapping
    assert fs._suppression_mapping["C0302", 18] == 1
    assert fs._suppression_mapping["C0302", 50] == 1
    # This is tricky. While the disable in line 106 is disabling
    # both 108 and 110, this is usually not what the user wanted.
    # Therefore, we report the closest previous disable comment.
    assert fs._suppression_mapping["E1101", 108] == 106
    assert fs._suppression_mapping["E1101", 110] == 109
def test_enable_by_symbol(init_linter: PyLinter) -> None:
    """messages can be controlled by symbolic names.

    The state is consistent across symbols and numbers.
    """
    linter = init_linter
    # Everything starts enabled, whether addressed by msgid or by symbol.
    assert linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("unreachable")
    assert linter.is_message_enabled("W0102")
    assert linter.is_message_enabled("dangerous-default-value")
    # Disable one message package-wide and one module-wide (line-scoped).
    linter.disable("unreachable", scope="package")
    linter.disable("dangerous-default-value", scope="module", line=1)
    assert not linter.is_message_enabled("W0101")
    assert not linter.is_message_enabled("unreachable")
    assert not linter.is_message_enabled("W0102", 1)
    assert not linter.is_message_enabled("dangerous-default-value", 1)
    # Switching modules drops the module-scoped disable but keeps the
    # package-scoped one.
    linter.set_current_module("tutu")
    assert not linter.is_message_enabled("W0101")
    assert not linter.is_message_enabled("unreachable")
    assert linter.is_message_enabled("W0102")
    assert linter.is_message_enabled("dangerous-default-value")
    # Re-enabling restores both, again regardless of msgid vs. symbol.
    linter.enable("unreachable", scope="package")
    linter.enable("dangerous-default-value", scope="module", line=1)
    assert linter.is_message_enabled("W0101")
    assert linter.is_message_enabled("unreachable")
    assert linter.is_message_enabled("W0102", 1)
    assert linter.is_message_enabled("dangerous-default-value", 1)
def test_enable_report(linter: PyLinter) -> None:
    """Reports can be toggled off and back on, just like messages."""
    report_id = "RP0001"
    # Enabled by default.
    assert linter.report_is_enabled(report_id)
    # Toggle off, then back on, checking the state after each step.
    linter.disable(report_id)
    assert not linter.report_is_enabled(report_id)
    linter.enable(report_id)
    assert linter.report_is_enabled(report_id)
def test_report_output_format_aliased(linter: PyLinter) -> None:
    """Selecting the aliased "text" output format installs a TextReporter."""
    text.register(linter)
    linter.set_option("output-format", "text")
    reporter_class_name = type(linter.reporter).__name__
    assert reporter_class_name == "TextReporter"
def test_set_unsupported_reporter(linter: PyLinter) -> None:
    """An unresolvable reporter class name raises InvalidReporterError."""
    text.register(linter)
    bogus_reporter = "missing.module.Class"
    with pytest.raises(exceptions.InvalidReporterError):
        linter.set_option("output-format", bogus_reporter)
def test_set_option_1(linter: PyLinter) -> None:
    """set_option() accepts a comma-separated string of msgids to disable."""
    linter.set_option("disable", "C0111,W0234")
    # Both ids are disabled, under their msgid as well as under their symbol.
    for disabled in ("C0111", "W0234", "missing-docstring", "non-iterator-returned"):
        assert not linter.is_message_enabled(disabled)
    # An unrelated message is left untouched.
    assert linter.is_message_enabled("W0113")
def test_set_option_2(linter: PyLinter) -> None:
    """set_option() also accepts a tuple of msgids to disable."""
    linter.set_option("disable", ("C0111", "W0234"))
    # Both ids are disabled, under their msgid as well as under their symbol.
    for disabled in ("C0111", "W0234", "missing-docstring", "non-iterator-returned"):
        assert not linter.is_message_enabled(disabled)
    # An unrelated message is left untouched.
    assert linter.is_message_enabled("W0113")
def test_enable_checkers(linter: PyLinter) -> None:
    """Disabling/enabling a checker by name controls whether it is prepared."""
    linter.disable("design")
    # Idiomatic membership test instead of ``not (x in y)``.
    assert "design" not in [c.name for c in linter.prepare_checkers()]
    linter.enable("design")
    assert "design" in [c.name for c in linter.prepare_checkers()]
def test_errors_only(linter: PyLinter) -> None:
    """error_mode() drops the stylistic / reporting-oriented checkers."""
    linter.error_mode()
    active = {checker.name for checker in linter.prepare_checkers()}
    excluded = {"design", "format", "metrics", "miscellaneous", "similarities"}
    # None of the excluded checkers may remain active.
    assert not (excluded & active)
def test_disable_similar(linter: PyLinter) -> None:
    """Disabling both the similarity report and message drops the checker."""
    linter.set_option("disable", "RP0801")
    linter.set_option("disable", "R0801")
    # Idiomatic membership test instead of ``not (x in y)``.
    assert "similarities" not in [c.name for c in linter.prepare_checkers()]
def test_disable_alot(linter: PyLinter) -> None:
    """check that we disabled a lot of checkers"""
    linter.set_option("reports", False)
    linter.set_option("disable", "R,C,W")
    checker_names = [c.name for c in linter.prepare_checkers()]
    for cname in ("design", "metrics", "similarities"):
        # ``not in`` instead of ``not (cname in ...)``; keep cname as the
        # assertion message so a failure names the offending checker.
        assert cname not in checker_names, cname
def test_addmessage(linter: PyLinter) -> None:
    """add_message() records messages, addressed by msgid or symbol alike."""
    linter.set_reporter(testutils.GenericTestReporter())
    linter.open()
    linter.set_current_module("0123")
    linter.add_message("C0301", line=1, args=(1, 2))
    linter.add_message("line-too-long", line=2, args=(3, 4))
    assert len(linter.reporter.messages) == 2
    # Both calls resolve to the same message definition; only the line and
    # the interpolated arguments differ, so build the expectations in a loop
    # instead of duplicating the whole Message literal twice.
    expected = [
        Message(
            msg_id="C0301",
            symbol="line-too-long",
            msg=msg_text,
            confidence=interfaces.Confidence(
                name="UNDEFINED",
                description="Warning without any associated confidence level.",
            ),
            location=MessageLocationTuple(
                abspath="0123",
                path="0123",
                module="0123",
                obj="",
                line=lineno,
                column=0,
                end_line=None,
                end_column=None,
            ),
        )
        for lineno, msg_text in ((1, "Line too long (1/2)"), (2, "Line too long (3/4)"))
    ]
    assert linter.reporter.messages == expected
def test_addmessage_invalid(linter: PyLinter) -> None:
    """add_message() validates that line/node arguments match the message scope."""
    linter.set_reporter(testutils.GenericTestReporter())
    linter.open()
    linter.set_current_module("0123")

    # A line-scoped message requires an explicit line number...
    with pytest.raises(InvalidMessageError) as exc_info:
        linter.add_message("line-too-long", args=(1, 2))
    assert str(exc_info.value) == "Message C0301 must provide line, got None"

    # ...and must not be given a node on top of the line.
    with pytest.raises(InvalidMessageError) as exc_info:
        linter.add_message("line-too-long", line=2, node="fake_node", args=(1, 2))
    assert (
        str(exc_info.value)
        == "Message C0301 must only provide line, got line=2, node=fake_node"
    )

    # A node-scoped message requires a node.
    with pytest.raises(InvalidMessageError) as exc_info:
        linter.add_message("C0321")
    assert str(exc_info.value) == "Message C0321 must provide Node, got None"
def test_load_plugin_command_line() -> None:
    """--load-plugins on the command line registers the plugin's checkers."""
    dummy_plugin_path = join(REGRTEST_DATA_DIR, "dummy_plugin")
    sys.path.append(dummy_plugin_path)
    try:
        run = Run(
            ["--load-plugins", "dummy_plugin", join(REGRTEST_DATA_DIR, "empty.py")],
            exit=False,
        )
        # The dummy plugin registers exactly two checkers under its name.
        assert (
            len([ch.name for ch in run.linter.get_checkers() if ch.name == "dummy_plugin"])
            == 2
        )
    finally:
        # Always restore sys.path, even if the assertion above fails.
        sys.path.remove(dummy_plugin_path)
def test_load_plugin_config_file() -> None:
    """A plugin listed in the rcfile is loaded just like a CLI one."""
    dummy_plugin_path = join(REGRTEST_DATA_DIR, "dummy_plugin")
    sys.path.append(dummy_plugin_path)
    try:
        config_path = join(REGRTEST_DATA_DIR, "dummy_plugin.rc")
        run = Run(
            ["--rcfile", config_path, join(REGRTEST_DATA_DIR, "empty.py")],
            exit=False,
        )
        # The dummy plugin registers exactly two checkers under its name.
        assert (
            len([ch.name for ch in run.linter.get_checkers() if ch.name == "dummy_plugin"])
            == 2
        )
    finally:
        # Always restore sys.path, even if the assertion above fails.
        sys.path.remove(dummy_plugin_path)
def test_load_plugin_configuration() -> None:
    """A plugin may extend configuration values such as the ignore list."""
    dummy_plugin_path = join(REGRTEST_DATA_DIR, "dummy_plugin")
    sys.path.append(dummy_plugin_path)
    try:
        run = Run(
            [
                "--load-plugins",
                "dummy_conf_plugin",
                "--ignore",
                "foo,bar",
                join(REGRTEST_DATA_DIR, "empty.py"),
            ],
            exit=False,
        )
        # The plugin appends "bin" to the user-supplied ignore list.
        assert run.linter.config.black_list == ["foo", "bar", "bin"]
    finally:
        # The original leaked this sys.path entry into later tests; clean up.
        sys.path.remove(dummy_plugin_path)
def test_init_hooks_called_before_load_plugins() -> None:
    """--init-hook runs before plugin loading, whatever the option order."""
    hook_args = ["--init-hook", "raise RuntimeError"]
    plugin_args = ["--load-plugins", "unexistant"]
    for args in (plugin_args + hook_args, hook_args + plugin_args):
        # The hook's RuntimeError fires before the bogus plugin is imported.
        with pytest.raises(RuntimeError):
            Run(args)
def test_analyze_explicit_script(linter: PyLinter) -> None:
    """Linting an extensionless script yields a single line-too-long message."""
    linter.set_reporter(testutils.GenericTestReporter())
    linter.check([os.path.join(DATA_DIR, "ascript")])
    assert len(linter.reporter.messages) == 1
    assert linter.reporter.messages[0] == Message(
        msg_id="C0301",
        symbol="line-too-long",
        msg="Line too long (175/100)",
        confidence=interfaces.Confidence(
            name="UNDEFINED",
            description="Warning without any associated confidence level.",
        ),
        location=MessageLocationTuple(
            # The fixture lives in tests/data; rewrite the lint/ path of this
            # test module into the data/ path of the checked script.
            abspath=os.path.join(abspath(dirname(__file__)), "ascript").replace(
                f"lint{os.path.sep}ascript", f"data{os.path.sep}ascript"
            ),
            path=f"tests{os.path.sep}data{os.path.sep}ascript",
            module="data.ascript",
            obj="",
            line=2,
            column=0,
            end_line=None,
            end_column=None,
        ),
    )
def test_full_documentation(linter: PyLinter) -> None:
    """print_full_documentation() emits the expected sections and entries."""
    out = StringIO()
    print_full_documentation(linter, out)
    output = out.getvalue()
    # A few spot checks only
    for re_str in (
        # autogenerated text
        "^Pylint global options and switches$",
        "Verbatim name of the checker is ``variables``",
        # messages
        "^:undefined-loop-variable \\(W0631\\): *",
        # options
        "^:dummy-variables-rgx:",
    ):
        # Pass the flag directly instead of compiling and then re-wrapping
        # the compiled pattern in re.search(); name the pattern in the
        # assertion message so a failure identifies the missing entry.
        assert re.search(re_str, output, re.MULTILINE), re_str
def test_list_msgs_enabled(init_linter: PyLinter, capsys: CaptureFixture) -> None:
    """list_messages_enabled() prints enabled entries before disabled ones."""
    linter = init_linter
    linter.enable("W0101", scope="package")
    linter.disable("W0102", scope="package")
    linter.list_messages_enabled()
    lines = capsys.readouterr().out.splitlines()

    assert "Enabled messages:" in lines
    assert "Disabled messages:" in lines
    disabled_section_start = lines.index("Disabled messages:")

    enabled_entry = " unreachable (W0101)"
    disabled_entry = " dangerous-default-value (W0102)"
    assert enabled_entry in lines
    # W0101 was enabled, so it must be listed before the disabled section.
    assert lines.index(enabled_entry) < disabled_section_start
    assert disabled_entry in lines
    # W0102 was disabled, so it must be listed after the section header.
    assert lines.index(disabled_entry) > disabled_section_start
@pytest.fixture
def pop_pylintrc() -> None:
    # Make sure a PYLINTRC variable from the developer's shell does not leak
    # into the test: it is removed before the test body runs.
    os.environ.pop("PYLINTRC", None)
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylint_home() -> None:
    """PYLINT_HOME honours the user cache dir and the PYLINTHOME override."""
    uhome = os.path.expanduser("~")
    if uhome == "~":
        # No home directory could be resolved: fall back to the legacy default.
        expected = OLD_DEFAULT_PYLINT_HOME
    else:
        expected = platformdirs.user_cache_dir("pylint")
    assert config.PYLINT_HOME == expected
    try:
        pylintd = join(tempfile.gettempdir(), OLD_DEFAULT_PYLINT_HOME)
        os.environ["PYLINTHOME"] = pylintd
        try:
            # PYLINT_HOME is computed at import time, so reload the module
            # to pick up the environment override.
            reload(config)
            assert config.PYLINT_HOME == pylintd
        finally:
            try:
                # The directory may never have been created; that is fine.
                rmtree(pylintd)
            except FileNotFoundError:
                pass
    finally:
        del os.environ["PYLINTHOME"]
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc() -> None:
    """find_pylintrc() ignores bogus PYLINTRC values and foreign cwds."""
    with fake_home():
        current_dir = getcwd()
        # Move somewhere that cannot contain a project pylintrc.
        chdir(os.path.dirname(os.path.abspath(sys.executable)))
        try:
            assert config.find_pylintrc() is None
            # A PYLINTRC pointing at a non-existing file is ignored...
            os.environ["PYLINTRC"] = join(tempfile.gettempdir(), ".pylintrc")
            assert config.find_pylintrc() is None
            # ...and so is a PYLINTRC naming a directory.
            os.environ["PYLINTRC"] = "."
            assert config.find_pylintrc() is None
        finally:
            chdir(current_dir)
            # NOTE(review): PYLINTRC stays set in the environment here;
            # reload(config) restores module state but not os.environ —
            # confirm later tests rely on the pop_pylintrc fixture for that.
            reload(config)
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc_parentdir() -> None:
    """find_pylintrc() walks up through packages to the closest pylintrc."""
    with tempdir() as chroot:
        create_files(
            [
                "a/pylintrc",
                "a/b/__init__.py",
                "a/b/pylintrc",
                "a/b/c/__init__.py",
                "a/b/c/d/__init__.py",
                "a/b/c/d/e/.pylintrc",
            ]
        )
        with fake_home():
            # With no package context and no home config, nothing is found.
            assert config.find_pylintrc() is None
        # Expected resolution per working directory: the lookup stops at the
        # first pylintrc found while walking up the package tree, except that
        # a .pylintrc in the cwd itself wins (the a/b/c/d/e case).
        results = {
            "a": join(chroot, "a", "pylintrc"),
            "a/b": join(chroot, "a", "b", "pylintrc"),
            "a/b/c": join(chroot, "a", "b", "pylintrc"),
            "a/b/c/d": join(chroot, "a", "b", "pylintrc"),
            "a/b/c/d/e": join(chroot, "a", "b", "c", "d", "e", ".pylintrc"),
        }
        for basedir, expected in results.items():
            os.chdir(join(chroot, basedir))
            assert config.find_pylintrc() == expected
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc_parentdir_no_package() -> None:
    """The upward search stops at the first directory without __init__.py."""
    with tempdir() as chroot:
        with fake_home():
            create_files(["a/pylintrc", "a/b/pylintrc", "a/b/c/d/__init__.py"])
            assert config.find_pylintrc() is None
            # a/b/c is not a package (no __init__.py), so the walk from
            # a/b/c and a/b/c/d never reaches the configs above it.
            results = {
                "a": join(chroot, "a", "pylintrc"),
                "a/b": join(chroot, "a", "b", "pylintrc"),
                "a/b/c": None,
                "a/b/c/d": None,
            }
            for basedir, expected in results.items():
                os.chdir(join(chroot, basedir))
                assert config.find_pylintrc() == expected
class TestPreprocessOptions:
    """Tests for preprocess_options(), which handles early command-line flags."""

    def _callback(self, name: str, value: Optional[str]) -> None:
        # Record every (option, value) pair the preprocessor hands back.
        self.args.append((name, value))

    def test_value_equal(self) -> None:
        # ``self.args`` is (re)initialized per test rather than in __init__;
        # pytest instantiates the class freshly for each test anyway.
        self.args: List[Tuple[str, Optional[str]]] = []
        preprocess_options(
            ["--foo", "--bar=baz", "--qu=ux"],
            {"foo": (self._callback, False), "qu": (self._callback, True)},
        )
        # "--bar" is not registered, "foo" takes no value, "qu" takes one.
        assert [("foo", None), ("qu", "ux")] == self.args

    def test_value_space(self) -> None:
        # The value may also be passed as a separate argument ("--qu ux").
        self.args = []
        preprocess_options(["--qu", "ux"], {"qu": (self._callback, True)})
        assert [("qu", "ux")] == self.args

    @staticmethod
    def test_error_missing_expected_value() -> None:
        # An option declared as taking a value must actually receive one.
        with pytest.raises(ArgumentPreprocessingError):
            preprocess_options(["--foo", "--bar", "--qu=ux"], {"bar": (None, True)})
        with pytest.raises(ArgumentPreprocessingError):
            preprocess_options(["--foo", "--bar"], {"bar": (None, True)})

    @staticmethod
    def test_error_unexpected_value() -> None:
        # Conversely, a flag declared as valueless must not get "=value".
        with pytest.raises(ArgumentPreprocessingError):
            preprocess_options(
                ["--foo", "--bar=spam", "--qu=ux"], {"bar": (None, False)}
            )
class _CustomPyLinter(PyLinter):
    """PyLinter subclass that refuses to analyze any file named wrong.py."""

    @staticmethod
    def should_analyze_file(modname: str, path: str, is_argument: bool = False) -> bool:
        # Skip wrong.py even when it is passed as an explicit argument.
        if os.path.basename(path) == "wrong.py":
            return False
        # Two-argument super() is required here: zero-argument super() cannot
        # be used inside a staticmethod (it would misuse the first argument).
        return super(_CustomPyLinter, _CustomPyLinter).should_analyze_file(
            modname, path, is_argument=is_argument
        )
def test_custom_should_analyze_file() -> None:
    """Check that we can write custom should_analyze_file that work
    even for arguments.
    """
    package_dir = os.path.join(REGRTEST_DATA_DIR, "bad_package")
    wrong_file = os.path.join(package_dir, "wrong.py")
    # Run the same scenario single- and multi-process.
    for jobs in (1, 2):
        reporter = testutils.GenericTestReporter()
        linter = _CustomPyLinter()
        linter.config.jobs = jobs
        linter.config.persistent = 0
        linter.open()
        linter.set_reporter(reporter)
        try:
            # The package must be importable while it is being checked.
            sys.path.append(os.path.dirname(package_dir))
            # wrong.py is passed explicitly but must still be skipped.
            linter.check([package_dir, wrong_file])
        finally:
            sys.path.pop()
        messages = reporter.messages
        # Only the package's own syntax error is reported; wrong.py is ignored.
        assert len(messages) == 1
        assert "invalid syntax" in messages[0].msg
# we do the check with jobs=1 as well, so that we are sure that the duplicates
# are created by the multiprocessing problem.
@pytest.mark.parametrize("jobs", [1, 2])
def test_multiprocessing(jobs: int) -> None:
    """Check that multiprocessing does not create duplicates."""
    # For the bug (#3584) to show up we need more than one file with issues
    # per process
    filenames = [
        "special_attr_scope_lookup_crash.py",
        "syntax_error.py",
        "unused_variable.py",
        "wildcard.py",
        "wrong_import_position.py",
    ]
    reporter = testutils.GenericTestReporter()
    linter = PyLinter()
    linter.config.jobs = jobs
    linter.config.persistent = 0
    linter.open()
    linter.set_reporter(reporter)
    try:
        # The test data must be importable while it is being checked.
        sys.path.append(os.path.dirname(REGRTEST_DATA_DIR))
        linter.check([os.path.join(REGRTEST_DATA_DIR, fname) for fname in filenames])
    finally:
        sys.path.pop()
    messages = reporter.messages
    # Every emitted message must be unique — a duplicate would indicate the
    # multiprocessing result-merge bug.
    assert len(messages) == len(set(messages))
def test_filename_with__init__(init_linter: PyLinter) -> None:
    """Regression test: a file whose name merely *ends* in __init__.py
    (for example flycheck__init__.py) must not trigger linting of the
    entire containing directory.
    """
    linter = init_linter
    reporter = testutils.GenericTestReporter()
    linter.open()
    linter.set_reporter(reporter)
    target = join(INPUT_DIR, "not__init__.py")
    linter.check([target])
    # Only the named file is linted, and it is clean.
    assert not reporter.messages
def test_by_module_statement_value(init_linter: PyLinter) -> None:
    """Test "statement" for each module analyzed of computed correctly."""
    linter = init_linter
    linter.check([os.path.join(os.path.dirname(__file__), "data")])
    by_module_stats = linter.stats.by_module
    for module, module_stats in by_module_stats.items():
        # NOTE(review): ``init_linter`` is a fixture value, so ``linter2`` is
        # the very same object as ``linter``; this relies on check() resetting
        # its global stats on each run — confirm the aliasing is intentional.
        linter2 = init_linter
        if module == "data":
            linter2.check([os.path.join(os.path.dirname(__file__), "data/__init__.py")])
        else:
            linter2.check([os.path.join(os.path.dirname(__file__), module)])
        # Check that the by_module "statement" is equal to the global "statement"
        # computed for that module
        assert module_stats["statement"] == linter2.stats.statement
1 | 19,440 | I don't like this fixture name; it should be a noun, like `initialized_linter`? But that's outside the scope of this MR. | PyCQA-pylint | py
@@ -154,7 +154,9 @@ public class ExportService {
if (releasedVersion == null) {
throw new ExportException("No released version for dataset " + dataset.getGlobalId().toString());
}
- final JsonObjectBuilder datasetAsJsonBuilder = JsonPrinter.jsonAsDatasetDto(releasedVersion);
+ // We pass settingsService into the JsonPrinter constructor to check the :ExcludeEmailFromExport setting.
+ JsonPrinter jsonPrinter = new JsonPrinter(settingsService);
+ final JsonObjectBuilder datasetAsJsonBuilder = jsonPrinter.jsonAsDatasetDto(releasedVersion);
JsonObject datasetAsJson = datasetAsJsonBuilder.build();
Iterator<Exporter> exporters = loader.iterator(); | 1 | package edu.harvard.iq.dataverse.export;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetVersion;
import static edu.harvard.iq.dataverse.GlobalIdServiceBean.logger;
import edu.harvard.iq.dataverse.dataaccess.DataAccess;
import static edu.harvard.iq.dataverse.dataaccess.DataAccess.getStorageIO;
import edu.harvard.iq.dataverse.dataaccess.DataAccessOption;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
import edu.harvard.iq.dataverse.export.spi.Exporter;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.json.JsonPrinter;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.nio.channels.Channel;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.ServiceConfigurationError;
import java.util.ServiceLoader;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import javax.ws.rs.core.MediaType;
import org.apache.commons.io.IOUtils;
/**
*
* @author skraffmi
*/
/**
 * Singleton service that renders a dataset's metadata in every registered
 * export format and caches each result as an auxiliary file
 * ("export_&lt;format&gt;.cached") in the dataset's storage location.
 * Exporter implementations are discovered at runtime via {@link ServiceLoader}.
 *
 * @author skraffmi
 */
public class ExportService {

    // Lazily-created singleton instance (see getInstance()).
    private static ExportService service;
    // Discovers every Exporter implementation available on the classpath.
    private ServiceLoader<Exporter> loader;
    // NOTE(review): static and mutable; overwritten on every getInstance(...)
    // call and read without synchronization — confirm this is acceptable
    // for concurrent exports.
    static SettingsServiceBean settingsService;

    private ExportService() {
        loader = ServiceLoader.load(Exporter.class);
    }

    /**
     * @deprecated Use `getInstance(SettingsServiceBean settingsService)`
     * instead. For privacy reasons, we need to pass in settingsService so that
     * we can make a decision whether or not to exclude email addresses. No new
     * code should call this method and it would be nice to remove calls from
     * existing code.
     */
    @Deprecated
    public static synchronized ExportService getInstance() {
        return getInstance(null);
    }

    /**
     * Returns the singleton, remembering the given settings service so that
     * exporters can consult settings such as :ExcludeEmailFromExport.
     */
    public static synchronized ExportService getInstance(SettingsServiceBean settingsService) {
        ExportService.settingsService = settingsService;
        if (service == null) {
            service = new ExportService();
        }
        return service;
    }

    /**
     * Lists every available exporter as a {display name, provider name} pair.
     */
    public List<String[]> getExportersLabels() {
        List<String[]> retList = new ArrayList<>();
        Iterator<Exporter> exporters = ExportService.getInstance(null).loader.iterator();
        while (exporters.hasNext()) {
            Exporter e = exporters.next();
            String[] temp = new String[2];
            temp[0] = e.getDisplayName();
            temp[1] = e.getProviderName();
            retList.add(temp);
        }
        return retList;
    }

    /**
     * Returns the dataset's export in the given format, generating and
     * caching it first if it is not cached yet.
     *
     * @throws ExportException if the export could not be produced
     * @throws IOException on storage access problems
     */
    public InputStream getExport(Dataset dataset, String formatName) throws ExportException, IOException {
        // first we will try to locate an already existing, cached export
        // for this format:
        InputStream exportInputStream = getCachedExportFormat(dataset, formatName);

        if (exportInputStream != null) {
            return exportInputStream;
        }

        // if it doesn't exist, we'll try to run the export:
        exportFormat(dataset, formatName);

        // and then try again:
        exportInputStream = getCachedExportFormat(dataset, formatName);

        if (exportInputStream != null) {
            return exportInputStream;
        }

        // if there is no cached export still - we have to give up and throw
        // an exception!
        throw new ExportException("Failed to export the dataset as " + formatName);
    }

    /**
     * Convenience wrapper around getExport() that reads the whole export
     * into a String; returns null if the export fails.
     */
    public String getExportAsString(Dataset dataset, String formatName) {
        InputStream inputStream = null;
        InputStreamReader inp = null;
        try {
            inputStream = getExport(dataset, formatName);
            if (inputStream != null) {
                inp = new InputStreamReader(inputStream, "UTF8");
                BufferedReader br = new BufferedReader(inp);
                StringBuilder sb = new StringBuilder();
                String line;
                while ((line = br.readLine()) != null) {
                    sb.append(line);
                    sb.append('\n');
                }
                br.close();
                inp.close();
                inputStream.close();
                return sb.toString();
            }
        } catch (ExportException | IOException ex) {
            //ex.printStackTrace();
            return null;
        } finally {
            // Safety net in case an exception skipped the close() calls above.
            IOUtils.closeQuietly(inp);
            IOUtils.closeQuietly(inputStream);
        }
        return null;
    }

    // This method goes through all the Exporters and calls
    // the "cacheExport()" method that will save the produced output
    // in a file in the dataset directory, on each Exporter available.
    public void exportAllFormats(Dataset dataset) throws ExportException {
        try {
            clearAllCachedFormats(dataset);
        } catch (IOException ex) {
            Logger.getLogger(ExportService.class.getName()).log(Level.SEVERE, null, ex);
        }

        try {
            DatasetVersion releasedVersion = dataset.getReleasedVersion();
            if (releasedVersion == null) {
                throw new ExportException("No released version for dataset " + dataset.getGlobalId().toString());
            }
            // The dataset is rendered to JSON once and shared by all exporters.
            final JsonObjectBuilder datasetAsJsonBuilder = JsonPrinter.jsonAsDatasetDto(releasedVersion);
            JsonObject datasetAsJson = datasetAsJsonBuilder.build();

            Iterator<Exporter> exporters = loader.iterator();
            while (exporters.hasNext()) {
                Exporter e = exporters.next();
                String formatName = e.getProviderName();

                cacheExport(releasedVersion, formatName, datasetAsJson, e);

            }
        } catch (ServiceConfigurationError serviceError) {
            throw new ExportException("Service configuration error during export. " + serviceError.getMessage());
        }
        // Finally, if we have been able to successfully export in all available
        // formats, we'll increment the "last exported" time stamp:
        dataset.setLastExportTime(new Timestamp(new Date().getTime()));
    }

    /**
     * Deletes every cached export file for the dataset and resets its
     * "last exported" time stamp. I/O problems are treated as non-fatal.
     */
    public void clearAllCachedFormats(Dataset dataset) throws IOException {
        try {
            Iterator<Exporter> exporters = loader.iterator();
            while (exporters.hasNext()) {
                Exporter e = exporters.next();
                String formatName = e.getProviderName();

                clearCachedExport(dataset, formatName);
            }

            dataset.setLastExportTime(null);
        } catch (IOException ex) {
            //not fatal
        }
    }

    // This method finds the exporter for the format requested,
    // then produces the dataset metadata as a JsonObject, then calls
    // the "cacheExport()" method that will save the produced output
    // in a file in the dataset directory.
    public void exportFormat(Dataset dataset, String formatName) throws ExportException {
        try {
            Iterator<Exporter> exporters = loader.iterator();
            while (exporters.hasNext()) {
                Exporter e = exporters.next();
                if (e.getProviderName().equals(formatName)) {
                    DatasetVersion releasedVersion = dataset.getReleasedVersion();
                    if (releasedVersion == null) {
                        throw new IllegalStateException("No Released Version");
                    }
                    final JsonObjectBuilder datasetAsJsonBuilder = JsonPrinter.jsonAsDatasetDto(releasedVersion);
                    cacheExport(releasedVersion, formatName, datasetAsJsonBuilder.build(), e);
                }
            }
        } catch (ServiceConfigurationError serviceError) {
            throw new ExportException("Service configuration error during export. " + serviceError.getMessage());
        } catch (IllegalStateException e) {
            // Translate the missing-version condition into an ExportException.
            throw new ExportException("No published version found during export. " + dataset.getGlobalId().toString());
        }
    }

    /** Looks up a registered exporter by its provider name. */
    public Exporter getExporter(String formatName) throws ExportException {
        try {
            Iterator<Exporter> exporters = loader.iterator();
            while (exporters.hasNext()) {
                Exporter e = exporters.next();
                if (e.getProviderName().equals(formatName)) {
                    return e;
                }
            }
        } catch (ServiceConfigurationError serviceError) {
            throw new ExportException("Service configuration error during export. " + serviceError.getMessage());
        } catch (Exception ex) {
            throw new ExportException("Could not find Exporter \"" + formatName + "\", unknown exception");
        }
        throw new ExportException("No such Exporter: " + formatName);
    }

    // This method runs the selected metadata exporter, caching the output
    // in a file in the dataset directory / container based on its DOI:
    private void cacheExport(DatasetVersion version, String format, JsonObject datasetAsJson, Exporter exporter) throws ExportException {
        boolean tempFileRequired = false;
        File tempFile = null;
        OutputStream outputStream = null;
        Dataset dataset = version.getDataset();
        StorageIO<Dataset> storageIO = null;
        try {
            // With some storage drivers, we can open a WritableChannel, or OutputStream
            // to directly write the generated metadata export that we want to cache;
            // Some drivers (like Swift) do not support that, and will give us an
            // "operation not supported" exception. If that's the case, we'll have
            // to save the output into a temp file, and then copy it over to the
            // permanent storage using the IO "save" command:
            try {
                storageIO = DataAccess.createNewStorageIO(dataset, "placeholder");
                Channel outputChannel = storageIO.openAuxChannel("export_" + format + ".cached", DataAccessOption.WRITE_ACCESS);
                outputStream = Channels.newOutputStream((WritableByteChannel) outputChannel);
            } catch (IOException ioex) {
                tempFileRequired = true;
                tempFile = File.createTempFile("tempFileToExport", ".tmp");
                outputStream = new FileOutputStream(tempFile);
            }

            try {
                Path cachedMetadataFilePath = Paths.get(version.getDataset().getFileSystemDirectory().toString(), "export_" + format + ".cached");
                if (!tempFileRequired) {
                    // NOTE(review): this branch writes the export to the local
                    // filesystem path and merely closes the aux channel opened
                    // above — verify this is correct for non-filesystem drivers.
                    FileOutputStream cachedExportOutputStream = new FileOutputStream(cachedMetadataFilePath.toFile());
                    exporter.exportDataset(version, datasetAsJson, cachedExportOutputStream);
                    cachedExportOutputStream.flush();
                    cachedExportOutputStream.close();
                    outputStream.close();
                } else {
                    // this method copies a local filesystem Path into this DataAccess Auxiliary location:
                    exporter.exportDataset(version, datasetAsJson, outputStream);
                    outputStream.flush();
                    outputStream.close();

                    logger.fine("Saving path as aux for temp file in: " + Paths.get(tempFile.getAbsolutePath()));
                    storageIO.savePathAsAux(Paths.get(tempFile.getAbsolutePath()), "export_" + format + ".cached");
                    boolean tempFileDeleted = tempFile.delete();
                    logger.fine("tempFileDeleted: " + tempFileDeleted);
                }
            } catch (IOException ioex) {
                throw new ExportException("IO Exception thrown exporting as " + "export_" + format + ".cached");
            }

        } catch (IOException ioex) {
            throw new ExportException("IO Exception thrown exporting as " + "export_" + format + ".cached");
        } finally {
            IOUtils.closeQuietly(outputStream);
        }

    }

    /** Deletes the cached export auxiliary file for one format. */
    private void clearCachedExport(Dataset dataset, String format) throws IOException {
        try {
            StorageIO<Dataset> storageIO = getStorageIO(dataset);
            storageIO.deleteAuxObject("export_" + format + ".cached");

        } catch (IOException ex) {
            throw new IOException("IO Exception thrown exporting as " + "export_" + format + ".cached");
        }

    }

    // This method checks if the metadata has already been exported in this
    // format and cached on disk. If it has, it'll open the file and return
    // the file input stream. If not, it'll return null.
    private InputStream getCachedExportFormat(Dataset dataset, String formatName) throws ExportException, IOException {

        StorageIO<Dataset> dataAccess = null;

        try {
            dataAccess = DataAccess.getStorageIO(dataset);
        } catch (IOException ioex) {
            throw new IOException("IO Exception thrown exporting as " + "export_" + formatName + ".cached");
        }

        InputStream cachedExportInputStream = null;

        try {
            cachedExportInputStream = dataAccess.getAuxFileAsInputStream("export_" + formatName + ".cached");
            return cachedExportInputStream;
        } catch (IOException ioex) {
            throw new IOException("IO Exception thrown exporting as " + "export_" + formatName + ".cached");
        }

    }

    /*The below method, getCachedExportSize(), is not currently used.
     *An exercise for the reader could be to refactor it if it's needed
     *to be compatible with storage drivers other than local filesystem.
     *Files.exists() would need to be discarded.
     * -- L.A. 4.8 */
//    public Long getCachedExportSize(Dataset dataset, String formatName) {
//        try {
//            if (dataset.getFileSystemDirectory() != null) {
//                Path cachedMetadataFilePath = Paths.get(dataset.getFileSystemDirectory().toString(), "export_" + formatName + ".cached");
//                if (Files.exists(cachedMetadataFilePath)) {
//                    return cachedMetadataFilePath.toFile().length();
//                }
//            }
//        } catch (Exception ioex) {
//            // don't do anything - we'll just return null
//        }
//
//        return null;
//    }

    /**
     * Reports whether the named exporter produces XML.
     * NOTE(review): returns null (not false) when the provider is unknown —
     * callers must be prepared for a null Boolean.
     */
    public Boolean isXMLFormat(String provider) {
        try {
            Iterator<Exporter> exporters = loader.iterator();
            while (exporters.hasNext()) {
                Exporter e = exporters.next();
                if (e.getProviderName().equals(provider)) {
                    return e.isXMLFormat();
                }
            }
        } catch (ServiceConfigurationError serviceError) {
            serviceError.printStackTrace();
        }
        return null;
    }

    /** Media type of the named exporter's output; text/plain if unknown. */
    public String getMediaType(String provider) {
        try {
            Iterator<Exporter> exporters = loader.iterator();
            while (exporters.hasNext()) {
                Exporter e = exporters.next();
                if (e.getProviderName().equals(provider)) {
                    return e.getMediaType();
                }
            }
        } catch (ServiceConfigurationError serviceError) {
            serviceError.printStackTrace();
        }
        return MediaType.TEXT_PLAIN;
    }

}
1 | 38,872 | Could we keep this as a static reference (`JsonPrinter.jsonAsDatasetDto(releasedVersion)`) and perhaps change the constructor line to just be a static set method and use that here instead? (I suspect it was the warning that 'static methods should be accessed in a static manner' from my IDE that caused me to make this change in the first place...) (I can make a PR to your branch if you want.) | IQSS-dataverse | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.