gt
stringclasses 1
value | context
stringlengths 2.05k
161k
|
---|---|
/*
* Copyright (C) 2014 Antew
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.antew.redditinpictures.library.ui;
import android.app.Fragment;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.view.View;
import android.widget.AbsListView;
import android.widget.ListView;
import android.widget.Toast;
import butterknife.InjectView;
import com.antew.redditinpictures.library.Constants;
import com.antew.redditinpictures.library.adapter.ImageListCursorAdapter;
import com.antew.redditinpictures.library.database.QueryCriteria;
import com.antew.redditinpictures.library.database.RedditContract;
import com.antew.redditinpictures.library.event.ForcePostRefreshEvent;
import com.antew.redditinpictures.library.event.RequestCompletedEvent;
import com.antew.redditinpictures.library.event.RequestInProgressEvent;
import com.antew.redditinpictures.library.event.SaveImageEvent;
import com.antew.redditinpictures.library.model.Age;
import com.antew.redditinpictures.library.model.Category;
import com.antew.redditinpictures.library.model.reddit.PostData;
import com.antew.redditinpictures.library.preferences.SharedPreferencesHelper;
import com.antew.redditinpictures.library.service.RedditService;
import com.antew.redditinpictures.library.widget.SwipeListView;
import com.antew.redditinpictures.pro.R;
import com.google.analytics.tracking.android.EasyTracker;
import com.google.analytics.tracking.android.MapBuilder;
import com.squareup.otto.Subscribe;
/**
 * Displays a scrolling list of Reddit image posts for a subreddit/category/age
 * combination and handles the per-item context-menu actions (view, save,
 * share, open externally, report).
 *
 * <p>Additional pages of posts are fetched automatically as the user
 * approaches the bottom of the list.
 */
public class RedditImageListFragment extends RedditImageAdapterViewFragment<ListView, ImageListCursorAdapter>
    implements ImageListCursorAdapter.ImageListItemMenuActionListener {

    /**
     * Number of rows from the bottom of the list at which the next page of
     * posts is prefetched, so scrolling doesn't visibly stall at the end.
     */
    private static final int POST_LOAD_OFFSET = 8;

    /** Projection and sort order used when querying posts for the list view. */
    private static final QueryCriteria QUERY_CRITERIA =
        new QueryCriteria(RedditContract.Posts.LISTVIEW_PROJECTION, RedditContract.Posts.DEFAULT_SORT);

    @InjectView(R.id.image_list)
    protected SwipeListView mImageListView;

    /**
     * Kicks off a fetch for more posts once the user scrolls within
     * {@link #POST_LOAD_OFFSET} rows of the bottom. Only one request is
     * allowed in flight at a time (guarded by {@code mRequestInProgress}).
     */
    private AbsListView.OnScrollListener mListScrollListener = new AbsListView.OnScrollListener() {
        @Override
        public void onScrollStateChanged(AbsListView absListView, int scrollState) {
            // No-op; only the scroll position matters for paging.
        }

        @Override
        public void onScroll(AbsListView absListView, int firstVisibleItem, int visibleItemCount, int totalItemCount) {
            // If we're approaching the bottom of the list view, load more data.
            if (!mRequestInProgress
                && firstVisibleItem + visibleItemCount >= totalItemCount - POST_LOAD_OFFSET
                && totalItemCount > 0) {
                fetchAdditionalImagesFromReddit();
            }
        }
    };

    /**
     * Creates a new instance of this fragment for the given subreddit.
     *
     * @param subreddit the subreddit to display
     * @param category  optional sort category (e.g. HOT, NEW); may be null
     * @param age       optional age filter for the category; may be null
     * @return a configured {@link RedditImageListFragment}
     */
    public static Fragment newInstance(String subreddit, Category category, Age age) {
        final Fragment fragment = new RedditImageListFragment();
        final Bundle args = new Bundle();
        args.putString(Constants.Extra.EXTRA_SUBREDDIT, subreddit);
        if (category != null) {
            args.putString(Constants.Extra.EXTRA_CATEGORY, category.getName());
        }
        if (age != null) {
            args.putString(Constants.Extra.EXTRA_AGE, age.getAge());
        }
        fragment.setArguments(args);
        return fragment;
    }

    /**
     * If we're forcing a refresh from Reddit we want
     * to discard the old posts so that the user has
     * a better indication we are fetching posts anew.
     *
     * <p>Re-declared here (delegating to super) because Otto's
     * {@code @Subscribe} annotation is not inherited; without this override
     * the event would never reach this fragment.
     *
     * @param event the refresh event posted on the event bus
     */
    @Override
    @Subscribe
    public void handleForcePostRefreshEvent(ForcePostRefreshEvent event) {
        super.handleForcePostRefreshEvent(event);
    }

    @Override
    public void onActivityCreated(Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        mImageListView.setAdapter(mAdapter);
        mImageListView.setOnScrollListener(mListScrollListener);
    }

    @Override
    public void onDestroyView() {
        super.onDestroyView();
        // Release the view reference so the destroyed view hierarchy can be GC'd.
        mImageListView = null;
    }

    /**
     * Restores the saved scroll position once posts are available.
     */
    @Override
    public void onPostsLoaded() {
        // Guard against the view having been destroyed (mImageListView is
        // nulled in onDestroyView) before a pending load completes.
        if (mImageListView != null && mVisiblePosition != null) {
            mImageListView.setSelectionFromTop(mVisiblePosition, mTopOffset);
            mVisiblePosition = null;
            mTopOffset = null;
        }
    }

    /**
     * Launches the image-detail activity positioned at the given post.
     *
     * @param position index of the post within the current list
     */
    private void openImageAtPosition(int position) {
        final Intent intent = new Intent(getActivity(), getImageDetailActivityClass());
        final Bundle extras = new Bundle();
        extras.putString(Constants.Extra.EXTRA_SUBREDDIT, mCurrentSubreddit);
        // mCategory/mAge may legitimately be absent (newInstance allows null),
        // so avoid an NPE on .name(); a missing extra reads back as null anyway.
        if (mCategory != null) {
            extras.putString(Constants.Extra.EXTRA_CATEGORY, mCategory.name());
        }
        if (mAge != null) {
            extras.putString(Constants.Extra.EXTRA_AGE, mAge.name());
        }
        intent.putExtra(Constants.Extra.EXTRA_IMAGE, position);
        intent.putExtras(extras);
        startActivity(intent);
    }

    @Override
    protected int getLayoutId() {
        return R.layout.image_list_fragment;
    }

    @Override
    protected ListView getAdapterView() {
        return mImageListView;
    }

    @Override
    protected ImageListCursorAdapter getNewAdapter() {
        return new ImageListCursorAdapter(getActivity(), this);
    }

    @Override
    protected QueryCriteria getPostsQueryCriteria() {
        return QUERY_CRITERIA;
    }

    /**
     * Request to view the given image.
     *
     * @param postData the PostData of the image to open
     * @param position the list position of the image
     */
    @Override
    public void viewImage(PostData postData, int position) {
        EasyTracker.getInstance(getActivity())
            .send(MapBuilder.createEvent(Constants.Analytics.Category.POST_MENU_ACTION, Constants.Analytics.Action.OPEN_POST,
                mCurrentSubreddit, null).build()
            );
        openImageAtPosition(position);
    }

    /**
     * Request to save the given image.
     *
     * @param postData the PostData of the image
     */
    @Override
    public void saveImage(PostData postData) {
        EasyTracker.getInstance(getActivity())
            .send(MapBuilder.createEvent(Constants.Analytics.Category.POST_MENU_ACTION, Constants.Analytics.Action.SAVE_POST,
                mCurrentSubreddit, null).build()
            );
        mBus.post(new SaveImageEvent(postData));
    }

    /**
     * Request to share the given image via a standard share chooser.
     *
     * @param postData the PostData of the image
     */
    @Override
    public void shareImage(PostData postData) {
        EasyTracker.getInstance(getActivity())
            .send(MapBuilder.createEvent(Constants.Analytics.Category.POST_MENU_ACTION, Constants.Analytics.Action.SHARE_POST,
                mCurrentSubreddit, null).build()
            );
        String subject = getString(R.string.check_out_this_image);
        Intent intent = new Intent(Intent.ACTION_SEND);
        intent.setType("text/plain");
        intent.putExtra(Intent.EXTRA_SUBJECT, subject);
        intent.putExtra(Intent.EXTRA_TEXT, subject + " " + postData.getUrl());
        startActivity(Intent.createChooser(intent, getString(R.string.share_using_)));
    }

    /**
     * Request to open the given image's Reddit permalink in an external
     * application (typically the browser).
     *
     * @param postData the PostData of the image
     */
    @Override
    public void openPostExternal(PostData postData) {
        EasyTracker.getInstance(getActivity())
            .send(
                MapBuilder.createEvent(Constants.Analytics.Category.POST_MENU_ACTION, Constants.Analytics.Action.OPEN_POST_EXTERNAL,
                    mCurrentSubreddit, null).build()
            );
        Intent browserIntent = new Intent(Intent.ACTION_VIEW, Uri.parse(
            postData.getFullPermalink(SharedPreferencesHelper.getUseMobileInterface(getActivity()))));
        startActivity(browserIntent);
    }

    /**
     * Request to report the given image as broken/incorrectly displayed.
     * The network call runs on a background thread; the confirmation toast
     * is shown immediately (best-effort reporting).
     *
     * @param postData the PostData of the image
     */
    @Override
    public void reportImage(final PostData postData) {
        EasyTracker.getInstance(getActivity())
            .send(MapBuilder.createEvent(Constants.Analytics.Category.POST_MENU_ACTION, Constants.Analytics.Action.REPORT_POST,
                mCurrentSubreddit, null).build()
            );
        new Thread(new Runnable() {
            @Override
            public void run() {
                // NOTE(review): getActivity() may return null if the fragment is
                // detached before this runs — consider capturing the context
                // before spawning the thread.
                RedditService.reportPost(getActivity(), postData);
            }
        }).start();
        Toast.makeText(getActivity(), R.string.image_display_issue_reported, Toast.LENGTH_LONG).show();
    }

    @Subscribe
    @Override
    public void requestInProgress(RequestInProgressEvent event) {
        super.requestInProgress(event);
    }

    @Subscribe
    @Override
    public void requestCompleted(RequestCompletedEvent event) {
        super.requestCompleted(event);
    }

    @Override
    public void onSaveInstanceState(Bundle outState) {
        super.onSaveInstanceState(outState);
        // Persist the scroll position so onPostsLoaded() can restore it.
        if (mImageListView != null) {
            outState.putInt(Constants.Extra.EXTRA_VISIBLE_POSITION, mImageListView.getFirstVisiblePosition());
            View topView = mImageListView.getChildAt(0);
            outState.putInt(Constants.Extra.EXTRA_TOP_OFFSET, topView != null ? topView.getTop() : 0);
        }
    }
}
|
|
/*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package org.postgresql.test.jdbc2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import org.postgresql.PGStatement;
import org.postgresql.core.BaseConnection;
import org.postgresql.core.ServerVersion;
import org.postgresql.jdbc.TimestampUtils;
import org.postgresql.test.TestUtil;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.sql.Date;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.GregorianCalendar;
import java.util.TimeZone;
/*
* Test get/setTimestamp for both timestamp with time zone and timestamp without time zone datatypes
* TODO: refactor to a property-based testing or paremeterized testing somehow so adding new times
* don't require to add constants and setters/getters. JUnit 5 would probably help here.
*/
@RunWith(Parameterized.class)
public class TimestampTest extends BaseTest4 {
  public TimestampTest(BinaryMode binaryMode) {
    setBinaryMode(binaryMode);
  }

  // Default JVM time zone captured in setUp and restored in tearDown so a
  // test that changes the default zone cannot leak into later tests.
  private TimeZone currentTZ;

  // Run every test twice: once with text wire protocol, once with binary.
  @Parameterized.Parameters(name = "binary = {0}")
  public static Iterable<Object[]> data() {
    Collection<Object[]> ids = new ArrayList<Object[]>();
    for (BinaryMode binaryMode : BinaryMode.values()) {
      ids.add(new Object[]{binaryMode});
    }
    return ids;
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    TestUtil.createTable(con, TSWTZ_TABLE, "ts timestamp with time zone");
    TestUtil.createTable(con, TSWOTZ_TABLE, "ts timestamp without time zone");
    TestUtil.createTable(con, DATE_TABLE, "ts date");
    currentTZ = TimeZone.getDefault();
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropTable(con, TSWTZ_TABLE);
    TestUtil.dropTable(con, TSWOTZ_TABLE);
    TestUtil.dropTable(con, DATE_TABLE);
    TimeZone.setDefault(currentTZ);
    super.tearDown();
  }

  /**
   * Ensure the driver doesn't modify a Calendar that is passed in.
   */
  @Test
  public void testCalendarModification() throws SQLException {
    Calendar cal = Calendar.getInstance();
    Calendar origCal = (Calendar) cal.clone();
    PreparedStatement ps = con.prepareStatement("INSERT INTO " + TSWOTZ_TABLE + " VALUES (?)");
    ps.setDate(1, new Date(0), cal);
    ps.executeUpdate();
    assertEquals(origCal, cal);
    ps.setTimestamp(1, new Timestamp(0), cal);
    ps.executeUpdate();
    assertEquals(origCal, cal);
    ps.setTime(1, new Time(0), cal);
    // Can't actually execute this one because of type mismatch,
    // but all we're really concerned about is the set call.
    // ps.executeUpdate();
    assertEquals(origCal, cal);
    ps.close();
    Statement stmt = con.createStatement();
    ResultSet rs = stmt.executeQuery("SELECT ts FROM " + TSWOTZ_TABLE);
    assertTrue(rs.next());
    rs.getDate(1, cal);
    assertEquals(origCal, cal);
    rs.getTimestamp(1, cal);
    assertEquals(origCal, cal);
    rs.getTime(1, cal);
    assertEquals(origCal, cal);
    rs.close();
    stmt.close();
  }

  // Round-trips +/-infinity through all three table types (the date
  // variant only on servers that support infinite dates, i.e. 8.4+).
  @Test
  public void testInfinity() throws SQLException {
    runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
    runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
    runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
    runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
      runInfinityTests(DATE_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
      runInfinityTests(DATE_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
    }
  }

  // Inserts the given infinity sentinel three ways (string literal,
  // setTimestamp, setTimestamp+Calendar) and verifies all getters return
  // the same sentinel. Deletes exactly the 3 rows it inserted.
  private void runInfinityTests(String table, long value) throws SQLException {
    GregorianCalendar cal = new GregorianCalendar();
    // Pick some random timezone that is hopefully different than ours
    // and exists in this JVM.
    cal.setTimeZone(TimeZone.getTimeZone("Europe/Warsaw"));
    String strValue;
    if (value == PGStatement.DATE_POSITIVE_INFINITY) {
      strValue = "infinity";
    } else {
      strValue = "-infinity";
    }
    Statement stmt = con.createStatement();
    stmt.executeUpdate(TestUtil.insertSQL(table, "'" + strValue + "'"));
    stmt.close();
    PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL(table, "?"));
    ps.setTimestamp(1, new Timestamp(value));
    ps.executeUpdate();
    ps.setTimestamp(1, new Timestamp(value), cal);
    ps.executeUpdate();
    ps.close();
    stmt = con.createStatement();
    ResultSet rs = stmt.executeQuery("select ts from " + table);
    while (rs.next()) {
      assertEquals(strValue, rs.getString(1));
      Timestamp ts = rs.getTimestamp(1);
      assertEquals(value, ts.getTime());
      Date d = rs.getDate(1);
      assertEquals(value, d.getTime());
      Timestamp tscal = rs.getTimestamp(1, cal);
      assertEquals(value, tscal.getTime());
    }
    rs.close();
    assertEquals(3, stmt.executeUpdate("DELETE FROM " + table));
    stmt.close();
  }

  /*
   * Tests the timestamp methods in ResultSet on timestamp with time zone we insert a known string
   * value (don't use setTimestamp) then see that we get back the same value from getTimestamp
   */
  @Test
  public void testGetTimestampWTZ() throws SQLException {
    assumeTrue(TestUtil.haveIntegerDateTimes(con));
    Statement stmt = con.createStatement();
    TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils();
    // Insert the three timestamp values in raw pg format
    // (3 iterations x 4 values = 12 rows; plus 8 Date/Time-derived rows
    // below = the 20 rows asserted on delete).
    for (int i = 0; i < 3; i++) {
      assertEquals(1,
          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS1WTZ_PGFORMAT + "'")));
      assertEquals(1,
          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS2WTZ_PGFORMAT + "'")));
      assertEquals(1,
          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS3WTZ_PGFORMAT + "'")));
      assertEquals(1,
          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS4WTZ_PGFORMAT + "'")));
    }
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpDate1.getTime())) + "'")));
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpDate2.getTime())) + "'")));
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpDate3.getTime())) + "'")));
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpDate4.getTime())) + "'")));
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpTime1.getTime())) + "'")));
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpTime2.getTime())) + "'")));
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpTime3.getTime())) + "'")));
    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
        "'" + tsu.toString(null, new Timestamp(tmpTime4.getTime())) + "'")));
    // Fall through helper
    timestampTestWTZ();
    assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE));
    stmt.close();
  }

  /*
   * Tests the timestamp methods in PreparedStatement on timestamp with time zone we insert a value
   * using setTimestamp then see that we get back the same value from getTimestamp (which we know
   * works as it was tested independently of setTimestamp
   */
  @Test
  public void testSetTimestampWTZ() throws SQLException {
    assumeTrue(TestUtil.haveIntegerDateTimes(con));
    Statement stmt = con.createStatement();
    PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWTZ_TABLE, "?"));
    pstmt.setTimestamp(1, TS1WTZ);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setTimestamp(1, TS2WTZ);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setTimestamp(1, TS3WTZ);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setTimestamp(1, TS4WTZ);
    assertEquals(1, pstmt.executeUpdate());
    // With java.sql.Timestamp
    pstmt.setObject(1, TS1WTZ, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, TS2WTZ, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, TS3WTZ, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, TS4WTZ, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    // With Strings
    pstmt.setObject(1, TS1WTZ_PGFORMAT, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, TS2WTZ_PGFORMAT, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, TS3WTZ_PGFORMAT, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, TS4WTZ_PGFORMAT, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    // With java.sql.Date
    pstmt.setObject(1, tmpDate1, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, tmpDate2, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, tmpDate3, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, tmpDate4, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    // With java.sql.Time
    pstmt.setObject(1, tmpTime1, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, tmpTime2, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, tmpTime3, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    pstmt.setObject(1, tmpTime4, Types.TIMESTAMP);
    assertEquals(1, pstmt.executeUpdate());
    // Fall through helper
    timestampTestWTZ();
    assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE));
    pstmt.close();
    stmt.close();
  }

  /*
   * Tests the timestamp methods in ResultSet on timestamp without time zone we insert a known
   * string value (don't use setTimestamp) then see that we get back the same value from
   * getTimestamp
   */
  @Test
  public void testGetTimestampWOTZ() throws SQLException {
    assumeTrue(TestUtil.haveIntegerDateTimes(con));
    //Refer to #896
    assumeMinimumServerVersion(ServerVersion.v8_4);
    Statement stmt = con.createStatement();
    TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils();
    // Insert the three timestamp values in raw pg format
    // (3 x 10 values + 20 date/time rows = the 50 rows asserted on delete).
    for (int i = 0; i < 3; i++) {
      for (String value : TS__WOTZ_PGFORMAT) {
        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, "'" + value + "'")));
      }
    }
    for (java.util.Date date : TEST_DATE_TIMES) {
      String stringValue = "'" + tsu.toString(null, new Timestamp(date.getTime())) + "'";
      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, stringValue)));
    }
    // Fall through helper
    timestampTestWOTZ();
    assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE));
    stmt.close();
  }

  /*
   * Tests the timestamp methods in PreparedStatement on timestamp without time zone we insert a
   * value using setTimestamp then see that we get back the same value from getTimestamp (which we
   * know works as it was tested independently of setTimestamp
   */
  @Test
  public void testSetTimestampWOTZ() throws SQLException {
    assumeTrue(TestUtil.haveIntegerDateTimes(con));
    //Refer to #896
    assumeMinimumServerVersion(ServerVersion.v8_4);
    Statement stmt = con.createStatement();
    PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWOTZ_TABLE, "?"));
    for (Timestamp timestamp : TS__WOTZ) {
      pstmt.setTimestamp(1, timestamp);
      assertEquals(1, pstmt.executeUpdate());
    }
    // With java.sql.Timestamp
    for (Timestamp timestamp : TS__WOTZ) {
      pstmt.setObject(1, timestamp, Types.TIMESTAMP);
      assertEquals(1, pstmt.executeUpdate());
    }
    // With Strings
    for (String value : TS__WOTZ_PGFORMAT) {
      pstmt.setObject(1, value, Types.TIMESTAMP);
      assertEquals(1, pstmt.executeUpdate());
    }
    // With java.sql.Date, java.sql.Time
    for (java.util.Date date : TEST_DATE_TIMES) {
      pstmt.setObject(1, date, Types.TIMESTAMP);
      assertEquals("insert into TSWOTZ_TABLE via setObject(1, " + date
          + ", Types.TIMESTAMP) -> expecting one row inserted", 1, pstmt.executeUpdate());
    }
    // Fall through helper
    timestampTestWOTZ();
    assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE));
    pstmt.close();
    stmt.close();
  }

  /*
   * Helper for the TimestampTests. It tests what should be in the db.
   * Row order is assumed to match insertion order: 3 x (TS1..TS4), then the
   * 4 Date-derived rows, then the 4 Time-derived rows.
   */
  private void timestampTestWTZ() throws SQLException {
    Statement stmt = con.createStatement();
    ResultSet rs;
    Timestamp t;
    rs = stmt.executeQuery("select ts from " + TSWTZ_TABLE); // removed the order by ts
    assertNotNull(rs);
    for (int i = 0; i < 3; i++) {
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS1WTZ, t);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS2WTZ, t);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS3WTZ, t);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS4WTZ, t);
    }
    // Testing for Date
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpDate1.getTime(), t.getTime());
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpDate2.getTime(), t.getTime());
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpDate3.getTime(), t.getTime());
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpDate4.getTime(), t.getTime());
    // Testing for Time
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpTime1.getTime(), t.getTime());
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpTime2.getTime(), t.getTime());
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpTime3.getTime(), t.getTime());
    assertTrue(rs.next());
    t = rs.getTimestamp(1);
    assertNotNull(t);
    assertEquals(tmpTime4.getTime(), t.getTime());
    assertTrue(!rs.next()); // end of table. Fail if more entries exist.
    rs.close();
    stmt.close();
  }

  /*
   * Helper for the TimestampTests. It tests what should be in the db.
   * Row order is assumed to match insertion order: 3 x (TS1..TS10, with TS9
   * and TS10 rounded to microsecond precision), then TEST_DATE_TIMES.
   */
  private void timestampTestWOTZ() throws SQLException {
    Statement stmt = con.createStatement();
    Timestamp t;
    String tString;
    ResultSet rs = stmt.executeQuery("select ts from " + TSWOTZ_TABLE); // removed the order by ts
    assertNotNull(rs);
    for (int i = 0; i < 3; i++) {
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS1WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS1WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS2WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS2WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS3WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS3WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS4WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS4WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS5WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS5WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS6WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS6WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS7WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS7WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS8WOTZ, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS8WOTZ_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS9WOTZ_ROUNDED, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS9WOTZ_ROUNDED_PGFORMAT, tString);
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals(TS10WOTZ_ROUNDED, t);
      tString = rs.getString(1);
      assertNotNull(tString);
      assertEquals(TS10WOTZ_ROUNDED_PGFORMAT, tString);
    }
    // Testing for Date
    for (java.util.Date expected : TEST_DATE_TIMES) {
      assertTrue(rs.next());
      t = rs.getTimestamp(1);
      assertNotNull(t);
      assertEquals("rs.getTimestamp(1).getTime()", expected.getTime(), t.getTime());
    }
    assertTrue(!rs.next()); // end of table. Fail if more entries exist.
    rs.close();
    stmt.close();
  }

  // Verifies time/timestamp getters preserve microsecond precision and that
  // epoch-day time values equal their timestamp counterparts.
  @Test
  public void testJavaTimestampFromSQLTime() throws SQLException {
    Statement st = con.createStatement();
    ResultSet rs = st.executeQuery("SELECT '00:00:05.123456'::time as t, '1970-01-01 00:00:05.123456'::timestamp as ts, "
        + "'00:00:05.123456 +0300'::time with time zone as tz, '1970-01-01 00:00:05.123456 +0300'::timestamp with time zone as tstz ");
    rs.next();
    Timestamp t = rs.getTimestamp("t");
    Timestamp ts = rs.getTimestamp("ts");
    Timestamp tz = rs.getTimestamp("tz");
    Timestamp tstz = rs.getTimestamp("tstz");
    Integer desiredNanos = 123456000;
    Integer tNanos = t.getNanos();
    Integer tzNanos = tz.getNanos();
    assertEquals("Time should be microsecond-accurate", desiredNanos, tNanos);
    assertEquals("Time with time zone should be microsecond-accurate", desiredNanos, tzNanos);
    assertEquals("Unix epoch timestamp and Time should match", ts, t);
    assertEquals("Unix epoch timestamp with time zone and time with time zone should match", tstz, tz);
  }

  // Builds a Timestamp from broken-out fields; f is nanoseconds and is set
  // directly via setNanos (it is not part of the parsed string). A null tz
  // means "no zone" (parsed in the JVM default zone).
  private static Timestamp getTimestamp(int y, int m, int d, int h, int mn, int se, int f,
      String tz) {
    Timestamp result = null;
    java.text.DateFormat dateFormat;
    try {
      String ts;
      ts = TestUtil.fix(y, 4) + "-"
          + TestUtil.fix(m, 2) + "-"
          + TestUtil.fix(d, 2) + " "
          + TestUtil.fix(h, 2) + ":"
          + TestUtil.fix(mn, 2) + ":"
          + TestUtil.fix(se, 2) + " ";
      if (tz == null) {
        dateFormat = new SimpleDateFormat("y-M-d H:m:s");
      } else {
        ts = ts + tz;
        dateFormat = new SimpleDateFormat("y-M-d H:m:s z");
      }
      java.util.Date date = dateFormat.parse(ts);
      result = new Timestamp(date.getTime());
      result.setNanos(f);
    } catch (Exception ex) {
      // Construction happens in static initializers; fail loudly so a bad
      // constant doesn't silently become null.
      fail(ex.getMessage());
    }
    return result;
  }

  // Each TSnW[O]TZ constant has a matching _PGFORMAT string: the exact text
  // PostgreSQL is expected to render for that value.
  private static final Timestamp TS1WTZ =
      getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, "PST");
  private static final String TS1WTZ_PGFORMAT = "1950-02-07 15:00:00.1-08";
  private static final Timestamp TS2WTZ =
      getTimestamp(2000, 2, 7, 15, 0, 0, 120000000, "GMT");
  private static final String TS2WTZ_PGFORMAT = "2000-02-07 15:00:00.12+00";
  private static final Timestamp TS3WTZ =
      getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, "GMT");
  private static final String TS3WTZ_PGFORMAT = "2000-07-07 15:00:00.123+00";
  private static final Timestamp TS4WTZ =
      getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, "GMT");
  private static final String TS4WTZ_PGFORMAT = "2000-07-07 15:00:00.123456+00";
  private static final Timestamp TS1WOTZ =
      getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, null);
  private static final String TS1WOTZ_PGFORMAT = "1950-02-07 15:00:00.1";
  private static final Timestamp TS2WOTZ =
      getTimestamp(2000, 2, 7, 15, 0, 0, 120000000, null);
  private static final String TS2WOTZ_PGFORMAT = "2000-02-07 15:00:00.12";
  private static final Timestamp TS3WOTZ =
      getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, null);
  private static final String TS3WOTZ_PGFORMAT = "2000-07-07 15:00:00.123";
  private static final Timestamp TS4WOTZ =
      getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, null);
  private static final String TS4WOTZ_PGFORMAT = "2000-07-07 15:00:00.123456";
  // TS5/TS6 exercise the -infinity/infinity sentinel values.
  private static final Timestamp TS5WOTZ =
      new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
  private static final String TS5WOTZ_PGFORMAT = "-infinity";
  private static final Timestamp TS6WOTZ =
      new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
  private static final String TS6WOTZ_PGFORMAT = "infinity";
  private static final Timestamp TS7WOTZ =
      getTimestamp(2000, 7, 7, 15, 0, 0, 0, null);
  private static final String TS7WOTZ_PGFORMAT = "2000-07-07 15:00:00";
  private static final Timestamp TS8WOTZ =
      getTimestamp(2000, 7, 7, 15, 0, 0, 20400000, null);
  private static final String TS8WOTZ_PGFORMAT = "2000-07-07 15:00:00.0204";
  // TS9/TS10 have sub-microsecond nanos; the server stores only microseconds,
  // so the _ROUNDED variants are what reads are expected to return.
  private static final Timestamp TS9WOTZ =
      getTimestamp(2000, 2, 7, 15, 0, 0, 789, null);
  private static final String TS9WOTZ_PGFORMAT = "2000-02-07 15:00:00.000000789";
  private static final Timestamp TS9WOTZ_ROUNDED =
      getTimestamp(2000, 2, 7, 15, 0, 0, 1000, null);
  private static final String TS9WOTZ_ROUNDED_PGFORMAT = "2000-02-07 15:00:00.000001";
  private static final Timestamp TS10WOTZ =
      getTimestamp(2018, 12, 31, 23, 59, 59, 999999500, null);
  private static final String TS10WOTZ_PGFORMAT = "2018-12-31 23:59:59.999999500";
  private static final Timestamp TS10WOTZ_ROUNDED =
      getTimestamp(2019, 1, 1, 0, 0, 0, 0, null);
  private static final String TS10WOTZ_ROUNDED_PGFORMAT = "2019-01-01 00:00:00";
  private static final Timestamp[] TS__WOTZ = {
      TS1WOTZ, TS2WOTZ, TS3WOTZ, TS4WOTZ, TS5WOTZ,
      TS6WOTZ, TS7WOTZ, TS8WOTZ, TS9WOTZ, TS10WOTZ,
  };
  private static final String[] TS__WOTZ_PGFORMAT = {
      TS1WOTZ_PGFORMAT, TS2WOTZ_PGFORMAT, TS3WOTZ_PGFORMAT, TS4WOTZ_PGFORMAT, TS5WOTZ_PGFORMAT,
      TS6WOTZ_PGFORMAT, TS7WOTZ_PGFORMAT, TS8WOTZ_PGFORMAT, TS9WOTZ_PGFORMAT, TS10WOTZ_PGFORMAT,
  };
  private static final String TSWTZ_TABLE = "testtimestampwtz";
  private static final String TSWOTZ_TABLE = "testtimestampwotz";
  private static final String DATE_TABLE = "testtimestampdate";
  private static final java.sql.Date tmpDate1 = new java.sql.Date(TS1WTZ.getTime());
  private static final java.sql.Time tmpTime1 = new java.sql.Time(TS1WTZ.getTime());
  private static final java.sql.Date tmpDate2 = new java.sql.Date(TS2WTZ.getTime());
  private static final java.sql.Time tmpTime2 = new java.sql.Time(TS2WTZ.getTime());
  private static final java.sql.Date tmpDate3 = new java.sql.Date(TS3WTZ.getTime());
  private static final java.sql.Time tmpTime3 = new java.sql.Time(TS3WTZ.getTime());
  private static final java.sql.Date tmpDate4 = new java.sql.Date(TS4WTZ.getTime());
  private static final java.sql.Time tmpTime4 = new java.sql.Time(TS4WTZ.getTime());
  private static final java.sql.Date tmpDate1WOTZ = new java.sql.Date(TS1WOTZ.getTime());
  private static final java.sql.Time tmpTime1WOTZ = new java.sql.Time(TS1WOTZ.getTime());
  private static final java.sql.Date tmpDate2WOTZ = new java.sql.Date(TS2WOTZ.getTime());
  private static final java.sql.Time tmpTime2WOTZ = new java.sql.Time(TS2WOTZ.getTime());
  private static final java.sql.Date tmpDate3WOTZ = new java.sql.Date(TS3WOTZ.getTime());
  private static final java.sql.Time tmpTime3WOTZ = new java.sql.Time(TS3WOTZ.getTime());
  private static final java.sql.Date tmpDate4WOTZ = new java.sql.Date(TS4WOTZ.getTime());
  private static final java.sql.Time tmpTime4WOTZ = new java.sql.Time(TS4WOTZ.getTime());
  private static final java.sql.Date tmpDate5WOTZ = new java.sql.Date(TS5WOTZ.getTime());
  // NOTE(review): tmpTime5WOTZ and tmpTime6WOTZ are declared java.sql.Date
  // despite the "Time" names — presumably because java.sql.Time cannot carry
  // the +/-infinity sentinel values; confirm before "fixing" the types.
  private static final java.sql.Date tmpTime5WOTZ = new java.sql.Date(TS5WOTZ.getTime());
  private static final java.sql.Date tmpDate6WOTZ = new java.sql.Date(TS6WOTZ.getTime());
  private static final java.sql.Date tmpTime6WOTZ = new java.sql.Date(TS6WOTZ.getTime());
  private static final java.sql.Date tmpDate7WOTZ = new java.sql.Date(TS7WOTZ.getTime());
  private static final java.sql.Time tmpTime7WOTZ = new java.sql.Time(TS7WOTZ.getTime());
  private static final java.sql.Date tmpDate8WOTZ = new java.sql.Date(TS8WOTZ.getTime());
  private static final java.sql.Time tmpTime8WOTZ = new java.sql.Time(TS8WOTZ.getTime());
  private static final java.sql.Date tmpDate9WOTZ = new java.sql.Date(TS9WOTZ.getTime());
  private static final java.sql.Time tmpTime9WOTZ = new java.sql.Time(TS9WOTZ.getTime());
  private static final java.sql.Date tmpDate10WOTZ = new java.sql.Date(TS10WOTZ.getTime());
  private static final java.sql.Time tmpTime10WOTZ = new java.sql.Time(TS10WOTZ.getTime());
  // All 20 date/time-derived values, in the exact order the helpers expect
  // them to come back from the database.
  private static final java.util.Date[] TEST_DATE_TIMES = {
      tmpDate1WOTZ, tmpDate2WOTZ, tmpDate3WOTZ, tmpDate4WOTZ, tmpDate5WOTZ,
      tmpDate6WOTZ, tmpDate7WOTZ, tmpDate8WOTZ, tmpDate9WOTZ, tmpDate10WOTZ,
      tmpTime1WOTZ, tmpTime2WOTZ, tmpTime3WOTZ, tmpTime4WOTZ, tmpTime5WOTZ,
      tmpTime6WOTZ, tmpTime7WOTZ, tmpTime8WOTZ, tmpTime9WOTZ, tmpTime10WOTZ,
  };
}
|
|
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.util.Collection;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.protobuf.InvalidProtocolBufferException;
/**
 * Instance of an HBase ServerName.
 * A server name is used uniquely identifying a server instance and is made
 * of the combination of hostname, port, and startcode. The startcode
 * distinguishes restarted servers on same hostname and port (startcode is
 * usually timestamp of server startup). The {@link #toString()} format of
 * ServerName is safe to use in the filesystem and as znode name up in
 * ZooKeeper. Its format is:
 * <code><hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode></code>.
 * For example, if hostname is <code>example.org</code>, port is <code>1234</code>,
 * and the startcode for the regionserver is <code>1212121212</code>, then
 * the {@link #toString()} would be <code>example.org,1234,1212121212</code>.
 *
 * <p>You can obtain a versioned serialized form of this class by calling
 * {@link #getVersionedBytes()}. To deserialize, call {@link #parseVersionedServerName(byte[])}
 *
 * <p>Immutable.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ServerName implements Comparable<ServerName> {
  /**
   * Version for this class.
   * Its a short rather than a byte so I can for sure distinguish between this
   * version of this class and the version previous to this which did not have
   * a version.
   */
  private static final short VERSION = 0;
  static final byte [] VERSION_BYTES = Bytes.toBytes(VERSION);

  /**
   * What to use if no startcode supplied.
   */
  public static final int NON_STARTCODE = -1;

  /**
   * This character is used as separator between server hostname, port and
   * startcode.
   */
  public static final String SERVERNAME_SEPARATOR = ",";

  /**
   * Matches a full server name: hostname, port, startcode.
   * Declared final: this is shared mutable state otherwise and nothing
   * should ever reassign it.
   */
  public static final Pattern SERVERNAME_PATTERN =
    Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" +
      SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX +
      SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$");

  /**
   * What to use if server name is unknown.
   */
  public static final String UNKNOWN_SERVERNAME = "#unknown#";

  /** Pre-built "<hostname>,<port>,<startcode>" form; see {@link #getServerName()}. */
  private final String servername;
  private final String hostname;
  private final int port;
  private final long startcode;

  /**
   * Cached versioned bytes of this ServerName instance.
   * @see #getVersionedBytes()
   */
  private byte [] bytes;

  public ServerName(final String hostname, final int port, final long startcode) {
    this.hostname = hostname;
    this.port = port;
    this.startcode = startcode;
    this.servername = getServerName(hostname, port, startcode);
  }

  /**
   * @param serverName full server name in the {@link #getServerName()}
   *   "<hostname>,<port>,<startcode>" format
   */
  public ServerName(final String serverName) {
    this(parseHostname(serverName), parsePort(serverName),
      parseStartcode(serverName));
  }

  /**
   * @param hostAndPort String in "<hostname>:<port>" form
   * @param startCode server start code
   */
  public ServerName(final String hostAndPort, final long startCode) {
    this(Addressing.parseHostname(hostAndPort),
      Addressing.parsePort(hostAndPort), startCode);
  }

  /**
   * @param serverName full server name
   * @return the hostname portion (everything before the first separator)
   * @throws IllegalArgumentException if serverName is null or empty
   */
  public static String parseHostname(final String serverName) {
    if (serverName == null || serverName.length() <= 0) {
      throw new IllegalArgumentException("Passed hostname is null or empty");
    }
    int index = serverName.indexOf(SERVERNAME_SEPARATOR);
    return serverName.substring(0, index);
  }

  /** @return the port parsed from the second comma-separated field. */
  public static int parsePort(final String serverName) {
    String [] split = serverName.split(SERVERNAME_SEPARATOR);
    return Integer.parseInt(split[1]);
  }

  /** @return the startcode parsed from the last comma-separated field. */
  public static long parseStartcode(final String serverName) {
    int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR);
    return Long.parseLong(serverName.substring(index + 1));
  }

  @Override
  public String toString() {
    return getServerName();
  }

  /**
   * @return {@link #getServerName()} as bytes with a short-sized prefix with
   * the ServerName#VERSION of this class.
   */
  public synchronized byte [] getVersionedBytes() {
    if (this.bytes == null) {
      // Lazily computed and cached; synchronized so the cache is written once.
      this.bytes = Bytes.add(VERSION_BYTES, Bytes.toBytes(getServerName()));
    }
    return this.bytes;
  }

  public String getServerName() {
    return servername;
  }

  public String getHostname() {
    return hostname;
  }

  public int getPort() {
    return port;
  }

  public long getStartcode() {
    return startcode;
  }

  /**
   * @param hostName
   * @param port
   * @param startcode
   * @return Server name made of the concatenation of hostname, port and
   * startcode formatted as <code><hostname> ',' <port> ',' <startcode></code>
   */
  public static String getServerName(String hostName, int port, long startcode) {
    // Presize: hostname + 2 separators + up-to-5-digit port + 13-digit startcode.
    final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13);
    name.append(hostName);
    name.append(SERVERNAME_SEPARATOR);
    name.append(port);
    name.append(SERVERNAME_SEPARATOR);
    name.append(startcode);
    return name.toString();
  }

  /**
   * @param hostAndPort String in form of <hostname> ':' <port>
   * @param startcode
   * @return Server name made of the concatenation of hostname, port and
   * startcode formatted as <code><hostname> ',' <port> ',' <startcode></code>
   * @throws IllegalArgumentException if hostAndPort has no ':' separator
   */
  public static String getServerName(final String hostAndPort,
      final long startcode) {
    int index = hostAndPort.indexOf(":");
    if (index <= 0) throw new IllegalArgumentException("Expected <hostname> ':' <port>");
    return getServerName(hostAndPort.substring(0, index),
      Integer.parseInt(hostAndPort.substring(index + 1)), startcode);
  }

  /**
   * @return Hostname and port formatted as described at
   * {@link Addressing#createHostAndPortStr(String, int)}
   */
  public String getHostAndPort() {
    return Addressing.createHostAndPortStr(this.hostname, this.port);
  }

  /**
   * @param serverName ServerName in form specified by {@link #getServerName()}
   * @return The server start code parsed from <code>servername</code>
   */
  public static long getServerStartcodeFromServerName(final String serverName) {
    int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR);
    return Long.parseLong(serverName.substring(index + 1));
  }

  /**
   * Utility method to excise the start code from a server name
   * @param inServerName full server name
   * @return server name less its start code
   */
  public static String getServerNameLessStartCode(String inServerName) {
    if (inServerName != null && inServerName.length() > 0) {
      int index = inServerName.lastIndexOf(SERVERNAME_SEPARATOR);
      if (index > 0) {
        return inServerName.substring(0, index);
      }
    }
    return inServerName;
  }

  @Override
  public int compareTo(ServerName other) {
    int compare = this.getHostname().toLowerCase().
      compareTo(other.getHostname().toLowerCase());
    if (compare != 0) return compare;
    compare = this.getPort() - other.getPort();
    if (compare != 0) return compare;
    // Compare startcodes without subtraction: the previous
    // (int)(thisStartcode - otherStartcode) truncated a long difference to
    // int and could report the wrong ordering for startcodes more than
    // Integer.MAX_VALUE apart.
    if (this.getStartcode() < other.getStartcode()) return -1;
    if (this.getStartcode() > other.getStartcode()) return 1;
    return 0;
  }

  @Override
  public int hashCode() {
    // NOTE(review): compareTo (and therefore equals) lowercases the
    // hostname, but this hash is case-sensitive, so "HOST,1,1" and
    // "host,1,1" are equal yet may hash differently. Confirm mixed-case
    // hostnames cannot occur before relying on ServerName as a hash key.
    return getServerName().hashCode();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null) return false;
    if (!(o instanceof ServerName)) return false;
    return this.compareTo((ServerName)o) == 0;
  }

  /**
   * @return ServerName with matching hostname and port.
   */
  public static ServerName findServerWithSameHostnamePort(final Collection<ServerName> names,
      final ServerName serverName) {
    for (ServerName sn: names) {
      if (isSameHostnameAndPort(serverName, sn)) return sn;
    }
    return null;
  }

  /**
   * @param left
   * @param right
   * @return True if <code>other</code> has same hostname and port.
   */
  public static boolean isSameHostnameAndPort(final ServerName left,
      final ServerName right) {
    if (left == null) return false;
    if (right == null) return false;
    return left.getHostname().equals(right.getHostname()) &&
      left.getPort() == right.getPort();
  }

  /**
   * Use this method instantiating a {@link ServerName} from bytes
   * gotten from a call to {@link #getVersionedBytes()}. Will take care of the
   * case where bytes were written by an earlier version of hbase.
   * @param versionedBytes Pass bytes gotten from a call to {@link #getVersionedBytes()}
   * @return A ServerName instance.
   * @see #getVersionedBytes()
   */
  public static ServerName parseVersionedServerName(final byte [] versionedBytes) {
    // Version is a short.
    short version = Bytes.toShort(versionedBytes);
    if (version == VERSION) {
      int length = versionedBytes.length - Bytes.SIZEOF_SHORT;
      return new ServerName(Bytes.toString(versionedBytes, Bytes.SIZEOF_SHORT, length));
    }
    // Presume the bytes were written with an old version of hbase and that the
    // bytes are actually a String of the form "'<hostname>' ':' '<port>'".
    return new ServerName(Bytes.toString(versionedBytes), NON_STARTCODE);
  }

  /**
   * @param str Either an instance of {@link ServerName#toString()} or a
   * "'<hostname>' ':' '<port>'".
   * @return A ServerName instance.
   */
  public static ServerName parseServerName(final String str) {
    return SERVERNAME_PATTERN.matcher(str).matches()? new ServerName(str):
      new ServerName(str, NON_STARTCODE);
  }

  /**
   * @return true if the String follows the pattern of {@link ServerName#toString()}, false
   * otherwise.
   */
  public static boolean isFullServerName(final String str){
    if (str == null ||str.isEmpty()) return false;
    return SERVERNAME_PATTERN.matcher(str).matches();
  }

  /**
   * Get a ServerName from the passed in data bytes.
   * @param data Data with a serialize server name in it; can handle the old style
   * servername where servername was host and port.  Works too with data that
   * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that
   * has a serialized {@link ServerName} in it.
   * @return Returns null if <code>data</code> is null else converts passed data
   * to a ServerName instance.
   * @throws DeserializationException
   */
  public static ServerName parseFrom(final byte [] data) throws DeserializationException {
    if (data == null || data.length <= 0) return null;
    if (ProtobufUtil.isPBMagicPrefix(data)) {
      int prefixLen = ProtobufUtil.lengthOfPBMagic();
      try {
        RootRegionServer rss =
          RootRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build();
        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getServer();
        return new ServerName(sn.getHostName(), sn.getPort(), sn.getStartCode());
      } catch (InvalidProtocolBufferException e) {
        // A failed parse of the znode is pretty catastrophic. Rather than loop
        // retrying hoping the bad bytes will changes, and rather than change
        // the signature on this method to add an IOE which will send ripples all
        // over the code base, throw a RuntimeException.  This should "never" happen.
        // Fail fast if it does.
        throw new DeserializationException(e);
      }
    }
    // The str returned could be old style -- pre hbase-1502 -- which was
    // hostname and port separated by a colon rather than hostname, port and
    // startcode delimited by a ','.
    String str = Bytes.toString(data);
    int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
    if (index != -1) {
      // Presume its ServerName serialized with versioned bytes.
      return ServerName.parseVersionedServerName(data);
    }
    // Presume it a hostname:port format.
    String hostname = Addressing.parseHostname(str);
    int port = Addressing.parsePort(str);
    return new ServerName(hostname, port, -1L);
  }
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.EOFException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
/**
* A thread per active or standby namenode to perform:
* <ul>
* <li> Pre-registration handshake with namenode</li>
* <li> Registration with namenode</li>
* <li> Send periodic heartbeats to the namenode</li>
* <li> Handle commands received from the namenode</li>
* </ul>
*/
@InterfaceAudience.Private
class BPServiceActor implements Runnable {
// Shared DataNode-wide log.
static final Log LOG = DataNode.LOG;
// Address of the namenode this actor heartbeats to.
final InetSocketAddress nnAddr;
// HA state (active/standby) of that namenode; updated outside this chunk.
HAServiceState state;
// The per-block-pool service this actor reports into.
final BPOfferService bpos;
// lastBlockReport and lastHeartbeat may be assigned/read
// by testing threads (through BPServiceActor#triggerXXX), while also
// assigned/read by the actor thread. Thus they should be declared as volatile
// to make sure the "happens-before" consistency.
volatile long lastBlockReport = 0;
// True until the first block report is scheduled; makes the next report
// use a random offset (see scheduleNextBlockReport()).
boolean resetBlockReportTime = true;
volatile long lastCacheReport = 0;
// The actor thread; created/started in start(), interrupted in stop().
Thread bpThread;
// RPC proxy to the namenode; replaceable in tests via setNameNode().
DatanodeProtocolClientSideTranslatorPB bpNamenode;
private volatile long lastHeartbeat = 0;
// Lifecycle of this actor's connection to its namenode.
static enum RunningState {
  CONNECTING, INIT_FAILED, RUNNING, EXITED, FAILED;
}
private volatile RunningState runningState = RunningState.CONNECTING;
/**
 * Between block reports (which happen on the order of once an hour) the
 * DN reports smaller incremental changes to its block list. This map,
 * keyed by block ID, contains the pending changes which have yet to be
 * reported to the NN. Access should be synchronized on this object.
 */
private final Map<DatanodeStorage, PerStoragePendingIncrementalBR>
  pendingIncrementalBRperStorage = Maps.newHashMap();
// IBR = Incremental Block Report. If this flag is set then an IBR will be
// sent immediately by the actor thread without waiting for the IBR timer
// to elapse.
private volatile boolean sendImmediateIBR = false;
// Cleared by stop()/cleanUp() to make the actor thread exit its loop.
private volatile boolean shouldServiceRun = true;
// Owning datanode and its DN-side configuration.
private final DataNode dn;
private final DNConf dnConf;
// Registration returned by the NN; identifies this DN within the block pool.
private DatanodeRegistration bpRegistration;
// Queue of deferred actions — presumably drained by the actor thread;
// the consumer is not visible in this chunk.
final LinkedList<BPServiceActorAction> bpThreadQueue
  = new LinkedList<BPServiceActorAction>();
/**
 * Create an actor that will handshake with and heartbeat to the namenode at
 * {@code nnAddr} on behalf of {@code bpos}'s block pool. Does not start a
 * thread; see {@code start()}.
 */
BPServiceActor(InetSocketAddress nnAddr, BPOfferService bpos) {
    this.bpos = bpos;
    this.dn = bpos.getDataNode();
    this.nnAddr = nnAddr;
    this.dnConf = dn.getDnConf();
}
/**
 * @return true while the actor should be considered live: the service flag
 * is set, its thread is alive, and it is connecting to or running against
 * its namenode.
 */
boolean isAlive() {
    // Short-circuit keeps the bpThread dereference guarded, as before.
    if (!shouldServiceRun || !bpThread.isAlive()) {
      return false;
    }
    return runningState == RunningState.RUNNING
        || runningState == RunningState.CONNECTING;
}
/** Identifies this actor as "&lt;offer-service&gt; service to &lt;NN address&gt;". */
@Override
public String toString() {
    return new StringBuilder(bpos.toString())
        .append(" service to ")
        .append(nnAddr)
        .toString();
}
/** @return the socket address of the namenode this actor services. */
InetSocketAddress getNNSocketAddress() {
    return this.nnAddr;
}
/**
 * Used to inject a spy NN in the unit tests.
 *
 * @param dnProtocol replacement namenode proxy
 */
@VisibleForTesting
void setNameNode(DatanodeProtocolClientSideTranslatorPB dnProtocol) {
    this.bpNamenode = dnProtocol;
}
/** @return the current namenode RPC proxy (test hook). */
@VisibleForTesting
DatanodeProtocolClientSideTranslatorPB getNameNodeProxy() {
    return this.bpNamenode;
}
/**
 * Perform the first part of the handshake with the NameNode.
 * This calls <code>versionRequest</code> to determine the NN's
 * namespace and version info. It automatically retries until
 * the NN responds or the DN is shutting down.
 *
 * @return the NamespaceInfo
 * @throws IOException if the DN shuts down before the NN ever responds,
 *   or if the NN's version fails the checkNNVersion() check
 */
@VisibleForTesting
NamespaceInfo retrieveNamespaceInfo() throws IOException {
    NamespaceInfo nsInfo = null;
    while (shouldRun()) {
      try {
        nsInfo = bpNamenode.versionRequest();
        LOG.debug(this + " received versionRequest response: " + nsInfo);
        break;
      } catch (IOException e) {
        // Covers both a busy NN (SocketTimeoutException) and an unavailable
        // one; the previous code had two catch blocks with identical bodies.
        LOG.warn("Problem connecting to server: " + nnAddr);
      }
      // try again after a delay
      sleepAndLogInterrupts(5000, "requesting version info from NN");
    }
    if (nsInfo != null) {
      checkNNVersion(nsInfo);
    } else {
      throw new IOException("DN shut down before block pool connected");
    }
    return nsInfo;
}
/**
 * Validate the NN's software version against this DN's configured minimum.
 * Too-old NNs are rejected with an exception; a mere mismatch that is still
 * acceptable (e.g. during a rolling upgrade) is only logged.
 *
 * @throws IncorrectVersionException when the NN is below the minimum version
 */
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
    // build and layout versions should match
    final String nnVersion = nsInfo.getSoftwareVersion();
    final String minimum = dnConf.getMinimumNameNodeVersion();
    if (VersionUtil.compareVersions(nnVersion, minimum) < 0) {
      IncorrectVersionException ive = new IncorrectVersionException(
          minimum, nnVersion, "NameNode", "DataNode");
      LOG.warn(ive.getMessage());
      throw ive;
    }
    final String dnVersion = VersionInfo.getVersion();
    if (!nnVersion.equals(dnVersion)) {
      LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
          "DataNode version '" + dnVersion + "' but is within acceptable " +
          "limits. Note: This is normal during a rolling upgrade.");
    }
}
/**
 * Connect to the configured NN and run the two-phase handshake. The order
 * is significant: the namespace info must be retrieved and verified before
 * register() is sent.
 *
 * @throws IOException if the proxy cannot be created or any phase fails
 */
private void connectToNNAndHandshake() throws IOException {
    // get NN proxy
    bpNamenode = dn.connectToNN(nnAddr);
    // First phase of the handshake with NN - get the namespace
    // info.
    NamespaceInfo nsInfo = retrieveNamespaceInfo();
    // Verify that this matches the other NN in this HA pair.
    // This also initializes our block pool in the DN if we are
    // the first NN connection for this BP.
    bpos.verifyAndSetNamespaceInfo(nsInfo);
    // Second phase of the handshake with the NN.
    register(nsInfo);
}
// This is useful to make sure NN gets Heartbeat before Blockreport
// upon NN restart while DN keeps retrying Otherwise,
// 1. NN restarts.
// 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
// 3. After reregistration completes, DN will send Blockreport first.
// 4. Given NN receives Blockreport after Heartbeat, it won't mark
//    DatanodeStorageInfo#blockContentsStale to false until the next
//    Blockreport.
/** Zero the heartbeat timer so the actor sends a heartbeat on its next loop. */
void scheduleHeartbeat() {
    lastHeartbeat = 0;
}
/**
 * This methods arranges for the data node to send the block report at
 * the next heartbeat.
 *
 * @param delay upper bound (ms) of a random initial delay; 0 or negative
 *   schedules the report for the next heartbeat immediately
 */
void scheduleBlockReport(long delay) {
    if (delay > 0) { // send BR after random delay
      // Back-date lastBlockReport so the interval expires within [0, delay) ms.
      lastBlockReport = monotonicNow()
          - ( dnConf.blockReportInterval - DFSUtil.getRandom().nextInt((int)(delay)));
    } else { // send at next heartbeat
      lastBlockReport = lastHeartbeat - dnConf.blockReportInterval;
    }
    resetBlockReportTime = true; // reset future BRs for randomness
}
/**
 * Report received blocks and delete hints to the Namenode for each
 * storage.
 *
 * Locking: the pending per-storage state is drained while holding the
 * pendingIncrementalBRperStorage monitor, but the RPC itself runs outside
 * the lock. On RPC failure the drained entries are put back (unless newer
 * info arrived meanwhile) and an immediate resend is requested.
 *
 * @throws IOException on RPC failure (after re-queueing the reports)
 */
private void reportReceivedDeletedBlocks() throws IOException {
    // Generate a list of the pending reports for each storage under the lock
    ArrayList<StorageReceivedDeletedBlocks> reports =
        new ArrayList<StorageReceivedDeletedBlocks>(pendingIncrementalBRperStorage.size());
    synchronized (pendingIncrementalBRperStorage) {
      for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
           pendingIncrementalBRperStorage.entrySet()) {
        final DatanodeStorage storage = entry.getKey();
        final PerStoragePendingIncrementalBR perStorageMap = entry.getValue();
        if (perStorageMap.getBlockInfoCount() > 0) {
          // Send newly-received and deleted blockids to namenode
          ReceivedDeletedBlockInfo[] rdbi = perStorageMap.dequeueBlockInfos();
          reports.add(new StorageReceivedDeletedBlocks(storage, rdbi));
        }
      }
      // Everything pending has been drained; clear the flag under the lock.
      sendImmediateIBR = false;
    }
    if (reports.size() == 0) {
      // Nothing new to report.
      return;
    }
    // Send incremental block reports to the Namenode outside the lock
    boolean success = false;
    final long startTime = monotonicNow();
    try {
      bpNamenode.blockReceivedAndDeleted(bpRegistration,
          bpos.getBlockPoolId(),
          reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]));
      success = true;
    } finally {
      dn.getMetrics().addIncrementalBlockReport(monotonicNow() - startTime);
      if (!success) {
        synchronized (pendingIncrementalBRperStorage) {
          for (StorageReceivedDeletedBlocks report : reports) {
            // If we didn't succeed in sending the report, put all of the
            // blocks back onto our queue, but only in the case where we
            // didn't put something newer in the meantime.
            PerStoragePendingIncrementalBR perStorageMap =
                pendingIncrementalBRperStorage.get(report.getStorage());
            perStorageMap.putMissingBlockInfos(report.getBlocks());
            sendImmediateIBR = true;
          }
        }
      }
    }
}
/**
 * Look up, lazily creating, the pending incremental block-report state for
 * the given storage. Caller must hold the pendingIncrementalBRperStorage
 * monitor.
 *
 * @return pending incremental block report for given {@code storage}
 */
private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
    DatanodeStorage storage) {
    PerStoragePendingIncrementalBR pending =
        pendingIncrementalBRperStorage.get(storage);
    if (pending != null) {
      return pending;
    }
    // First incremental-BR state for this storage: created once per
    // storage, per service actor.
    pending = new PerStoragePendingIncrementalBR();
    pendingIncrementalBRperStorage.put(storage, pending);
    return pending;
}
/**
 * Add a blockInfo for notification to NameNode. If another entry
 * exists for the same block it is removed.
 *
 * Caller must synchronize access using pendingIncrementalBRperStorage.
 *
 * @param bInfo   block change to queue
 * @param storage storage whose pending queue receives the entry
 */
void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo,
    DatanodeStorage storage) {
    // Make sure another entry for the same block is first removed.
    // There may only be one such entry.
    for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
        pendingIncrementalBRperStorage.entrySet()) {
      if (entry.getValue().removeBlockInfo(bInfo)) {
        break;
      }
    }
    getIncrementalBRMapForStorage(storage).putBlockInfo(bInfo);
}
/*
 * Informing the name node could take a long long time! Should we wait
 * till namenode is informed before responding with success to the
 * client? For now we don't.
 */
/**
 * Queue a received-block notification for the given storage and request an
 * immediate IBR. If {@code now} is true the actor thread is also woken so
 * the report goes out without waiting for the next heartbeat interval.
 */
void notifyNamenodeBlock(ReceivedDeletedBlockInfo bInfo,
    String storageUuid, boolean now) {
    synchronized (pendingIncrementalBRperStorage) {
      addPendingReplicationBlockInfo(
          bInfo, dn.getFSDataset().getStorage(storageUuid));
      sendImmediateIBR = true;
      // If now is true, the report is sent right away.
      // Otherwise, it will be sent out in the next heartbeat.
      if (now) {
        pendingIncrementalBRperStorage.notifyAll();
      }
    }
}
/**
 * Queue a deleted-block notification. Unlike
 * {@link #notifyNamenodeBlock}, this does not set sendImmediateIBR or wake
 * the actor thread: the deletion simply rides along with the next report.
 */
void notifyNamenodeDeletedBlock(
    ReceivedDeletedBlockInfo bInfo, String storageUuid) {
    synchronized (pendingIncrementalBRperStorage) {
      addPendingReplicationBlockInfo(
          bInfo, dn.getFSDataset().getStorage(storageUuid));
    }
}
/**
 * Run an immediate block report on this thread. Used by tests.
 * Zeroing both timers makes the actor's next loop send a heartbeat and a
 * block report; this method then waits until lastBlockReport is set to a
 * non-zero value again (done by scheduleNextBlockReport when the report is
 * rescheduled), or until interrupted.
 */
@VisibleForTesting
void triggerBlockReportForTests() {
    synchronized (pendingIncrementalBRperStorage) {
      lastBlockReport = 0;
      lastHeartbeat = 0;
      pendingIncrementalBRperStorage.notifyAll();
      while (lastBlockReport == 0) {
        try {
          pendingIncrementalBRperStorage.wait(100);
        } catch (InterruptedException e) {
          return;
        }
      }
    }
}
/**
 * Force an immediate heartbeat and block until the actor has sent it,
 * i.e. until lastHeartbeat becomes non-zero again. Test hook; returns
 * early if interrupted while waiting.
 */
@VisibleForTesting
void triggerHeartbeatForTests() {
    synchronized (pendingIncrementalBRperStorage) {
      lastHeartbeat = 0;
      pendingIncrementalBRperStorage.notifyAll();
      while (lastHeartbeat == 0) {
        try {
          pendingIncrementalBRperStorage.wait(100);
        } catch (InterruptedException e) {
          return;
        }
      }
    }
}
/**
 * Force an immediate incremental block report and block until the actor
 * has sent it (reportReceivedDeletedBlocks clears sendImmediateIBR under
 * the same lock). Test hook; returns early if interrupted while waiting.
 */
@VisibleForTesting
void triggerDeletionReportForTests() {
    synchronized (pendingIncrementalBRperStorage) {
      sendImmediateIBR = true;
      pendingIncrementalBRperStorage.notifyAll();
      while (sendImmediateIBR) {
        try {
          pendingIncrementalBRperStorage.wait(100);
        } catch (InterruptedException e) {
          return;
        }
      }
    }
}
/** @return true if an incremental block report is waiting to be sent (test hook). */
@VisibleForTesting
boolean hasPendingIBR() {
    return sendImmediateIBR;
}
// Highest block-report id handed out so far; ids must strictly increase.
private long prevBlockReportId = 0;

/**
 * Produce a block-report id strictly greater than any previously returned
 * by this actor, based on the nanosecond clock.
 */
private long generateUniqueBlockReportId() {
    // Equivalent to: take nanoTime, bumping to prev+1 if it did not advance.
    long id = Math.max(System.nanoTime(), prevBlockReportId + 1);
    prevBlockReportId = id;
    return id;
}
/**
 * Report the list blocks to the Namenode. When the total block count is at
 * or above dnConf.blockReportSplitThreshold, one RPC is sent per storage;
 * otherwise everything goes in a single RPC. The same reportId ties split
 * RPCs together.
 * @return DatanodeCommands returned by the NN. May be null.
 * @throws IOException on RPC failure
 */
List<DatanodeCommand> blockReport() throws IOException {
    // send block report if timer has expired.
    final long startTime = monotonicNow();
    if (startTime - lastBlockReport <= dnConf.blockReportInterval) {
      return null;
    }
    final ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
    // Flush any block information that precedes the block report. Otherwise
    // we have a chance that we will miss the delHint information
    // or we will report an RBW replica after the BlockReport already reports
    // a FINALIZED one.
    reportReceivedDeletedBlocks();
    long brCreateStartTime = monotonicNow();
    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
        dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());
    // Convert the reports to the format expected by the NN.
    int i = 0;
    int totalBlockCount = 0;
    StorageBlockReport reports[] =
        new StorageBlockReport[perVolumeBlockLists.size()];
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
      BlockListAsLongs blockList = kvPair.getValue();
      reports[i++] = new StorageBlockReport(kvPair.getKey(), blockList);
      totalBlockCount += blockList.getNumberOfBlocks();
    }
    // Send the reports to the NN.
    int numReportsSent = 0;
    int numRPCs = 0;
    boolean success = false;
    long brSendStartTime = monotonicNow();
    long reportId = generateUniqueBlockReportId();
    try {
      if (totalBlockCount < dnConf.blockReportSplitThreshold) {
        // Below split threshold, send all reports in a single message.
        DatanodeCommand cmd = bpNamenode.blockReport(
            bpRegistration, bpos.getBlockPoolId(), reports,
            new BlockReportContext(1, 0, reportId));
        numRPCs = 1;
        numReportsSent = reports.length;
        if (cmd != null) {
          cmds.add(cmd);
        }
      } else {
        // Send one block report per message.
        for (int r = 0; r < reports.length; r++) {
          StorageBlockReport singleReport[] = { reports[r] };
          DatanodeCommand cmd = bpNamenode.blockReport(
              bpRegistration, bpos.getBlockPoolId(), singleReport,
              new BlockReportContext(reports.length, r, reportId));
          numReportsSent++;
          numRPCs++;
          if (cmd != null) {
            cmds.add(cmd);
          }
        }
      }
      success = true;
    } finally {
      // Log the block report processing stats from Datanode perspective
      long brSendCost = monotonicNow() - brSendStartTime;
      long brCreateCost = brSendStartTime - brCreateStartTime;
      dn.getMetrics().addBlockReport(brSendCost);
      final int nCmds = cmds.size();
      LOG.info((success ? "S" : "Uns") +
          "uccessfully sent block report 0x" +
          Long.toHexString(reportId) + ",  containing " + reports.length +
          " storage report(s), of which we sent " + numReportsSent + "." +
          " The reports had " + totalBlockCount +
          " total blocks and used " + numRPCs +
          " RPC(s). This took " + brCreateCost +
          " msec to generate and " + brSendCost +
          " msecs for RPC and NN processing." +
          " Got back " +
          ((nCmds == 0) ? "no commands" :
              ((nCmds == 1) ? "one command: " + cmds.get(0) :
                  (nCmds + " commands: " + Joiner.on("; ").join(cmds)))) +
          ".");
    }
    // Reschedule regardless of success; a failed report throws past this
    // only from the RPC calls above, after the finally block logged stats.
    scheduleNextBlockReport(startTime);
    return cmds.size() == 0 ? null : cmds;
}
/**
 * Set the baseline timestamp for the next periodic block report. After the
 * first report, a random offset of up to one full interval spreads reports
 * from different datanodes over time; afterwards reports stay aligned to
 * the original cadence even if one fires late.
 */
private void scheduleNextBlockReport(long previousReportStartTime) {
    // If we have sent the first set of block reports, then wait a random
    // time before we start the periodic block reports.
    if (resetBlockReportTime) {
      lastBlockReport = previousReportStartTime -
          DFSUtil.getRandom().nextInt((int)(dnConf.blockReportInterval));
      resetBlockReportTime = false;
    } else {
      /* say the last block report was at 8:20:14. The current report
       * should have started around 9:20:14 (default 1 hour interval).
       * If current time is :
       *   1) normal like 9:20:18, next report should be at 10:20:14
       *   2) unexpected like 11:35:43, next report should be at 12:20:14
       */
      lastBlockReport += (monotonicNow() - lastBlockReport) /
          dnConf.blockReportInterval * dnConf.blockReportInterval;
    }
}
/**
 * Send a cache report to the NN if caching is enabled and the cache-report
 * interval has elapsed.
 *
 * @return the DatanodeCommand returned by the NN, or null if nothing was sent
 * @throws IOException on RPC failure
 */
DatanodeCommand cacheReport() throws IOException {
    // If caching is disabled, do not send a cache report
    if (dn.getFSDataset().getCacheCapacity() == 0) {
      return null;
    }
    // send cache report if timer has expired.
    DatanodeCommand cmd = null;
    final long startTime = monotonicNow();
    if (startTime - lastCacheReport > dnConf.cacheReportInterval) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Sending cacheReport from service actor: " + this);
      }
      lastCacheReport = startTime;
      String bpid = bpos.getBlockPoolId();
      List<Long> blockIds = dn.getFSDataset().getCacheReport(bpid);
      long createTime = monotonicNow();
      cmd = bpNamenode.cacheReport(bpRegistration, bpid, blockIds);
      long sendTime = monotonicNow();
      long createCost = createTime - startTime;
      long sendCost = sendTime - createTime;
      dn.getMetrics().addCacheReport(sendCost);
      // Guard the debug line so the message string is not built when debug
      // logging is off (consistent with the guarded debug above).
      if (LOG.isDebugEnabled()) {
        LOG.debug("CacheReport of " + blockIds.size()
            + " block(s) took " + createCost + " msec to generate and "
            + sendCost + " msecs for RPC and NN processing");
      }
    }
    return cmd;
}
/**
 * Send a single heartbeat RPC carrying per-storage reports, cache
 * capacity/usage, transfer and xceiver load, and volume-failure info.
 *
 * @return the namenode's HeartbeatResponse
 * @throws IOException on RPC failure
 */
HeartbeatResponse sendHeartBeat() throws IOException {
    StorageReport[] reports =
        dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Sending heartbeat with " + reports.length +
                " storage reports from service actor: " + this);
    }
    VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
        .getVolumeFailureSummary();
    // volumeFailureSummary may be null (no failures); count defends that.
    int numFailedVolumes = volumeFailureSummary != null ?
        volumeFailureSummary.getFailedStorageLocations().length : 0;
    return bpNamenode.sendHeartbeat(bpRegistration,
        reports,
        dn.getFSDataset().getCacheCapacity(),
        dn.getFSDataset().getCacheUsed(),
        dn.getXmitsInProgress(),
        dn.getXceiverCount(),
        numFailedVolumes,
        volumeFailureSummary);
}
//This must be called only by BPOfferService
/** Start the actor's daemon thread; a no-op if it is already running. */
void start() {
    if (bpThread != null && bpThread.isAlive()) {
      //Thread is started already
      return;
    }
    bpThread = new Thread(this, formatThreadName());
    bpThread.setDaemon(true); // needed for JUnit testing
    bpThread.start();
}
// Build a descriptive thread name from this DataNode's storage
// directories and the NameNode address this actor heartbeats to.
private String formatThreadName() {
  final Collection<StorageLocation> storageDirs =
      DataNode.getStorageLocations(dn.getConf());
  return "DataNode: [" + storageDirs.toString() + "] "
      + " heartbeating to " + nnAddr;
}
// This must be called only by blockPoolManager.
// Signals the service loop to exit and interrupts any blocking call
// (sleep/wait/RPC) the actor thread may currently be in.
void stop() {
  shouldServiceRun = false;
  Thread worker = bpThread;
  if (worker != null) {
    worker.interrupt();
  }
}
// This must be called only by blockPoolManager
/**
 * Wait for the actor thread to terminate. If the waiting thread is
 * interrupted, the interrupt status is restored (instead of being
 * silently swallowed) so callers can observe and react to it.
 */
void join() {
  try {
    if (bpThread != null) {
      bpThread.join();
    }
  } catch (InterruptedException ie) {
    // Preserve the interrupt status rather than swallowing it.
    Thread.currentThread().interrupt();
  }
}
//Cleanup method to be called by current thread before exiting.
/**
 * Stop the service loop, close the NameNode proxy and notify the parent
 * BPOfferService that this actor is shutting down. Invoked from the
 * finally-block of run() by the actor thread itself.
 */
private synchronized void cleanUp() {
shouldServiceRun = false;
IOUtils.cleanup(LOG, bpNamenode);
bpos.shutdownActor(this);
}
/**
 * React to the rolling-upgrade status piggy-backed on a heartbeat
 * response: forward it to the BPOfferService unless its block pool id
 * does not match ours (which should never happen in practice).
 *
 * @param resp heartbeat response from the active NameNode
 * @throws IOException if signalling the rolling upgrade fails
 */
private void handleRollingUpgradeStatus(HeartbeatResponse resp) throws IOException {
  RollingUpgradeStatus rollingUpgradeStatus = resp.getRollingUpdateStatus();
  // Use !equals() rather than compareTo(...) != 0 for an equality check.
  if (rollingUpgradeStatus != null &&
      !rollingUpgradeStatus.getBlockPoolId().equals(bpos.getBlockPoolId())) {
    // Can this ever occur?
    LOG.error("Invalid BlockPoolId " +
        rollingUpgradeStatus.getBlockPoolId() +
        " in HeartbeatResponse. Expected " +
        bpos.getBlockPoolId());
  } else {
    bpos.signalRollingUpgrade(rollingUpgradeStatus);
  }
}
/**
 * Main loop for each BP thread. Run until shutdown,
 * forever calling remote NameNode functions.
 *
 * Each iteration may (1) send a heartbeat and process the commands it
 * returns, (2) send incremental block reports, (3) send a full block
 * report and a cache report, and then waits on
 * pendingIncrementalBRperStorage until either the heartbeat interval
 * elapses or new incremental-report work arrives.
 */
private void offerService() throws Exception {
LOG.info("For namenode " + nnAddr + " using"
+ " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
+ " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
+ " Initial delay: " + dnConf.initialBlockReportDelay + "msec"
+ "; heartBeatInterval=" + dnConf.heartBeatInterval);
//
// Now loop for a long time....
//
while (shouldRun()) {
try {
final long startTime = monotonicNow();
//
// Every so often, send heartbeat or block-report
//
boolean sendHeartbeat =
startTime - lastHeartbeat >= dnConf.heartBeatInterval;
if (sendHeartbeat) {
//
// All heartbeat messages include following info:
// -- Datanode name
// -- data transfer port
// -- Total capacity
// -- Bytes remaining
//
lastHeartbeat = startTime;
if (!dn.areHeartbeatsDisabledForTests()) {
HeartbeatResponse resp = sendHeartBeat();
assert resp != null;
dn.getMetrics().addHeartbeat(monotonicNow() - startTime);
// If the state of this NN has changed (eg STANDBY->ACTIVE)
// then let the BPOfferService update itself.
//
// Important that this happens before processCommand below,
// since the first heartbeat to a new active might have commands
// that we should actually process.
bpos.updateActorStatesFromHeartbeat(
this, resp.getNameNodeHaState());
state = resp.getNameNodeHaState().getState();
if (state == HAServiceState.ACTIVE) {
// Rolling-upgrade status is only meaningful from the active NN.
handleRollingUpgradeStatus(resp);
}
long startProcessCommands = monotonicNow();
// processCommand returns false when further processing should stop
// (e.g. re-registration was requested); restart the loop in that case.
if (!processCommand(resp.getCommands()))
continue;
long endProcessCommands = monotonicNow();
if (endProcessCommands - startProcessCommands > 2000) {
LOG.info("Took " + (endProcessCommands - startProcessCommands)
+ "ms to process " + resp.getCommands().length
+ " commands from NN");
}
}
}
// Piggy-back incremental block reports on heartbeats, or send them
// immediately when explicitly requested.
if (sendImmediateIBR || sendHeartbeat) {
reportReceivedDeletedBlocks();
}
List<DatanodeCommand> cmds = blockReport();
processCommand(cmds == null ? null : cmds.toArray(new DatanodeCommand[cmds.size()]));
DatanodeCommand cmd = cacheReport();
processCommand(new DatanodeCommand[]{ cmd });
//
// There is no work to do; sleep until hearbeat timer elapses,
// or work arrives, and then iterate again.
//
long waitTime = dnConf.heartBeatInterval -
(monotonicNow() - lastHeartbeat);
synchronized(pendingIncrementalBRperStorage) {
if (waitTime > 0 && !sendImmediateIBR) {
try {
// Woken early by notifyAll() when new IBR work is queued.
pendingIncrementalBRperStorage.wait(waitTime);
} catch (InterruptedException ie) {
LOG.warn("BPOfferService for " + this + " interrupted");
}
}
} // synchronized
} catch(RemoteException re) {
String reClass = re.getClassName();
// These exceptions are fatal for this actor: the NN no longer
// accepts this DataNode, so shut the service down.
if (UnregisteredNodeException.class.getName().equals(reClass) ||
DisallowedDatanodeException.class.getName().equals(reClass) ||
IncorrectVersionException.class.getName().equals(reClass)) {
LOG.warn(this + " is shutting down", re);
shouldServiceRun = false;
return;
}
LOG.warn("RemoteException in offerService", re);
try {
// Back off briefly before retrying after a transient NN error.
long sleepTime = Math.min(1000, dnConf.heartBeatInterval);
Thread.sleep(sleepTime);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
} catch (IOException e) {
LOG.warn("IOException in offerService", e);
}
// Drain any actions other threads queued for execution on this actor.
processQueueMessages();
} // while (shouldRun())
} // offerService
/**
 * Register one bp with the corresponding NameNode
 * <p>
 * The bpDatanode needs to register with the namenode on startup in order
 * 1) to report which storage it is serving now and
 * 2) to receive a registrationID
 *
 * issued by the namenode to recognize registered datanodes.
 *
 * Retries indefinitely (while the service should run) on connection
 * problems, since the NameNode may be restarting or busy.
 *
 * @param nsInfo current NamespaceInfo
 * @see FSNamesystem#registerDatanode(DatanodeRegistration)
 * @throws IOException
 */
void register(NamespaceInfo nsInfo) throws IOException {
// The handshake() phase loaded the block pool storage
// off disk - so update the bpRegistration object from that info
bpRegistration = bpos.createRegistration();
LOG.info(this + " beginning handshake with NN");
while (shouldRun()) {
try {
// Use returned registration from namenode with updated fields
bpRegistration = bpNamenode.registerDatanode(bpRegistration);
bpRegistration.setNamespaceInfo(nsInfo);
break;
} catch(EOFException e) { // namenode might have just restarted
LOG.info("Problem connecting to server: " + nnAddr + " :"
+ e.getLocalizedMessage());
sleepAndLogInterrupts(1000, "connecting to server");
} catch(SocketTimeoutException e) { // namenode is busy
LOG.info("Problem connecting to server: " + nnAddr);
sleepAndLogInterrupts(1000, "connecting to server");
}
}
LOG.info("Block pool " + this + " successfully registered with NN");
bpos.registrationSucceeded(this, bpRegistration);
// random short delay - helps scatter the BR from all DNs
scheduleBlockReport(dnConf.initialBlockReportDelay);
}
/**
 * Sleep for the given number of milliseconds, logging (and otherwise
 * ignoring) an interrupt.
 *
 * NOTE(review): the interrupt status is deliberately not restored here;
 * callers are retry loops that use shouldRun()/shouldServiceRun to decide
 * when to exit -- confirm before changing this behavior.
 *
 * @param millis sleep duration in milliseconds
 * @param stateString description of what the caller was doing, for the log
 */
private void sleepAndLogInterrupts(int millis,
String stateString) {
try {
Thread.sleep(millis);
} catch (InterruptedException ie) {
LOG.info("BPOfferService " + this + " interrupted while " + stateString);
}
}
/**
 * No matter what kind of exception we get, keep retrying to offerService().
 * That's the loop that connects to the NameNode and provides basic DataNode
 * functionality.
 *
 * Only stop when "shouldRun" or "shouldServiceRun" is turned off, which can
 * happen either at shutdown or due to refreshNamenodes.
 */
@Override
public void run() {
LOG.info(this + " starting to offer service");
try {
// Phase 1: initialization -- retry handshake/registration until it
// succeeds or init retries are exhausted.
while (true) {
// init stuff
try {
// setup storage
connectToNNAndHandshake();
break;
} catch (IOException ioe) {
// Initial handshake, storage recovery or registration failed
runningState = RunningState.INIT_FAILED;
if (shouldRetryInit()) {
// Retry until all namenode's of BPOS failed initialization
LOG.error("Initialization failed for " + this + " "
+ ioe.getLocalizedMessage());
sleepAndLogInterrupts(5000, "initializing");
} else {
runningState = RunningState.FAILED;
LOG.fatal("Initialization failed for " + this + ". Exiting. ", ioe);
return;
}
}
}
// Phase 2: steady state -- offerService() loops until shutdown; any
// exception it throws is logged and the service is retried.
runningState = RunningState.RUNNING;
while (shouldRun()) {
try {
offerService();
} catch (Exception ex) {
LOG.error("Exception in BPOfferService for " + this, ex);
sleepAndLogInterrupts(5000, "offering service");
}
}
runningState = RunningState.EXITED;
} catch (Throwable ex) {
LOG.warn("Unexpected exception in block pool " + this, ex);
runningState = RunningState.FAILED;
} finally {
// Always release resources and notify the parent BPOfferService.
LOG.warn("Ending block pool service for: " + this);
cleanUp();
}
}
// Initialization may be retried only while this actor should still run
// and the parent BPOfferService permits further init attempts.
private boolean shouldRetryInit() {
  if (!shouldRun()) {
    return false;
  }
  return bpos.shouldRetryInit();
}
// The actor runs only while both its own flag and the DataNode agree.
private boolean shouldRun() {
  if (!shouldServiceRun) {
    return false;
  }
  return dn.shouldRun();
}
/**
 * Process an array of datanode commands
 *
 * @param cmds an array of datanode commands (may be null)
 * @return true if further processing may be required or false otherwise.
 */
boolean processCommand(DatanodeCommand[] cmds) {
  if (cmds == null) {
    return true;
  }
  for (DatanodeCommand cmd : cmds) {
    try {
      // Stop processing as soon as the BPOfferService says to
      // (avoid the "== false" anti-idiom).
      if (!bpos.processCommandFromActor(cmd, this)) {
        return false;
      }
    } catch (IOException ioe) {
      LOG.warn("Error processing datanode Command", ioe);
    }
  }
  return true;
}
/**
 * Report a bad block from another DN in this cluster.
 *
 * @param dnInfo the DataNode holding the bad replica
 * @param block the block that was found to be bad
 * @throws IOException on RPC failure
 */
void reportRemoteBadBlock(DatanodeInfo dnInfo, ExtendedBlock block)
    throws IOException {
  DatanodeInfo[] locations = new DatanodeInfo[] { dnInfo };
  LocatedBlock badBlock = new LocatedBlock(block, locations);
  bpNamenode.reportBadBlocks(new LocatedBlock[] { badBlock });
}
/**
 * Re-register this block pool with the NameNode, e.g. after the NN asked
 * for it via a command. Fetches fresh namespace info first and schedules
 * an immediate heartbeat afterwards.
 *
 * @throws IOException on RPC failure
 */
void reRegister() throws IOException {
if (shouldRun()) {
// re-retrieve namespace info to make sure that, if the NN
// was restarted, we still match its version (HDFS-2120)
NamespaceInfo nsInfo = retrieveNamespaceInfo();
// and re-register
register(nsInfo);
scheduleHeartbeat();
}
}
/**
 * Per-storage bookkeeping of blocks awaiting an incremental block
 * report, keyed by block id. Not thread-safe by itself; callers
 * synchronize externally.
 */
private static class PerStoragePendingIncrementalBR {
  private final Map<Long, ReceivedDeletedBlockInfo> pendingIncrementalBR =
      Maps.newHashMap();

  /** @return the number of blocks with pending incremental block reports. */
  int getBlockInfoCount() {
    return pendingIncrementalBR.size();
  }

  /** Dequeue and return all pending incremental block report state. */
  ReceivedDeletedBlockInfo[] dequeueBlockInfos() {
    final ReceivedDeletedBlockInfo[] drained =
        pendingIncrementalBR.values().toArray(
            new ReceivedDeletedBlockInfo[getBlockInfoCount()]);
    pendingIncrementalBR.clear();
    return drained;
  }

  /**
   * Add blocks from blockArray to pendingIncrementalBR, unless the
   * block already exists in pendingIncrementalBR.
   *
   * @param blockArray list of blocks to add.
   * @return the number of missing blocks that we added.
   */
  int putMissingBlockInfos(ReceivedDeletedBlockInfo[] blockArray) {
    int added = 0;
    for (ReceivedDeletedBlockInfo info : blockArray) {
      final long blockId = info.getBlock().getBlockId();
      if (!pendingIncrementalBR.containsKey(blockId)) {
        pendingIncrementalBR.put(blockId, info);
        added++;
      }
    }
    return added;
  }

  /** Add (or overwrite) the pending report for a single block. */
  void putBlockInfo(ReceivedDeletedBlockInfo blockInfo) {
    pendingIncrementalBR.put(blockInfo.getBlock().getBlockId(), blockInfo);
  }

  /**
   * Remove the pending incremental block report for a single block,
   * if one exists.
   *
   * @param blockInfo block whose pending report should be removed
   * @return true if a report was removed, false if none existed.
   */
  boolean removeBlockInfo(ReceivedDeletedBlockInfo blockInfo) {
    return pendingIncrementalBR.remove(blockInfo.getBlock().getBlockId()) != null;
  }
}
/**
 * Request either an immediate incremental block report or a full block
 * report on the next service-loop iteration, waking the actor thread.
 */
void triggerBlockReport(BlockReportOptions options) throws IOException {
  final boolean incremental = options.isIncremental();
  if (incremental) {
    LOG.info(bpos.toString() + ": scheduling an incremental block report.");
  } else {
    LOG.info(bpos.toString() + ": scheduling a full block report.");
  }
  synchronized (pendingIncrementalBRperStorage) {
    if (incremental) {
      sendImmediateIBR = true;
    } else {
      // Zeroing the timestamp forces a full report on the next cycle.
      lastBlockReport = 0;
    }
    pendingIncrementalBRperStorage.notifyAll();
  }
}
/**
 * Queue an action for the actor thread to execute, de-duplicating so an
 * identical pending action is only enqueued once.
 */
public void bpThreadEnqueue(BPServiceActorAction action) {
  synchronized (bpThreadQueue) {
    boolean alreadyQueued = bpThreadQueue.contains(action);
    if (!alreadyQueued) {
      bpThreadQueue.add(action);
    }
  }
}
/**
 * Drain the action queue under the lock, then report each action to the
 * NameNode outside the lock; failed actions are re-queued for retry.
 */
private void processQueueMessages() {
  final LinkedList<BPServiceActorAction> pending;
  synchronized (bpThreadQueue) {
    pending = new LinkedList<BPServiceActorAction>(bpThreadQueue);
    bpThreadQueue.clear();
  }
  for (BPServiceActorAction action = pending.poll(); action != null;
       action = pending.poll()) {
    try {
      action.reportTo(bpNamenode, bpRegistration);
    } catch (BPServiceActorActionException baae) {
      LOG.warn(baae.getMessage() + nnAddr , baae);
      // Adding it back to the queue if not present
      bpThreadEnqueue(action);
    }
  }
}
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.service;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.lifecycle.SSTableSet;
import org.apache.cassandra.db.lifecycle.View;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.exceptions.RequestFailureReason;
import org.apache.cassandra.gms.ApplicationState;
import org.apache.cassandra.gms.EndpointState;
import org.apache.cassandra.gms.FailureDetector;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.gms.IFailureDetector;
import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
import org.apache.cassandra.gms.IFailureDetectionEventListener;
import org.apache.cassandra.gms.VersionedValue;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.locator.TokenMetadata;
import org.apache.cassandra.net.IAsyncCallbackWithFailure;
import org.apache.cassandra.net.MessageIn;
import org.apache.cassandra.net.MessageOut;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.repair.AnticompactionTask;
import org.apache.cassandra.repair.RepairJobDesc;
import org.apache.cassandra.repair.RepairParallelism;
import org.apache.cassandra.repair.RepairSession;
import org.apache.cassandra.repair.messages.*;
import org.apache.cassandra.utils.CassandraVersion;
import org.apache.cassandra.utils.Clock;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.UUIDGen;
import org.apache.cassandra.utils.concurrent.Ref;
import org.apache.cassandra.utils.concurrent.Refs;
/**
* ActiveRepairService is the starting point for manual "active" repairs.
*
 * Each user-triggered repair will correspond to one or multiple repair sessions,
 * one for each token range to repair. One repair session might repair multiple
 * column families. For each of those column families, the repair session will
 * request merkle trees for each replica of the range being repaired, diff those
 * trees upon receiving them, schedule the streaming of the parts to repair (based on
 * the tree diffs) and wait for all those operations. See RepairSession for more
 * details.
*
* The creation of a repair session is done through the submitRepairSession that
* returns a future on the completion of that session.
*/
public class ActiveRepairService implements IEndpointStateChangeSubscriber, IFailureDetectionEventListener
{
/**
 * @deprecated these statuses are from the previous JMX notification service,
 * which will be deprecated on 4.0. For statuses of the new notification
 * service, see {@link org.apache.cassandra.streaming.StreamEvent.ProgressEvent}
 */
@Deprecated
public enum Status
{
    STARTED, SESSION_SUCCESS, SESSION_FAILED, FINISHED
}
// Whether this service has registered itself with the Gossiper and the
// FailureDetector as an endpoint-state listener (done lazily the first
// time a parent repair session is registered).
private boolean registeredForEndpointChanges = false;
// NOTE(review): mutable public static field -- presumably overridden in
// tests; confirm before making it final.
public static CassandraVersion SUPPORTS_GLOBAL_PREPARE_FLAG_VERSION = new CassandraVersion("2.2.1");
private static final Logger logger = LoggerFactory.getLogger(ActiveRepairService.class);
// singleton enforcement
public static final ActiveRepairService instance = new ActiveRepairService(FailureDetector.instance, Gossiper.instance);
// repairedAt value marking an sstable that has never been repaired.
public static final long UNREPAIRED_SSTABLE = 0;
/**
 * A map of active coordinator session.
 */
private final ConcurrentMap<UUID, RepairSession> sessions = new ConcurrentHashMap<>();
// One entry per ongoing repair invocation; see ParentRepairSession below.
private final ConcurrentMap<UUID, ParentRepairSession> parentRepairSessions = new ConcurrentHashMap<>();
private final IFailureDetector failureDetector;
private final Gossiper gossiper;
/**
 * @param failureDetector used to watch repair participants for failure
 * @param gossiper used to observe endpoint state changes during repairs
 */
public ActiveRepairService(IFailureDetector failureDetector, Gossiper gossiper)
{
this.failureDetector = failureDetector;
this.gossiper = gossiper;
}
/**
 * Requests repairs for the given keyspace and column families.
 *
 * The new session is tracked in {@code sessions}, registered with the
 * failure detector/gossiper, and removed again once it completes.
 *
 * @return Future for asynchronous call or null if there is no need to repair
 */
public RepairSession submitRepairSession(UUID parentRepairSession,
                                         Collection<Range<Token>> range,
                                         String keyspace,
                                         RepairParallelism parallelismDegree,
                                         Set<InetAddress> endpoints,
                                         long repairedAt,
                                         boolean pullRepair,
                                         ListeningExecutorService executor,
                                         String... cfnames)
{
    // Nothing to repair without participants or without tables.
    if (endpoints.isEmpty() || cfnames.length == 0)
        return null;

    final RepairSession session = new RepairSession(parentRepairSession, UUIDGen.getTimeUUID(), range, keyspace, parallelismDegree, endpoints, repairedAt, pullRepair, cfnames);
    sessions.put(session.getId(), session);

    // register listeners
    registerOnFdAndGossip(session);

    // drop the session from the map once the repair has finished
    session.addListener(() -> sessions.remove(session.getId()), MoreExecutors.directExecutor());
    session.start(executor);
    return session;
}
/**
 * Register the given task with the failure detector and gossiper so it
 * observes endpoint state for its duration, and unregister it again once
 * the task completes.
 *
 * @param task a repair task that is also an endpoint/failure listener
 */
private <T extends AbstractFuture &
           IEndpointStateChangeSubscriber &
           IFailureDetectionEventListener> void registerOnFdAndGossip(final T task)
{
    gossiper.register(task);
    failureDetector.registerFailureDetectionEventListener(task);

    // unregister listeners at completion
    task.addListener(new Runnable()
    {
        /**
         * When repair finished, do clean up
         */
        public void run()
        {
            failureDetector.unregisterFailureDetectionEventListener(task);
            gossiper.unregister(task);
        }
        // directExecutor(): sameThreadExecutor() is deprecated in Guava and
        // behaves identically here; also consistent with submitRepairSession.
    }, MoreExecutors.directExecutor());
}
/**
 * Force-shutdown every active repair session with a common cause and
 * discard all parent repair sessions.
 */
public synchronized void terminateSessions()
{
    Throwable cause = new IOException("Terminate session is called");
    sessions.values().forEach(session -> session.forceShutdown(cause));
    parentRepairSessions.clear();
}
/**
 * Return all of the neighbors with whom we share the provided range.
 *
 * @param keyspaceName keyspace to repair
 * @param keyspaceLocalRanges local-range for given keyspaceName
 * @param toRepair token to repair
 * @param dataCenters the data centers to involve in the repair
 * @param hosts the specific hosts to involve in the repair
 *
 * @return neighbors with whom we share the provided range
 * @throws IllegalArgumentException if toRepair straddles local ranges, if a
 *         supplied host is unknown, or if the host list cannot satisfy the repair
 */
public static Set<InetAddress> getNeighbors(String keyspaceName, Collection<Range<Token>> keyspaceLocalRanges,
Range<Token> toRepair, Collection<String> dataCenters,
Collection<String> hosts)
{
StorageService ss = StorageService.instance;
Map<Range<Token>, List<InetAddress>> replicaSets = ss.getRangeToAddressMap(keyspaceName);
// Find the single local range fully containing the requested range.
Range<Token> rangeSuperSet = null;
for (Range<Token> range : keyspaceLocalRanges)
{
if (range.contains(toRepair))
{
rangeSuperSet = range;
break;
}
else if (range.intersects(toRepair))
{
throw new IllegalArgumentException("Requested range intersects a local range but is not fully contained in one; this would lead to imprecise repair");
}
}
if (rangeSuperSet == null || !replicaSets.containsKey(rangeSuperSet))
return Collections.emptySet();
// All replicas of the range except ourselves.
Set<InetAddress> neighbors = new HashSet<>(replicaSets.get(rangeSuperSet));
neighbors.remove(FBUtilities.getBroadcastAddress());
if (dataCenters != null && !dataCenters.isEmpty())
{
// Restrict neighbors to the requested data centers.
TokenMetadata.Topology topology = ss.getTokenMetadata().cloneOnlyTokenMap().getTopology();
Set<InetAddress> dcEndpoints = Sets.newHashSet();
Multimap<String,InetAddress> dcEndpointsMap = topology.getDatacenterEndpoints();
for (String dc : dataCenters)
{
Collection<InetAddress> c = dcEndpointsMap.get(dc);
if (c != null)
dcEndpoints.addAll(c);
}
return Sets.intersection(neighbors, dcEndpoints);
}
else if (hosts != null && !hosts.isEmpty())
{
// Restrict neighbors to an explicit host list; the local node must be
// included in that list, and at least one real neighbor must remain.
Set<InetAddress> specifiedHost = new HashSet<>();
for (final String host : hosts)
{
try
{
final InetAddress endpoint = InetAddress.getByName(host.trim());
if (endpoint.equals(FBUtilities.getBroadcastAddress()) || neighbors.contains(endpoint))
specifiedHost.add(endpoint);
}
catch (UnknownHostException e)
{
throw new IllegalArgumentException("Unknown host specified " + host, e);
}
}
if (!specifiedHost.contains(FBUtilities.getBroadcastAddress()))
throw new IllegalArgumentException("The current host must be part of the repair");
if (specifiedHost.size() <= 1)
{
String msg = "Specified hosts %s do not share range %s needed for repair. Either restrict repair ranges " +
"with -st/-et options, or specify one of the neighbors that share this range with " +
"this node: %s.";
throw new IllegalArgumentException(String.format(msg, hosts, toRepair, neighbors));
}
specifiedHost.remove(FBUtilities.getBroadcastAddress());
return specifiedHost;
}
return neighbors;
}
/**
 * Send a PrepareMessage to every participant and wait (up to one hour)
 * for all of them to acknowledge. Registers the parent repair session
 * locally first; on failure or interruption the parent session is removed
 * and a RuntimeException listing the failed endpoints is thrown.
 *
 * @return the parentRepairSession id, on success
 */
public synchronized UUID prepareForRepair(UUID parentRepairSession, InetAddress coordinator, Set<InetAddress> endpoints, RepairOption options, List<ColumnFamilyStore> columnFamilyStores)
{
long timestamp = Clock.instance.currentTimeMillis();
registerParentRepairSession(parentRepairSession, coordinator, columnFamilyStores, options.getRanges(), options.isIncremental(), timestamp, options.isGlobal());
// Latch counts down once per endpoint: on reply, failure or dead node.
final CountDownLatch prepareLatch = new CountDownLatch(endpoints.size());
final AtomicBoolean status = new AtomicBoolean(true);
final Set<String> failedNodes = Collections.synchronizedSet(new HashSet<String>());
IAsyncCallbackWithFailure callback = new IAsyncCallbackWithFailure()
{
public void response(MessageIn msg)
{
prepareLatch.countDown();
}
public boolean isLatencyForSnitch()
{
return false;
}
public void onFailure(InetAddress from, RequestFailureReason failureReason)
{
status.set(false);
failedNodes.add(from.getHostAddress());
prepareLatch.countDown();
}
};
List<UUID> cfIds = new ArrayList<>(columnFamilyStores.size());
for (ColumnFamilyStore cfs : columnFamilyStores)
cfIds.add(cfs.metadata.cfId);
for (InetAddress neighbour : endpoints)
{
if (FailureDetector.instance.isAlive(neighbour))
{
PrepareMessage message = new PrepareMessage(parentRepairSession, cfIds, options.getRanges(), options.isIncremental(), timestamp, options.isGlobal());
MessageOut<RepairMessage> msg = message.createMessage();
MessagingService.instance().sendRR(msg, neighbour, callback, TimeUnit.HOURS.toMillis(1), true);
}
else
{
// Dead participant: mark the prepare as failed without sending.
status.set(false);
failedNodes.add(neighbour.getHostAddress());
prepareLatch.countDown();
}
}
try
{
prepareLatch.await(1, TimeUnit.HOURS);
}
catch (InterruptedException e)
{
removeParentRepairSession(parentRepairSession);
throw new RuntimeException("Did not get replies from all endpoints. List of failed endpoint(s): " + failedNodes.toString(), e);
}
if (!status.get())
{
removeParentRepairSession(parentRepairSession);
throw new RuntimeException("Did not get positive replies from all endpoints. List of failed endpoint(s): " + failedNodes.toString());
}
return parentRepairSession;
}
/**
 * Record a new ParentRepairSession locally. On first use, also registers
 * this service with the Gossiper and FailureDetector so endpoint changes
 * during the repair are observed.
 */
public void registerParentRepairSession(UUID parentRepairSession, InetAddress coordinator, List<ColumnFamilyStore> columnFamilyStores, Collection<Range<Token>> ranges, boolean isIncremental, long timestamp, boolean isGlobal)
{
if (!registeredForEndpointChanges)
{
Gossiper.instance.register(this);
FailureDetector.instance.registerFailureDetectionEventListener(this);
registeredForEndpointChanges = true;
}
parentRepairSessions.put(parentRepairSession, new ParentRepairSession(coordinator, columnFamilyStores, ranges, isIncremental, timestamp, isGlobal));
}
/**
 * Collect the sstables of the given table that are currently being
 * repaired by any parent repair session other than the one supplied.
 */
public Set<SSTableReader> currentlyRepairing(UUID cfId, UUID parentRepairSession)
{
    Set<SSTableReader> repairing = new HashSet<>();
    for (Map.Entry<UUID, ParentRepairSession> entry : parentRepairSessions.entrySet())
    {
        Collection<SSTableReader> sstables = entry.getValue().getActiveSSTables(cfId);
        boolean otherSession = !entry.getKey().equals(parentRepairSession);
        if (sstables != null && otherSession)
            repairing.addAll(sstables);
    }
    return repairing;
}
/**
 * Run final process of repair.
 * This removes all resources held by parent repair session, after performing anti compaction if necessary.
 *
 * Sends an AnticompactionTask to each neighbor and runs local
 * anticompaction; the returned future completes when all of them do.
 *
 * @param parentSession Parent session ID
 * @param neighbors Repair participants (not including self)
 * @param successfulRanges Ranges that repaired successfully
 */
public synchronized ListenableFuture finishParentSession(UUID parentSession, Set<InetAddress> neighbors, Collection<Range<Token>> successfulRanges)
{
List<ListenableFuture<?>> tasks = new ArrayList<>(neighbors.size() + 1);
for (InetAddress neighbor : neighbors)
{
AnticompactionTask task = new AnticompactionTask(parentSession, neighbor, successfulRanges);
registerOnFdAndGossip(task);
tasks.add(task);
task.run(); // 'run' is just sending message
}
// Local anticompaction is the final task in the list.
tasks.add(doAntiCompaction(parentSession, successfulRanges));
return Futures.successfulAsList(tasks);
}
/**
 * Look up a parent repair session by id, failing loudly if it is gone.
 */
public ParentRepairSession getParentRepairSession(UUID parentSessionId)
{
    ParentRepairSession session = parentRepairSessions.get(parentSessionId);
    if (session != null)
        return session;
    // this can happen if a node thinks that the coordinator was down, but that coordinator got back before noticing
    // that it was down itself.
    throw new RuntimeException("Parent repair session with id = " + parentSessionId + " has failed.");
}
/**
 * called when the repair session is done - either failed or anticompaction has completed
 *
 * clears out any snapshots created by this repair
 *
 * Note: throws (via getParentRepairSession) if the session is unknown.
 *
 * @param parentSessionId
 * @return the removed ParentRepairSession
 */
public synchronized ParentRepairSession removeParentRepairSession(UUID parentSessionId)
{
// Repair snapshots are named after the parent session id.
String snapshotName = parentSessionId.toString();
for (ColumnFamilyStore cfs : getParentRepairSession(parentSessionId).columnFamilyStores.values())
{
if (cfs.snapshotExists(snapshotName))
cfs.clearSnapshot(snapshotName);
}
return parentRepairSessions.remove(parentSessionId);
}
/**
 * Submit anti-compaction jobs to CompactionManager.
 * When all jobs are done, parent repair session is removed whether those are succeeded or not.
 *
 * @param parentRepairSession parent repair session ID
 * @return Future result of all anti-compaction jobs.
 */
@SuppressWarnings("resource")
public ListenableFuture<List<Object>> doAntiCompaction(final UUID parentRepairSession, Collection<Range<Token>> successfulRanges)
{
assert parentRepairSession != null;
ParentRepairSession prs = getParentRepairSession(parentRepairSession);
//A repair will be marked as not global if it is a subrange repair to avoid many small anti-compactions
//in addition to other scenarios such as repairs not involving all DCs or hosts
if (!prs.isGlobal)
{
logger.info("[repair #{}] Not a global repair, will not do anticompaction", parentRepairSession);
removeParentRepairSession(parentRepairSession);
return Futures.immediateFuture(Collections.emptyList());
}
assert prs.ranges.containsAll(successfulRanges) : "Trying to perform anticompaction on unknown ranges";
List<ListenableFuture<?>> futures = new ArrayList<>();
// if we don't have successful repair ranges, then just skip anticompaction
if (!successfulRanges.isEmpty())
{
for (Map.Entry<UUID, ColumnFamilyStore> columnFamilyStoreEntry : prs.columnFamilyStores.entrySet())
{
// Refs are released by the anticompaction job; hence @SuppressWarnings("resource").
Refs<SSTableReader> sstables = prs.getActiveRepairedSSTableRefsForAntiCompaction(columnFamilyStoreEntry.getKey(), parentRepairSession);
ColumnFamilyStore cfs = columnFamilyStoreEntry.getValue();
futures.add(CompactionManager.instance.submitAntiCompaction(cfs, successfulRanges, sstables, prs.repairedAt, parentRepairSession));
}
}
ListenableFuture<List<Object>> allAntiCompactionResults = Futures.successfulAsList(futures);
// Clean up the parent session once every anticompaction job has finished,
// regardless of individual success or failure.
allAntiCompactionResults.addListener(new Runnable()
{
@Override
public void run()
{
removeParentRepairSession(parentRepairSession);
}
}, MoreExecutors.directExecutor());
return allAntiCompactionResults;
}
/**
 * Dispatch an inbound repair message to the matching active session.
 * Messages for unknown (e.g. already-completed) sessions are dropped.
 */
public void handleMessage(InetAddress endpoint, RepairMessage message)
{
    RepairJobDesc desc = message.desc;
    RepairSession session = sessions.get(desc.sessionId);
    if (session == null)
        return;
    switch (message.messageType)
    {
        case VALIDATION_COMPLETE:
        {
            ValidationComplete completed = (ValidationComplete) message;
            session.validationComplete(desc, endpoint, completed.trees);
            break;
        }
        case SYNC_COMPLETE:
        {
            // one of replica is synced.
            SyncComplete synced = (SyncComplete) message;
            session.syncComplete(desc, synced.nodes, synced.success);
            break;
        }
        default:
            break;
    }
}
/**
* We keep a ParentRepairSession around for the duration of the entire repair, for example, on a 256 token vnode rf=3 cluster
* we would have 768 RepairSession but only one ParentRepairSession. We use the PRS to avoid anticompacting the sstables
* 768 times, instead we take all repaired ranges at the end of the repair and anticompact once.
*
* We do an optimistic marking of sstables - when we start an incremental repair we mark all unrepaired sstables as
* repairing (@see markSSTablesRepairing), then while the repair is ongoing compactions might remove those sstables,
* and when it is time for anticompaction we will only anticompact the sstables that are still on disk.
*
* Note that validation and streaming do not care about which sstables we have marked as repairing - they operate on
* all unrepaired sstables (if it is incremental), otherwise we would not get a correct repair.
*/
public static class ParentRepairSession
{
// Tables involved in this repair, keyed by cfId.
private final Map<UUID, ColumnFamilyStore> columnFamilyStores = new HashMap<>();
// Token ranges this parent repair covers.
private final Collection<Range<Token>> ranges;
// Per-table set of sstable filenames marked as repairing for this session.
public final Map<UUID, Set<String>> sstableMap = new HashMap<>();
public final boolean isIncremental;
public final boolean isGlobal;
// Timestamp used as the repairedAt value when anticompacting.
public final long repairedAt;
public final InetAddress coordinator;
/**
 * Indicates whether we have marked sstables as repairing. Can only be done once per table per ParentRepairSession
 */
private final Set<UUID> marked = new HashSet<>();
/**
 * @param coordinator node coordinating this repair
 * @param columnFamilyStores tables involved in the repair
 * @param ranges token ranges covered by the repair
 * @param isIncremental whether this is an incremental repair
 * @param repairedAt repairedAt timestamp to stamp on anticompacted sstables
 * @param isGlobal whether the repair covers all DCs/hosts for the ranges
 */
public ParentRepairSession(InetAddress coordinator, List<ColumnFamilyStore> columnFamilyStores, Collection<Range<Token>> ranges, boolean isIncremental, long repairedAt, boolean isGlobal)
{
    this.coordinator = coordinator;
    this.ranges = ranges;
    this.repairedAt = repairedAt;
    this.isIncremental = isIncremental;
    this.isGlobal = isGlobal;
    for (ColumnFamilyStore cfs : columnFamilyStores)
    {
        this.columnFamilyStores.put(cfs.metadata.cfId, cfs);
        sstableMap.put(cfs.metadata.cfId, new HashSet<String>());
    }
}
/**
 * Mark sstables repairing - either all sstables or only the unrepaired ones depending on
 *
 * whether this is an incremental or full repair
 *
 * No-op if this table was already marked for this parent session.
 *
 * @param cfId the column family
 * @param parentSessionId the parent repair session id, used to make sure we don't start multiple repairs over the same sstables
 * @throws RuntimeException if another session is already repairing any of the candidate sstables
 */
public synchronized void markSSTablesRepairing(UUID cfId, UUID parentSessionId)
{
if (!marked.contains(cfId))
{
// For incremental repairs only unrepaired sstables are selected;
// full repairs take every canonical sstable.
List<SSTableReader> sstables = columnFamilyStores.get(cfId).select(View.select(SSTableSet.CANONICAL, (s) -> !isIncremental || !s.isRepaired())).sstables;
Set<SSTableReader> currentlyRepairing = ActiveRepairService.instance.currentlyRepairing(cfId, parentSessionId);
if (!Sets.intersection(currentlyRepairing, Sets.newHashSet(sstables)).isEmpty())
{
logger.error("Cannot start multiple repair sessions over the same sstables");
throw new RuntimeException("Cannot start multiple repair sessions over the same sstables");
}
addSSTables(cfId, sstables);
marked.add(cfId);
}
}
/**
 * Get the still active sstables we should run anticompaction on
 *
 * note that validation and streaming do not call this method - they have to work on the actual active sstables on the node, we only call this
 * to know which sstables are still there that were there when we started the repair
 *
 * @param cfId
 * @param parentSessionId for checking if there exists a snapshot for this repair
 * @return references to the sstables to anticompact; sstables that could
 *         not be referenced are dropped from sstableMap
 */
@SuppressWarnings("resource")
public synchronized Refs<SSTableReader> getActiveRepairedSSTableRefsForAntiCompaction(UUID cfId, UUID parentSessionId)
{
assert marked.contains(cfId);
if (!columnFamilyStores.containsKey(cfId))
throw new RuntimeException("Not possible to get sstables for anticompaction for " + cfId);
// Snapshot repairs resolve sstables through the snapshot; otherwise use
// the live set recorded when the repair started.
boolean isSnapshotRepair = columnFamilyStores.get(cfId).snapshotExists(parentSessionId.toString());
ImmutableMap.Builder<SSTableReader, Ref<SSTableReader>> references = ImmutableMap.builder();
Iterable<SSTableReader> sstables = isSnapshotRepair ? getSSTablesForSnapshotRepair(cfId, parentSessionId) : getActiveSSTables(cfId);
// we check this above - if columnFamilyStores contains the cfId sstables will not be null
assert sstables != null;
for (SSTableReader sstable : sstables)
{
Ref<SSTableReader> ref = sstable.tryRef();
if (ref == null)
sstableMap.get(cfId).remove(sstable.getFilename());
else
references.put(sstable, ref);
}
return new Refs<>(references.build());
}
/**
 * If we are running a snapshot repair we need to find the 'real' sstables when we start anticompaction
 *
 * We use the generation of the sstables as identifiers instead of the file name to avoid having to parse out the
 * actual filename.
 *
 * @param cfId            the column family
 * @param parentSessionId parent repair session id (doubles as the snapshot name)
 * @return the live sstables whose generations match the snapshotted ones, or null if
 *         the column family is unknown to this session
 */
private Set<SSTableReader> getSSTablesForSnapshotRepair(UUID cfId, UUID parentSessionId)
{
    Set<SSTableReader> activeSSTables = new HashSet<>();
    ColumnFamilyStore cfs = columnFamilyStores.get(cfId);
    if (cfs == null)
        return null;

    // Collect the generations present in the snapshot; the refs are released on close.
    Set<Integer> snapshotGenerations = new HashSet<>();
    try (Refs<SSTableReader> snapshottedSSTables = cfs.getSnapshotSSTableReader(parentSessionId.toString()))
    {
        for (SSTableReader sstable : snapshottedSSTables)
        {
            snapshotGenerations.add(sstable.descriptor.generation);
        }
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    // Match live (canonical) sstables to the snapshot by generation.
    for (SSTableReader sstable : cfs.getSSTables(SSTableSet.CANONICAL))
        if (snapshotGenerations.contains(sstable.descriptor.generation))
            activeSSTables.add(sstable);
    return activeSSTables;
}
/**
 * Takes a snapshot of the sstables eligible for this repair (unless a snapshot already
 * exists for this parent session) and registers them as repairing.
 *
 * Eligible sstables are non-index sstables that intersect the repaired ranges; for an
 * incremental repair, already-repaired sstables are additionally excluded.
 *
 * @param cfId            id of the column family store
 * @param parentSessionId parent repair session id, also used as the snapshot name
 * @throws RuntimeException if any snapshotted sstable is already part of another repair;
 *                          the snapshot just taken is cleared before throwing
 */
public synchronized void maybeSnapshot(UUID cfId, UUID parentSessionId)
{
    String snapshotName = parentSessionId.toString();
    // Look the store up once instead of repeating the map lookup below.
    ColumnFamilyStore cfs = columnFamilyStores.get(cfId);
    if (!cfs.snapshotExists(snapshotName))
    {
        Set<SSTableReader> snapshottedSSTables = cfs.snapshot(snapshotName, new Predicate<SSTableReader>()
        {
            public boolean apply(SSTableReader sstable)
            {
                return sstable != null &&
                       (!isIncremental || !sstable.isRepaired()) &&
                       !(sstable.metadata.isIndex()) && // exclude SSTables from 2i
                       new Bounds<>(sstable.first.getToken(), sstable.last.getToken()).intersects(ranges);
            }
        }, true, false);
        if (isAlreadyRepairing(cfId, parentSessionId, snapshottedSSTables))
        {
            // Don't leak the snapshot we just took if we cannot use it.
            cfs.clearSnapshot(snapshotName);
            logger.error("Cannot start multiple repair sessions over the same sstables");
            throw new RuntimeException("Cannot start multiple repair sessions over the same sstables");
        }
        addSSTables(cfId, snapshottedSSTables);
        marked.add(cfId);
    }
}
/**
 * Checks whether any of the freshly snapshotted sstables is already part of another
 * repair, comparing by sstable *generation*.
 *
 * Generations are used because snapshotted sstables live under snapshot directories,
 * so their file paths differ from the originals even though they are the same data.
 *
 * @param cfId            id of the column family store
 * @param parentSessionId parent repair session
 * @param sstables        the newly snapshotted sstables
 * @return true if at least one generation collides with a currently repairing sstable
 */
private boolean isAlreadyRepairing(UUID cfId, UUID parentSessionId, Collection<SSTableReader> sstables)
{
    Set<SSTableReader> repairing = ActiveRepairService.instance.currentlyRepairing(cfId, parentSessionId);
    Set<Integer> repairingGenerations = new HashSet<>();
    for (SSTableReader repairingSSTable : repairing)
        repairingGenerations.add(repairingSSTable.descriptor.generation);
    for (SSTableReader candidate : sstables)
    {
        if (repairingGenerations.contains(candidate.descriptor.generation))
            return true;
    }
    return false;
}
/**
 * Returns the sstables registered for this repair that are still live, pruning the
 * tracked filename set down to the survivors as a side effect.
 *
 * @param cfId id of the column family store
 * @return the still-live tracked sstables, or null if the table is unknown to this session
 */
private Set<SSTableReader> getActiveSSTables(UUID cfId)
{
    if (!columnFamilyStores.containsKey(cfId))
        return null;

    ColumnFamilyStore cfs = columnFamilyStores.get(cfId);
    Set<String> trackedNames = sstableMap.get(cfId);
    Set<SSTableReader> stillActive = new HashSet<>();
    Set<String> stillActiveNames = new HashSet<>();
    for (SSTableReader sstable : cfs.getSSTables(SSTableSet.CANONICAL))
    {
        String filename = sstable.getFilename();
        if (trackedNames.contains(filename))
        {
            stillActive.add(sstable);
            stillActiveNames.add(filename);
        }
    }
    // Drop entries for sstables that have disappeared since the repair started.
    sstableMap.put(cfId, stillActiveNames);
    return stillActive;
}
/**
 * Registers the given sstables (by filename) as participating in this repair for the
 * given table.
 */
private void addSSTables(UUID cfId, Collection<SSTableReader> sstables)
{
    Set<String> trackedNames = sstableMap.get(cfId);
    for (SSTableReader sstable : sstables)
        trackedNames.add(sstable.getFilename());
}
/**
 * The repairedAt timestamp to stamp on sstables, or {@code UNREPAIRED_SSTABLE} when
 * this is not a global repair (only a global repair may mark data as repaired).
 */
public long getRepairedAt()
{
    return isGlobal ? repairedAt : ActiveRepairService.UNREPAIRED_SSTABLE;
}
/** Debug representation of this session; mirrors the tracked state field by field. */
@Override
public String toString()
{
    StringBuilder description = new StringBuilder("ParentRepairSession{");
    description.append("columnFamilyStores=").append(columnFamilyStores);
    description.append(", ranges=").append(ranges);
    description.append(", sstableMap=").append(sstableMap);
    description.append(", repairedAt=").append(repairedAt);
    description.append('}');
    return description.toString();
}
}
/*
 If the coordinator node dies we should remove the parent repair session from the other nodes.
 This uses the same notifications as we get in RepairSession
 */
// Gossip lifecycle callbacks we do not act on - only coordinator failure matters here.
public void onJoin(InetAddress endpoint, EndpointState epState) {}
public void beforeChange(InetAddress endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue) {}
public void onChange(InetAddress endpoint, ApplicationState state, VersionedValue value) {}
public void onAlive(InetAddress endpoint, EndpointState state) {}
public void onDead(InetAddress endpoint, EndpointState state) {}
// A removed or restarted endpoint is treated as a definite failure: convict with maximum
// phi so any repair it coordinated is failed immediately (see convict()).
public void onRemove(InetAddress endpoint)
{
    convict(endpoint, Double.MAX_VALUE);
}
public void onRestart(InetAddress endpoint, EndpointState state)
{
    convict(endpoint, Double.MAX_VALUE);
}
/**
 * Something has happened to a remote node - if that node is a coordinator, we remove the
 * parent repair sessions it coordinated, failing them on this node.
 *
 * NOTE(review): the original doc mentioned a 24h fail marker kept by session removal -
 * that behavior would live in removeParentRepairSession; confirm there.
 *
 * @param ep  endpoint to be convicted
 * @param phi the value of phi with which ep was convicted
 */
public void convict(InetAddress ep, double phi)
{
    // Failing a repair wrongly is expensive, so demand twice the usual phi threshold,
    // and bail out immediately when there is nothing to fail.
    if (phi < 2 * DatabaseDescriptor.getPhiConvictThreshold() || parentRepairSessions.isEmpty())
        return;

    // Collect first, remove afterwards, so we never mutate the map mid-iteration.
    Set<UUID> sessionsToFail = new HashSet<>();
    for (Map.Entry<UUID, ParentRepairSession> entry : parentRepairSessions.entrySet())
    {
        if (entry.getValue().coordinator.equals(ep))
            sessionsToFail.add(entry.getKey());
    }

    if (sessionsToFail.isEmpty())
        return;

    logger.debug("Removing {} in parent repair sessions", sessionsToFail);
    for (UUID sessionId : sessionsToFail)
        removeParentRepairSession(sessionId);
}
}
|
|
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* Copyright (c) 2008-2011, Red Hat Inc. or third-party contributors as
* indicated by the @author tags or express copyright attribution
* statements applied by the authors. All third-party contributions are
* distributed under license by Red Hat Inc.
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the GNU
* Lesser General Public License, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this distribution; if not, write to:
* Free Software Foundation, Inc.
* 51 Franklin Street, Fifth Floor
* Boston, MA 02110-1301 USA
*/
package org.hibernate.persister.entity;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.hibernate.AssertionFailure;
import org.hibernate.HibernateException;
import org.hibernate.MappingException;
import org.hibernate.QueryException;
import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
import org.hibernate.cache.spi.access.NaturalIdRegionAccessStrategy;
import org.hibernate.engine.OptimisticLockStyle;
import org.hibernate.engine.spi.ExecuteUpdateResultCheckStyle;
import org.hibernate.engine.spi.Mapping;
import org.hibernate.engine.spi.SessionFactoryImplementor;
import org.hibernate.internal.DynamicFilterAliasGenerator;
import org.hibernate.internal.FilterAliasGenerator;
import org.hibernate.internal.util.collections.ArrayHelper;
import org.hibernate.mapping.Column;
import org.hibernate.mapping.Join;
import org.hibernate.mapping.KeyValue;
import org.hibernate.mapping.PersistentClass;
import org.hibernate.mapping.Property;
import org.hibernate.mapping.Selectable;
import org.hibernate.mapping.Subclass;
import org.hibernate.mapping.Table;
import org.hibernate.metamodel.binding.EntityBinding;
import org.hibernate.sql.CaseFragment;
import org.hibernate.sql.SelectFragment;
import org.hibernate.type.StandardBasicTypes;
import org.hibernate.type.Type;
/**
* An <tt>EntityPersister</tt> implementing the normalized "table-per-subclass"
* mapping strategy
*
* @author Gavin King
*/
public class JoinedSubclassEntityPersister extends AbstractEntityPersister {
// the class hierarchy structure
private final int tableSpan;
// tables in "driving table first" order; naturalOrder* arrays are in mapping order
private final String[] tableNames;
private final String[] naturalOrderTableNames;
private final String[][] tableKeyColumns;
private final String[][] tableKeyColumnReaders;
private final String[][] tableKeyColumnReaderTemplates;
private final String[][] naturalOrderTableKeyColumns;
private final String[][] naturalOrderTableKeyColumnReaders;
private final String[][] naturalOrderTableKeyColumnReaderTemplates;
private final boolean[] naturalOrderCascadeDeleteEnabled;
// query spaces: mapped tables plus explicitly synchronized tables
private final String[] spaces;
private final String[] subclassClosure;
private final String[] subclassTableNameClosure;
private final String[][] subclassTableKeyColumnClosure;
private final boolean[] isClassOrSuperclassTable;
// properties of this class, including inherited properties
private final int[] naturalOrderPropertyTableNumbers;
private final int[] propertyTableNumbers;
// the closure of all properties in the entire hierarchy including
// subclasses and superclasses of this class
private final int[] subclassPropertyTableNumberClosure;
// the closure of all columns used by the entire hierarchy including
// subclasses and superclasses of this class
private final int[] subclassColumnTableNumberClosure;
private final int[] subclassFormulaTableNumberClosure;
private final boolean[] subclassTableSequentialSelect;
private final boolean[] subclassTableIsLazyClosure;
// subclass discrimination works by assigning particular
// values to certain combinations of null primary key
// values in the outer join using an SQL CASE
private final Map subclassesByDiscriminatorValue = new HashMap();
private final String[] discriminatorValues;
private final String[] notNullColumnNames;
private final int[] notNullColumnTableNumbers;
private final String[] constraintOrderedTableNames;
private final String[][] constraintOrderedKeyColumnNames;
// synthetic discriminator: the subclass id (null for non-polymorphic mappings)
private final Object discriminatorValue;
private final String discriminatorSQLString;
// Span of the tables directly mapped by this entity and super-classes, if any
private final int coreTableSpan;
// only contains values for SecondaryTables, ie. not tables part of the "coreTableSpan"
private final boolean[] isNullableTable;
//INITIALIZATION:
/**
 * Builds the persister from the configuration-time mapping model: resolves the table
 * hierarchy (superclass tables, secondary-table joins, subclass tables), key columns,
 * custom SQL, property-to-table numbering and the synthetic discriminator data.
 */
public JoinedSubclassEntityPersister(
		final PersistentClass persistentClass,
		final EntityRegionAccessStrategy cacheAccessStrategy,
		final NaturalIdRegionAccessStrategy naturalIdRegionAccessStrategy,
		final SessionFactoryImplementor factory,
		final Mapping mapping) throws HibernateException {
	super( persistentClass, cacheAccessStrategy, naturalIdRegionAccessStrategy, factory );
	// DISCRIMINATOR
	// Joined subclasses use the integer subclass id as a synthetic discriminator so
	// that "foo.class = Bar" style queries work across the hierarchy.
	if ( persistentClass.isPolymorphic() ) {
		try {
			discriminatorValue = persistentClass.getSubclassId();
			discriminatorSQLString = discriminatorValue.toString();
		}
		catch ( Exception e ) {
			throw new MappingException( "Could not format discriminator value to SQL string", e );
		}
	}
	else {
		discriminatorValue = null;
		discriminatorSQLString = null;
	}
	if ( optimisticLockStyle() == OptimisticLockStyle.ALL || optimisticLockStyle() == OptimisticLockStyle.DIRTY ) {
		throw new MappingException( "optimistic-lock=all|dirty not supported for joined-subclass mappings [" + getEntityName() + "]" );
	}
	//MULTITABLES
	// Walk the table closure (this class and its superclasses) collecting table names
	// and primary-key column metadata, in natural (mapping) order.
	final int idColumnSpan = getIdentifierColumnSpan();
	ArrayList tables = new ArrayList();
	ArrayList keyColumns = new ArrayList();
	ArrayList keyColumnReaders = new ArrayList();
	ArrayList keyColumnReaderTemplates = new ArrayList();
	ArrayList cascadeDeletes = new ArrayList();
	Iterator titer = persistentClass.getTableClosureIterator();
	Iterator kiter = persistentClass.getKeyClosureIterator();
	while ( titer.hasNext() ) {
		Table tab = (Table) titer.next();
		KeyValue key = (KeyValue) kiter.next();
		String tabname = tab.getQualifiedName(
				factory.getDialect(),
				factory.getSettings().getDefaultCatalogName(),
				factory.getSettings().getDefaultSchemaName()
		);
		tables.add( tabname );
		String[] keyCols = new String[idColumnSpan];
		String[] keyColReaders = new String[idColumnSpan];
		String[] keyColReaderTemplates = new String[idColumnSpan];
		Iterator citer = key.getColumnIterator();
		for ( int k = 0; k < idColumnSpan; k++ ) {
			Column column = (Column) citer.next();
			keyCols[k] = column.getQuotedName( factory.getDialect() );
			keyColReaders[k] = column.getReadExpr( factory.getDialect() );
			keyColReaderTemplates[k] = column.getTemplate( factory.getDialect(), factory.getSqlFunctionRegistry() );
		}
		keyColumns.add( keyCols );
		keyColumnReaders.add( keyColReaders );
		keyColumnReaderTemplates.add( keyColReaderTemplates );
		cascadeDeletes.add( key.isCascadeDeleteEnabled() && factory.getDialect().supportsCascadeDelete() );
	}
	//Span of the tables directly mapped by this entity and super-classes, if any
	coreTableSpan = tables.size();
	// Secondary tables (<join>) come after the core span; record which are optional.
	isNullableTable = new boolean[persistentClass.getJoinClosureSpan()];
	int tableIndex = 0;
	Iterator joinIter = persistentClass.getJoinClosureIterator();
	while ( joinIter.hasNext() ) {
		Join join = (Join) joinIter.next();
		isNullableTable[tableIndex++] = join.isOptional();
		Table table = join.getTable();
		String tableName = table.getQualifiedName(
				factory.getDialect(),
				factory.getSettings().getDefaultCatalogName(),
				factory.getSettings().getDefaultSchemaName()
		);
		tables.add( tableName );
		KeyValue key = join.getKey();
		int joinIdColumnSpan = key.getColumnSpan();
		String[] keyCols = new String[joinIdColumnSpan];
		String[] keyColReaders = new String[joinIdColumnSpan];
		String[] keyColReaderTemplates = new String[joinIdColumnSpan];
		Iterator citer = key.getColumnIterator();
		for ( int k = 0; k < joinIdColumnSpan; k++ ) {
			Column column = (Column) citer.next();
			keyCols[k] = column.getQuotedName( factory.getDialect() );
			keyColReaders[k] = column.getReadExpr( factory.getDialect() );
			keyColReaderTemplates[k] = column.getTemplate( factory.getDialect(), factory.getSqlFunctionRegistry() );
		}
		keyColumns.add( keyCols );
		keyColumnReaders.add( keyColReaders );
		keyColumnReaderTemplates.add( keyColReaderTemplates );
		cascadeDeletes.add( key.isCascadeDeleteEnabled() && factory.getDialect().supportsCascadeDelete() );
	}
	naturalOrderTableNames = ArrayHelper.toStringArray( tables );
	naturalOrderTableKeyColumns = ArrayHelper.to2DStringArray( keyColumns );
	naturalOrderTableKeyColumnReaders = ArrayHelper.to2DStringArray( keyColumnReaders );
	naturalOrderTableKeyColumnReaderTemplates = ArrayHelper.to2DStringArray( keyColumnReaderTemplates );
	naturalOrderCascadeDeleteEnabled = ArrayHelper.toBooleanArray( cascadeDeletes );
	// Now the full subclass table closure (includes tables of subclasses of this class).
	ArrayList subtables = new ArrayList();
	ArrayList isConcretes = new ArrayList();
	ArrayList isDeferreds = new ArrayList();
	ArrayList isLazies = new ArrayList();
	keyColumns = new ArrayList();
	titer = persistentClass.getSubclassTableClosureIterator();
	while ( titer.hasNext() ) {
		Table tab = (Table) titer.next();
		isConcretes.add( persistentClass.isClassOrSuperclassTable( tab ) );
		isDeferreds.add( Boolean.FALSE );
		isLazies.add( Boolean.FALSE );
		String tabname = tab.getQualifiedName(
				factory.getDialect(),
				factory.getSettings().getDefaultCatalogName(),
				factory.getSettings().getDefaultSchemaName()
		);
		subtables.add( tabname );
		String[] key = new String[idColumnSpan];
		Iterator citer = tab.getPrimaryKey().getColumnIterator();
		for ( int k = 0; k < idColumnSpan; k++ ) {
			key[k] = ( (Column) citer.next() ).getQuotedName( factory.getDialect() );
		}
		keyColumns.add( key );
	}
	//Add joins
	joinIter = persistentClass.getSubclassJoinClosureIterator();
	while ( joinIter.hasNext() ) {
		Join join = (Join) joinIter.next();
		Table tab = join.getTable();
		isConcretes.add( persistentClass.isClassOrSuperclassTable( tab ) );
		isDeferreds.add( join.isSequentialSelect() );
		isLazies.add( join.isLazy() );
		String tabname = tab.getQualifiedName(
				factory.getDialect(),
				factory.getSettings().getDefaultCatalogName(),
				factory.getSettings().getDefaultSchemaName()
		);
		subtables.add( tabname );
		String[] key = new String[idColumnSpan];
		Iterator citer = tab.getPrimaryKey().getColumnIterator();
		for ( int k = 0; k < idColumnSpan; k++ ) {
			key[k] = ( (Column) citer.next() ).getQuotedName( factory.getDialect() );
		}
		keyColumns.add( key );
	}
	String[] naturalOrderSubclassTableNameClosure = ArrayHelper.toStringArray( subtables );
	String[][] naturalOrderSubclassTableKeyColumnClosure = ArrayHelper.to2DStringArray( keyColumns );
	isClassOrSuperclassTable = ArrayHelper.toBooleanArray( isConcretes );
	subclassTableSequentialSelect = ArrayHelper.toBooleanArray( isDeferreds );
	subclassTableIsLazyClosure = ArrayHelper.toBooleanArray( isLazies );
	// Constraint-ordered tables are simply the subclass closure reversed (children first),
	// which is the order required when dropping/creating FK constraints.
	constraintOrderedTableNames = new String[naturalOrderSubclassTableNameClosure.length];
	constraintOrderedKeyColumnNames = new String[naturalOrderSubclassTableNameClosure.length][];
	int currentPosition = 0;
	for ( int i = naturalOrderSubclassTableNameClosure.length - 1; i >= 0; i--, currentPosition++ ) {
		constraintOrderedTableNames[currentPosition] = naturalOrderSubclassTableNameClosure[i];
		constraintOrderedKeyColumnNames[currentPosition] = naturalOrderSubclassTableKeyColumnClosure[i];
	}
	/**
	 * Suppose an entity Client extends Person, mapped to the tables CLIENT and PERSON respectively.
	 * For the Client entity:
	 * naturalOrderTableNames -> PERSON, CLIENT; this reflects the sequence in which the tables are
	 * added to the meta-data when the annotated entities are processed.
	 * However, in some instances, for example when generating joins, the CLIENT table needs to be
	 * the first table as it will be the driving table.
	 * tableNames -> CLIENT, PERSON
	 */
	tableSpan = naturalOrderTableNames.length;
	tableNames = reverse( naturalOrderTableNames, coreTableSpan );
	tableKeyColumns = reverse( naturalOrderTableKeyColumns, coreTableSpan );
	tableKeyColumnReaders = reverse( naturalOrderTableKeyColumnReaders, coreTableSpan );
	tableKeyColumnReaderTemplates = reverse( naturalOrderTableKeyColumnReaderTemplates, coreTableSpan );
	subclassTableNameClosure = reverse( naturalOrderSubclassTableNameClosure, coreTableSpan );
	subclassTableKeyColumnClosure = reverse( naturalOrderSubclassTableKeyColumnClosure, coreTableSpan );
	spaces = ArrayHelper.join(
			tableNames,
			ArrayHelper.toStringArray( persistentClass.getSynchronizedTables() )
	);
	// Custom sql
	customSQLInsert = new String[tableSpan];
	customSQLUpdate = new String[tableSpan];
	customSQLDelete = new String[tableSpan];
	insertCallable = new boolean[tableSpan];
	updateCallable = new boolean[tableSpan];
	deleteCallable = new boolean[tableSpan];
	insertResultCheckStyles = new ExecuteUpdateResultCheckStyle[tableSpan];
	updateResultCheckStyles = new ExecuteUpdateResultCheckStyle[tableSpan];
	deleteResultCheckStyles = new ExecuteUpdateResultCheckStyle[tableSpan];
	// Walk up the superclass chain, filling custom SQL slots coreTableSpan-1 .. 0
	// (the chain is child-to-root, i.e. reverse natural order).
	PersistentClass pc = persistentClass;
	int jk = coreTableSpan - 1;
	while ( pc != null ) {
		customSQLInsert[jk] = pc.getCustomSQLInsert();
		insertCallable[jk] = customSQLInsert[jk] != null && pc.isCustomInsertCallable();
		insertResultCheckStyles[jk] = pc.getCustomSQLInsertCheckStyle() == null
				? ExecuteUpdateResultCheckStyle.determineDefault(
				customSQLInsert[jk], insertCallable[jk]
		)
				: pc.getCustomSQLInsertCheckStyle();
		customSQLUpdate[jk] = pc.getCustomSQLUpdate();
		updateCallable[jk] = customSQLUpdate[jk] != null && pc.isCustomUpdateCallable();
		updateResultCheckStyles[jk] = pc.getCustomSQLUpdateCheckStyle() == null
				? ExecuteUpdateResultCheckStyle.determineDefault( customSQLUpdate[jk], updateCallable[jk] )
				: pc.getCustomSQLUpdateCheckStyle();
		customSQLDelete[jk] = pc.getCustomSQLDelete();
		deleteCallable[jk] = customSQLDelete[jk] != null && pc.isCustomDeleteCallable();
		deleteResultCheckStyles[jk] = pc.getCustomSQLDeleteCheckStyle() == null
				? ExecuteUpdateResultCheckStyle.determineDefault( customSQLDelete[jk], deleteCallable[jk] )
				: pc.getCustomSQLDeleteCheckStyle();
		jk--;
		pc = pc.getSuperclass();
	}
	if ( jk != -1 ) {
		throw new AssertionFailure( "Tablespan does not match height of joined-subclass hiearchy." );
	}
	// Secondary-table custom SQL fills the remaining slots, coreTableSpan onward.
	joinIter = persistentClass.getJoinClosureIterator();
	int j = coreTableSpan;
	while ( joinIter.hasNext() ) {
		Join join = (Join) joinIter.next();
		customSQLInsert[j] = join.getCustomSQLInsert();
		insertCallable[j] = customSQLInsert[j] != null && join.isCustomInsertCallable();
		insertResultCheckStyles[j] = join.getCustomSQLInsertCheckStyle() == null
				? ExecuteUpdateResultCheckStyle.determineDefault( customSQLInsert[j], insertCallable[j] )
				: join.getCustomSQLInsertCheckStyle();
		customSQLUpdate[j] = join.getCustomSQLUpdate();
		updateCallable[j] = customSQLUpdate[j] != null && join.isCustomUpdateCallable();
		updateResultCheckStyles[j] = join.getCustomSQLUpdateCheckStyle() == null
				? ExecuteUpdateResultCheckStyle.determineDefault( customSQLUpdate[j], updateCallable[j] )
				: join.getCustomSQLUpdateCheckStyle();
		customSQLDelete[j] = join.getCustomSQLDelete();
		deleteCallable[j] = customSQLDelete[j] != null && join.isCustomDeleteCallable();
		deleteResultCheckStyles[j] = join.getCustomSQLDeleteCheckStyle() == null
				? ExecuteUpdateResultCheckStyle.determineDefault( customSQLDelete[j], deleteCallable[j] )
				: join.getCustomSQLDeleteCheckStyle();
		j++;
	}
	// PROPERTIES
	// Map each property of this class to its table, in both orderings.
	int hydrateSpan = getPropertySpan();
	naturalOrderPropertyTableNumbers = new int[hydrateSpan];
	propertyTableNumbers = new int[hydrateSpan];
	Iterator iter = persistentClass.getPropertyClosureIterator();
	int i = 0;
	while ( iter.hasNext() ) {
		Property prop = (Property) iter.next();
		String tabname = prop.getValue().getTable().getQualifiedName(
				factory.getDialect(),
				factory.getSettings().getDefaultCatalogName(),
				factory.getSettings().getDefaultSchemaName()
		);
		propertyTableNumbers[i] = getTableId( tabname, tableNames );
		naturalOrderPropertyTableNumbers[i] = getTableId( tabname, naturalOrderTableNames );
		i++;
	}
	// subclass closure properties
	//TODO: code duplication with SingleTableEntityPersister
	ArrayList columnTableNumbers = new ArrayList();
	ArrayList formulaTableNumbers = new ArrayList();
	ArrayList propTableNumbers = new ArrayList();
	iter = persistentClass.getSubclassPropertyClosureIterator();
	while ( iter.hasNext() ) {
		Property prop = (Property) iter.next();
		Table tab = prop.getValue().getTable();
		String tabname = tab.getQualifiedName(
				factory.getDialect(),
				factory.getSettings().getDefaultCatalogName(),
				factory.getSettings().getDefaultSchemaName()
		);
		Integer tabnum = getTableId( tabname, subclassTableNameClosure );
		propTableNumbers.add( tabnum );
		Iterator citer = prop.getColumnIterator();
		while ( citer.hasNext() ) {
			Selectable thing = (Selectable) citer.next();
			if ( thing.isFormula() ) {
				formulaTableNumbers.add( tabnum );
			}
			else {
				columnTableNumbers.add( tabnum );
			}
		}
	}
	subclassColumnTableNumberClosure = ArrayHelper.toIntArray( columnTableNumbers );
	subclassPropertyTableNumberClosure = ArrayHelper.toIntArray( propTableNumbers );
	subclassFormulaTableNumberClosure = ArrayHelper.toIntArray( formulaTableNumbers );
	// SUBCLASSES
	// Build the discriminator tables: this entity occupies the last slot, subclasses fill 0..n-1.
	int subclassSpan = persistentClass.getSubclassSpan() + 1;
	subclassClosure = new String[subclassSpan];
	subclassClosure[subclassSpan - 1] = getEntityName();
	if ( persistentClass.isPolymorphic() ) {
		subclassesByDiscriminatorValue.put( discriminatorValue, getEntityName() );
		discriminatorValues = new String[subclassSpan];
		discriminatorValues[subclassSpan - 1] = discriminatorSQLString;
		notNullColumnTableNumbers = new int[subclassSpan];
		final int id = getTableId(
				persistentClass.getTable().getQualifiedName(
						factory.getDialect(),
						factory.getSettings().getDefaultCatalogName(),
						factory.getSettings().getDefaultSchemaName()
				),
				subclassTableNameClosure
		);
		notNullColumnTableNumbers[subclassSpan - 1] = id;
		notNullColumnNames = new String[subclassSpan];
		notNullColumnNames[subclassSpan - 1] = subclassTableKeyColumnClosure[id][0]; //( (Column) model.getTable().getPrimaryKey().getColumnIterator().next() ).getName();
	}
	else {
		discriminatorValues = null;
		notNullColumnTableNumbers = null;
		notNullColumnNames = null;
	}
	iter = persistentClass.getSubclassIterator();
	int k = 0;
	while ( iter.hasNext() ) {
		Subclass sc = (Subclass) iter.next();
		subclassClosure[k] = sc.getEntityName();
		try {
			if ( persistentClass.isPolymorphic() ) {
				// we now use subclass ids that are consistent across all
				// persisters for a class hierarchy, so that the use of
				// "foo.class = Bar" works in HQL
				Integer subclassId = sc.getSubclassId();
				subclassesByDiscriminatorValue.put( subclassId, sc.getEntityName() );
				discriminatorValues[k] = subclassId.toString();
				int id = getTableId(
						sc.getTable().getQualifiedName(
								factory.getDialect(),
								factory.getSettings().getDefaultCatalogName(),
								factory.getSettings().getDefaultSchemaName()
						),
						subclassTableNameClosure
				);
				notNullColumnTableNumbers[k] = id;
				notNullColumnNames[k] = subclassTableKeyColumnClosure[id][0]; //( (Column) sc.getTable().getPrimaryKey().getColumnIterator().next() ).getName();
			}
		}
		catch ( Exception e ) {
			throw new MappingException( "Error parsing discriminator value", e );
		}
		k++;
	}
	initLockers();
	initSubclassPropertyAliasesMap( persistentClass );
	postConstruct( mapping );
}
/**
 * Metamodel-based constructor - NOT YET IMPLEMENTED. All final fields are stubbed to
 * null/-1 purely to satisfy definite-assignment rules; a persister built this way is
 * not usable.
 */
public JoinedSubclassEntityPersister(
		final EntityBinding entityBinding,
		final EntityRegionAccessStrategy cacheAccessStrategy,
		final NaturalIdRegionAccessStrategy naturalIdRegionAccessStrategy,
		final SessionFactoryImplementor factory,
		final Mapping mapping) throws HibernateException {
	super( entityBinding, cacheAccessStrategy, naturalIdRegionAccessStrategy, factory );
	// TODO: implement!!! initializing final fields to null to make compiler happy
	tableSpan = -1;
	tableNames = null;
	naturalOrderTableNames = null;
	tableKeyColumns = null;
	tableKeyColumnReaders = null;
	tableKeyColumnReaderTemplates = null;
	naturalOrderTableKeyColumns = null;
	naturalOrderTableKeyColumnReaders = null;
	naturalOrderTableKeyColumnReaderTemplates = null;
	naturalOrderCascadeDeleteEnabled = null;
	spaces = null;
	subclassClosure = null;
	subclassTableNameClosure = null;
	subclassTableKeyColumnClosure = null;
	isClassOrSuperclassTable = null;
	naturalOrderPropertyTableNumbers = null;
	propertyTableNumbers = null;
	subclassPropertyTableNumberClosure = null;
	subclassColumnTableNumberClosure = null;
	subclassFormulaTableNumberClosure = null;
	subclassTableSequentialSelect = null;
	subclassTableIsLazyClosure = null;
	discriminatorValues = null;
	notNullColumnNames = null;
	notNullColumnTableNumbers = null;
	constraintOrderedTableNames = null;
	constraintOrderedKeyColumnNames = null;
	discriminatorValue = null;
	discriminatorSQLString = null;
	coreTableSpan = -1;
	isNullableTable = null;
}
/**
 * Whether table {@code j} is optional. Core hierarchy tables are never nullable;
 * only secondary (join) tables, indexed relative to the core span, can be.
 */
protected boolean isNullableTable(int j) {
	return j >= coreTableSpan && isNullableTable[j - coreTableSpan];
}
/**
 * Whether subclass table {@code j} should be fetched by a separate sequential select
 * rather than joined. Tables of this class or its superclasses never qualify.
 */
protected boolean isSubclassTableSequentialSelect(int j) {
	if ( isClassOrSuperclassTable[j] ) {
		return false;
	}
	return subclassTableSequentialSelect[j];
}
/*public void postInstantiate() throws MappingException {
super.postInstantiate();
//TODO: other lock modes?
loader = createEntityLoader(LockMode.NONE, CollectionHelper.EMPTY_MAP);
}*/
/** The qualified name of the table holding subclass-closure property {@code i}. */
public String getSubclassPropertyTableName(int i) {
	final int tableNumber = subclassPropertyTableNumberClosure[i];
	return subclassTableNameClosure[tableNumber];
}
/** The synthetic discriminator is always the integer subclass id. */
public Type getDiscriminatorType() {
	return StandardBasicTypes.INTEGER;
}
/** This entity's subclass id, or null when the mapping is not polymorphic. */
public Object getDiscriminatorValue() {
	return discriminatorValue;
}
/** SQL-literal form of the discriminator value, or null when not polymorphic. */
public String getDiscriminatorSQLValue() {
	return discriminatorSQLString;
}
/** Entity name mapped to the given discriminator value, or null if unknown. */
public String getSubclassForDiscriminatorValue(Object value) {
	return (String) subclassesByDiscriminatorValue.get( value );
}
/** Query spaces: this entity's tables plus explicitly synchronized tables. */
public Serializable[] getPropertySpaces() {
	return spaces; // don't need subclass tables, because they can't appear in conditions
}
/** Table name at position {@code j} in natural (mapping) order. */
protected String getTableName(int j) {
	return naturalOrderTableNames[j];
}
/** Primary-key column names of the table at natural-order position {@code j}. */
protected String[] getKeyColumns(int j) {
	return naturalOrderTableKeyColumns[j];
}
/** Whether the table at natural-order position {@code j} relies on DB-level cascade delete. */
protected boolean isTableCascadeDeleteEnabled(int j) {
	return naturalOrderCascadeDeleteEnabled[j];
}
/** Whether the given property is stored in the table at natural-order position {@code j}. */
protected boolean isPropertyOfTable(int property, int j) {
	return naturalOrderPropertyTableNumbers[property] == j;
}
/**
* Load an instance using either the <tt>forUpdateLoader</tt> or the outer joining <tt>loader</tt>,
* depending upon the value of the <tt>lock</tt> parameter
*/
/*public Object load(Serializable id, Object optionalObject, LockMode lockMode, SessionImplementor session)
throws HibernateException {
if ( log.isTraceEnabled() ) log.trace( "Materializing entity: " + MessageHelper.infoString(this, id) );
final UniqueEntityLoader loader = hasQueryLoader() ?
getQueryLoader() :
this.loader;
try {
final Object result = loader.load(id, optionalObject, session);
if (result!=null) lock(id, getVersion(result), result, lockMode, session);
return result;
}
catch (SQLException sqle) {
throw new JDBCException( "could not load by id: " + MessageHelper.infoString(this, id), sqle );
}
}*/
/**
 * Reverses, in place, the order of the first {@code len} elements of {@code objects};
 * elements at index {@code len} and beyond are untouched.
 *
 * @param objects the array to modify
 * @param len the number of leading elements to reverse
 */
private static final void reverse(Object[] objects, int len) {
	// Swap from both ends toward the middle - no temporary array needed.
	for ( int lo = 0, hi = len - 1; lo < hi; lo++, hi-- ) {
		final Object tmp = objects[lo];
		objects[lo] = objects[hi];
		objects[hi] = tmp;
	}
}
/**
 * Returns a copy of {@code objects} in which the first {@code n} elements appear in
 * reversed order; elements from index {@code n} onward keep their original positions.
 * The input array is not modified.
 *
 * @param objects the source array
 * @param n the number of leading elements to reverse
 *
 * @return New array with the first n elements in reversed order
 */
private static String[] reverse(String[] objects, int n) {
	final int size = objects.length;
	final String[] reversed = new String[size];
	for ( int i = 0; i < n; i++ ) {
		reversed[i] = objects[n - i - 1];
	}
	// Bulk-copy the untouched tail instead of copying element by element.
	System.arraycopy( objects, n, reversed, n, size - n );
	return reversed;
}
/**
 * Returns a copy of {@code objects} in which the first {@code n} rows appear in
 * reversed order; rows from index {@code n} onward keep their original positions.
 * The input array is not modified (rows are shared, not deep-copied).
 *
 * @param objects the source array
 * @param n the number of leading rows to reverse
 *
 * @return New array with the first n rows in reversed order
 */
private static String[][] reverse(String[][] objects, int n) {
	final int size = objects.length;
	final String[][] reversed = new String[size][];
	for ( int i = 0; i < n; i++ ) {
		reversed[i] = objects[n - i - 1];
	}
	// Bulk-copy the untouched tail instead of copying row by row.
	System.arraycopy( objects, n, reversed, n, size - n );
	return reversed;
}
/** FROM-clause fragment: the driving table followed by its alias. */
public String fromTableFragment(String alias) {
	return getTableName() + " " + alias;
}
/** The driving table of the hierarchy (first in driving-table-first order). */
public String getTableName() {
	return tableNames[0];
}
/**
 * Appends the synthetic discriminator (a CASE over the joined tables' key columns)
 * to the select fragment - only needed when this entity actually has subclasses.
 * NOTE(review): the {@code suffix} parameter is unused here - confirm against callers.
 */
public void addDiscriminatorToSelect(SelectFragment select, String name, String suffix) {
	if ( hasSubclasses() ) {
		select.setExtraSelectList( discriminatorFragment( name ), getDiscriminatorAlias() );
	}
}
/**
 * Builds the discriminator CASE expression: for each (sub)class, when its table's key
 * column is non-null in the outer join, the row is of that class.
 */
private CaseFragment discriminatorFragment(String alias) {
	final CaseFragment caseFragment = getFactory().getDialect().createCaseFragment();
	for ( int tableIndex = 0; tableIndex < discriminatorValues.length; tableIndex++ ) {
		final String tableAlias = generateTableAlias( alias, notNullColumnTableNumbers[tableIndex] );
		caseFragment.addWhenColumnNotNull(
				tableAlias,
				notNullColumnNames[tableIndex],
				discriminatorValues[tableIndex]
		);
	}
	return caseFragment;
}
/** WHERE-clause addition from the mapping's where attribute, or "" if none. */
public String filterFragment(String alias) {
	if ( !hasWhere() ) {
		return "";
	}
	return " and " + getSQLWhereString( generateFilterConditionAlias( alias ) );
}
/** Alias of the last table in the span, used to qualify filter conditions. */
public String generateFilterConditionAlias(String rootAlias) {
	return generateTableAlias( rootAlias, tableSpan - 1 );
}
/** Identifier column names of the driving table. */
public String[] getIdentifierColumnNames() {
	return tableKeyColumns[0];
}
public String[] getIdentifierColumnReaderTemplates() {
return tableKeyColumnReaderTemplates[0];
}
public String[] getIdentifierColumnReaders() {
return tableKeyColumnReaders[0];
}
/**
 * Resolves a property name to SQL column expressions for the given alias.
 * The pseudo-property {@code class} (ENTITY_CLASS) maps to the discriminator
 * CASE expression; everything else is delegated to the superclass.
 *
 * @throws QueryException if the property cannot be resolved
 */
public String[] toColumns(String alias, String propertyName) throws QueryException {
    if ( ENTITY_CLASS.equals( propertyName ) ) {
        // This doesn't actually seem to work but it *might*
        // work on some dbs. Also it doesn't work if there
        // are multiple columns of results because it
        // is not accounting for the suffix:
        // return new String[] { getDiscriminatorColumnName() };
        return new String[] { discriminatorFragment( alias ).toFragmentString() };
    }
    else {
        return super.toColumns( alias, propertyName );
    }
}
/** Table numbers, per property, as used in SELECT ordering. */
protected int[] getPropertyTableNumbersInSelect() {
    return propertyTableNumbers;
}

/** Table number owning the i-th property of the subclass closure. */
protected int getSubclassPropertyTableNumber(int i) {
    return subclassPropertyTableNumberClosure[i];
}

/** Number of tables joined for this class and its superclasses. */
public int getTableSpan() {
    return tableSpan;
}

/** A joined-subclass mapping always spans multiple tables. */
public boolean isMultiTable() {
    return true;
}

/** Table number for each column of the subclass closure. */
protected int[] getSubclassColumnTableNumberClosure() {
    return subclassColumnTableNumberClosure;
}

/** Table number for each formula of the subclass closure. */
protected int[] getSubclassFormulaTableNumberClosure() {
    return subclassFormulaTableNumberClosure;
}

/** Table numbers, per property, in natural (mapping) order. */
protected int[] getPropertyTableNumbers() {
    return naturalOrderPropertyTableNumbers;
}

/** Key columns of subclass table {@code j}. */
protected String[] getSubclassTableKeyColumns(int j) {
    return subclassTableKeyColumnClosure[j];
}

/** Name of subclass table {@code j}. */
public String getSubclassTableName(int j) {
    return subclassTableNameClosure[j];
}

/** Number of tables in the subclass closure. */
public int getSubclassTableSpan() {
    return subclassTableNameClosure.length;
}

/** Whether subclass table {@code j} is fetched lazily. */
protected boolean isSubclassTableLazy(int j) {
    return subclassTableIsLazyClosure[j];
}

/** Whether table {@code j} belongs to this class or one of its superclasses. */
protected boolean isClassOrSuperclassTable(int j) {
    return isClassOrSuperclassTable[j];
}
/**
 * Returns the name of the table holding the given property, or {@code null}
 * when the property is not known to this persister.
 *
 * @param propertyName mapped property name to look up
 */
public String getPropertyTableName(String propertyName) {
    final Integer index = getEntityMetamodel().getPropertyIndexOrNull( propertyName );
    return index == null ? null : tableNames[ propertyTableNumbers[ index.intValue() ] ];
}
/** Table names in foreign-key-constraint-safe (drop/create) order. */
public String[] getConstraintOrderedTableNameClosure() {
    return constraintOrderedTableNames;
}

/**
 * Key columns per table, in constraint order.
 * NOTE(review): "Contraint" typo is part of the public API name; callers
 * depend on it, so it must not be renamed here.
 */
public String[][] getContraintOrderedTableKeyColumnClosure() {
    return constraintOrderedKeyColumnNames;
}

/** Name of the root table in natural (mapping) order. */
public String getRootTableName() {
    return naturalOrderTableNames[0];
}

/** Alias of the root table, derived from the given driving alias. */
public String getRootTableAlias(String drivingAlias) {
    return generateTableAlias( drivingAlias, getTableId( getRootTableName(), tableNames ) );
}
/**
 * Declarer for a subclass property path. The pseudo-property {@code class}
 * is forced to {@code SUBCLASS} so that all subclass joins are included;
 * everything else is delegated to the superclass.
 */
public Declarer getSubclassPropertyDeclarer(String propertyPath) {
    if ( "class".equals( propertyPath ) ) {
        // special case where we need to force include all subclass joins
        return Declarer.SUBCLASS;
    }
    return super.getSubclassPropertyDeclarer( propertyPath );
}
/**
 * Locates the table that owns {@code columnName} within the subclass column
 * closure and returns its table number. Quoted column names (wrapped in
 * double quotes) are compared case-sensitively; unquoted names are compared
 * case-insensitively.
 *
 * @throws HibernateException when no subclass column matches
 */
@Override
public int determineTableNumberForColumn(String columnName) {
    final String[] candidates = getSubclassColumnClosure();
    for ( int i = 0, max = candidates.length; i < max; i++ ) {
        final String candidate = candidates[i];
        final boolean quoted = candidate.startsWith( "\"" ) && candidate.endsWith( "\"" );
        final boolean matched = quoted
                ? candidate.equals( columnName )
                : candidate.equalsIgnoreCase( columnName );
        if ( matched ) {
            return getSubclassColumnTableNumberClosure()[i];
        }
    }
    throw new HibernateException( "Could not locate table which owns column [" + columnName + "] referenced in order-by mapping" );
}
/** Alias generator that maps filter conditions onto the subclass table closure. */
@Override
public FilterAliasGenerator getFilterAliasGenerator(String rootAlias) {
    return new DynamicFilterAliasGenerator(subclassTableNameClosure, rootAlias);
}
}
|
|
/*
* Pentaho Data Integration
*
* Copyright (C) 2002-2018 by Hitachi Vantara : http://www.pentaho.com
*
* **************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.pentaho.di.ui.core.dialog;
import org.eclipse.swt.SWT;
import org.eclipse.swt.browser.Browser;
import org.eclipse.swt.browser.LocationEvent;
import org.eclipse.swt.browser.LocationListener;
import org.eclipse.swt.browser.ProgressEvent;
import org.eclipse.swt.browser.ProgressListener;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.events.SelectionListener;
import org.eclipse.swt.events.ShellAdapter;
import org.eclipse.swt.events.ShellEvent;
import org.eclipse.swt.graphics.Color;
import org.eclipse.swt.graphics.Cursor;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.layout.FormAttachment;
import org.eclipse.swt.layout.FormData;
import org.eclipse.swt.layout.FormLayout;
import org.eclipse.swt.program.Program;
import org.eclipse.swt.widgets.Dialog;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.swt.widgets.Text;
import org.eclipse.swt.widgets.ToolBar;
import org.eclipse.swt.widgets.ToolItem;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.ui.core.PropsUI;
import org.pentaho.di.ui.core.gui.GUIResource;
import org.pentaho.di.ui.spoon.Spoon;
import org.pentaho.di.ui.trans.step.BaseStepDialog;
import java.net.MalformedURLException;
import java.net.URL;
/**
 * SWT dialog that embeds a {@link Browser} for viewing Pentaho documentation
 * pages, with Back/Forward/Refresh/Home/Print toolbar buttons and a URL text
 * field. The dialog blocks in its own event loop until closed.
 */
public class ShowHelpDialog extends Dialog {
  private static Class<?> PKG = Spoon.class;
  // "Home" target: the product documentation landing page.
  private static final String DOC_URL = Spoon.DOCUMENTATION_URL;
  // Help pages on the PREFIX host have a printer-friendly twin on PRINT_PREFIX.
  private static final String PREFIX = "https://help";
  private static final String PRINT_PREFIX = "https://f1.help";
  private static final String PRINT_SCRIPT = "javascript:window.print();";
  // Layout constants, in pixels.
  private static final int TOOLBAR_HEIGHT = 25;
  private static final int TOOL_ITEM_WIDTH = 47;
  private static final int TOOL_ITEM_SPACING = 4;
  private static final int MARGIN = 5;
  // Set while navigating to the printer-friendly URL so the print script runs
  // once that page has finished loading (see print() / the ProgressListener).
  private boolean fromPrint;
  private String dialogTitle;
  private String url;
  // Resolved home page; stays null if DOC_URL fails to parse (home() falls back to url).
  private String homeURL;
  private Browser wBrowser;
  private ToolBar toolBarBack;
  private ToolItem tltmBack;
  private ToolBar toolBarForward;
  private ToolItem tltmForward;
  private ToolItem tltmRefresh;
  private ToolItem tltmHome;
  private ToolItem tltmPrint;
  private Image imageBackEnabled;
  private Image imageBackDisabled;
  private Image imageForwardEnabled;
  private Image imageForwardDisabled;
  private Image imageRefreshEnabled;
  private Image imageRefreshDisabled;
  private Image imageHomeEnabled;
  private Image imageHomeDisabled;
  private Image imagePrintEnabled;
  private Image imagePrintDisabled;
  private Text textURL;
  // NOTE(review): these Cursors (and the Color created in open()) are never
  // disposed; SWT graphics resources leak each time the dialog is opened.
  private Cursor cursorEnabled;
  private Cursor cursorDisabled;
  private Shell shell;
  private Display display;
  private PropsUI props;

  /**
   * @param parent      owning shell
   * @param dialogTitle NOTE(review): accepted but ignored — the title is always
   *                    taken from the "Spoon.Documentation.Pentaho.Title"
   *                    message bundle entry. Confirm whether that is intended.
   * @param url         page to open initially
   * @param header      NOTE(review): unused parameter.
   */
  public ShowHelpDialog( Shell parent, String dialogTitle, String url, String header ) {
    super( parent, SWT.NONE );
    props = PropsUI.getInstance();
    this.dialogTitle = BaseMessages.getString( PKG, "Spoon.Documentation.Pentaho.Title" );
    this.url = url;
    try {
      this.homeURL = new URL( DOC_URL ).toString();
    } catch ( MalformedURLException e ) {
      // Intentionally swallowed: homeURL stays null and home() falls back to
      // the initial url. NOTE(review): consider at least logging this.
    }
  }

  /** Convenience constructor with an empty header. */
  public ShowHelpDialog( Shell parent, String dialogTitle, String url ) {
    this( parent, dialogTitle, url, "" );
  }

  /** Creates the dialog shell; overridable for testing/customization. */
  protected Shell createShell( Shell parent ) {
    return new Shell( parent, SWT.RESIZE | SWT.MAX | SWT.MIN | SWT.DIALOG_TRIM );
  }

  /**
   * Builds the UI (browser, toolbar buttons, URL field), loads the initial
   * page and runs the SWT event loop until the shell is disposed.
   */
  public void open() {
    Shell parent = getParent();
    display = parent.getDisplay();
    shell = createShell( parent );
    shell.setImage( GUIResource.getInstance().getImageSpoon() );
    props.setLook( shell );
    FormLayout formLayout = new FormLayout();
    shell.setLayout( formLayout );
    shell.setText( dialogTitle );
    //Set Images
    setImages();
    // Canvas
    // Browser fills everything below the toolbar row.
    wBrowser = new Browser( shell, SWT.NONE );
    props.setLook( wBrowser );
    FormData fdBrowser = new FormData();
    fdBrowser.top = new FormAttachment( 0, TOOLBAR_HEIGHT + MARGIN );
    fdBrowser.right = new FormAttachment( 100, 0 );
    fdBrowser.bottom = new FormAttachment( 100, 0 );
    fdBrowser.left = new FormAttachment( 0, 0 );
    wBrowser.setLayoutData( fdBrowser );
    // Each button lives in its own one-item ToolBar so it can be positioned
    // and given its own cursor independently.
    toolBarBack = new ToolBar( shell, SWT.FLAT );
    FormData fdtoolBarBack = new FormData();
    fdtoolBarBack.top = new FormAttachment( 0, MARGIN );
    fdtoolBarBack.right = new FormAttachment( 0, 27 );
    fdtoolBarBack.bottom = new FormAttachment( 0, TOOLBAR_HEIGHT );
    fdtoolBarBack.left = new FormAttachment( 0, MARGIN + 1 );
    toolBarBack.setLayoutData( fdtoolBarBack );
    toolBarBack.setCursor( cursorDisabled );
    toolBarBack.setBackground( toolBarBack.getParent().getBackground() );
    tltmBack = new ToolItem( toolBarBack, SWT.NONE );
    tltmBack.setImage( imageBackEnabled );
    tltmBack.setDisabledImage( imageBackDisabled );
    tltmBack.setToolTipText( BaseMessages.getString( PKG, "Spoon.Documentation.Tooltip.Back" ) );
    // Back/Forward start disabled; enabled after navigation (setForwardBackEnable).
    tltmBack.setEnabled( false );
    toolBarForward = new ToolBar( shell, SWT.FLAT );
    FormData fdtoolBarForward = new FormData();
    fdtoolBarForward.top = new FormAttachment( 0, MARGIN );
    fdtoolBarForward.right = new FormAttachment( toolBarBack, TOOL_ITEM_WIDTH );
    fdtoolBarForward.bottom = new FormAttachment( 0, TOOLBAR_HEIGHT );
    fdtoolBarForward.left = new FormAttachment( toolBarBack, TOOL_ITEM_SPACING );
    toolBarForward.setLayoutData( fdtoolBarForward );
    toolBarForward.setCursor( cursorDisabled );
    toolBarForward.setBackground( toolBarForward.getParent().getBackground() );
    tltmForward = new ToolItem( toolBarForward, SWT.NONE );
    tltmForward.setImage( imageForwardEnabled );
    tltmForward.setDisabledImage( imageForwardDisabled );
    tltmForward.setToolTipText( BaseMessages.getString( PKG, "Spoon.Documentation.Tooltip.Forward" ) );
    tltmForward.setEnabled( false );
    ToolBar toolBarRefresh = new ToolBar( shell, SWT.FLAT );
    FormData fdtoolBarRefresh = new FormData();
    fdtoolBarRefresh.top = new FormAttachment( 0, MARGIN );
    fdtoolBarRefresh.right = new FormAttachment( toolBarForward, TOOL_ITEM_WIDTH );
    fdtoolBarRefresh.bottom = new FormAttachment( 0, TOOLBAR_HEIGHT );
    fdtoolBarRefresh.left = new FormAttachment( toolBarForward, TOOL_ITEM_SPACING - 1 );
    toolBarRefresh.setLayoutData( fdtoolBarRefresh );
    toolBarRefresh.setCursor( cursorEnabled );
    toolBarRefresh.setBackground( toolBarRefresh.getParent().getBackground() );
    tltmRefresh = new ToolItem( toolBarRefresh, SWT.NONE );
    tltmRefresh.setImage( imageRefreshEnabled );
    tltmRefresh.setDisabledImage( imageRefreshDisabled );
    tltmRefresh.setToolTipText( BaseMessages.getString( PKG, "Spoon.Documentation.Tooltip.Refresh" ) );
    tltmRefresh.setEnabled( true );
    ToolBar toolBarHome = new ToolBar( shell, SWT.FLAT );
    FormData fdtoolBarHome = new FormData();
    fdtoolBarHome.top = new FormAttachment( 0, MARGIN );
    fdtoolBarHome.right = new FormAttachment( toolBarRefresh, TOOL_ITEM_WIDTH );
    fdtoolBarHome.bottom = new FormAttachment( 0, TOOLBAR_HEIGHT );
    fdtoolBarHome.left = new FormAttachment( toolBarRefresh, TOOL_ITEM_SPACING );
    toolBarHome.setLayoutData( fdtoolBarHome );
    toolBarHome.setCursor( cursorEnabled );
    toolBarHome.setBackground( toolBarHome.getParent().getBackground() );
    tltmHome = new ToolItem( toolBarHome, SWT.NONE );
    tltmHome.setImage( imageHomeEnabled );
    tltmHome.setDisabledImage( imageHomeDisabled );
    tltmHome.setToolTipText( BaseMessages.getString( PKG, "Spoon.Documentation.Tooltip.Home" ) );
    tltmHome.setEnabled( true );
    // Print button is pinned to the right edge of the shell.
    ToolBar toolBarPrint = new ToolBar( shell, SWT.FLAT );
    FormData fdtoolBarPrint = new FormData();
    fdtoolBarPrint.top = new FormAttachment( 0, MARGIN );
    fdtoolBarPrint.right = new FormAttachment( 100, -7 );
    fdtoolBarPrint.bottom = new FormAttachment( 0, TOOLBAR_HEIGHT );
    toolBarPrint.setLayoutData( fdtoolBarPrint );
    toolBarPrint.setCursor( cursorEnabled );
    toolBarPrint.setBackground( toolBarPrint.getParent().getBackground() );
    tltmPrint = new ToolItem( toolBarPrint, SWT.NONE );
    tltmPrint.setImage( imagePrintEnabled );
    tltmPrint.setDisabledImage( imagePrintDisabled );
    tltmPrint.setToolTipText( BaseMessages.getString( PKG, "Spoon.Documentation.Tooltip.Print" ) );
    tltmPrint.setEnabled( true );
    // URL field stretches between the Home and Print buttons.
    textURL = new Text( shell, SWT.BORDER );
    FormData fdtext = new FormData();
    fdtext.top = new FormAttachment( 0, MARGIN );
    fdtext.right = new FormAttachment( toolBarPrint, -7 );
    fdtext.bottom = new FormAttachment( 0, 25 );
    fdtext.left = new FormAttachment( toolBarHome, TOOL_ITEM_SPACING );
    textURL.setLayoutData( fdtext );
    // NOTE(review): this Color is never disposed (SWT resource leak).
    textURL.setForeground( new Color( display, 101, 101, 101 ) );
    wBrowser.setUrl( url );
    setUpListeners();
    // Specs are 760/530, but due to rendering differences, we need to adjust the actual hgt/wdt used
    BaseStepDialog.setSize( shell, 755, 538, true );
    shell.setMinimumSize( 515, 408 );
    shell.open();
    // Standard SWT modal loop: pump events until the shell is disposed.
    while ( !shell.isDisposed() ) {
      if ( !display.readAndDispatch() ) {
        display.sleep();
      }
    }
  }

  /** Loads toolbar icons from the shared GUIResource cache and creates the cursors. */
  private void setImages() {
    imageBackEnabled = GUIResource.getInstance().getImageBackEnabled();
    imageBackDisabled = GUIResource.getInstance().getImageBackDisabled();
    imageForwardEnabled = GUIResource.getInstance().getImageForwardEnabled();
    imageForwardDisabled = GUIResource.getInstance().getImageForwardDisabled();
    imageRefreshEnabled = GUIResource.getInstance().getImageRefreshEnabled();
    imageRefreshDisabled = GUIResource.getInstance().getImageRefreshDisabled();
    imageHomeEnabled = GUIResource.getInstance().getImageHomeEnabled();
    imageHomeDisabled = GUIResource.getInstance().getImageHomeDisabled();
    imagePrintEnabled = GUIResource.getInstance().getImagePrintEnabled();
    imagePrintDisabled = GUIResource.getInstance().getImagePrintDisabled();
    cursorEnabled = new Cursor( display, SWT.CURSOR_HAND );
    cursorDisabled = new Cursor( display, SWT.CURSOR_ARROW );
  }

  /** Wires all button, browser and shell listeners. */
  private void setUpListeners() {
    setUpSelectionListeners();
    addProgressAndLocationListener();
    addShellListener();
  }

  /** One SelectionListener per toolbar button, each delegating to a private action method. */
  private void setUpSelectionListeners() {
    SelectionListener selectionListenerBack = new SelectionListener() {
      @Override
      public void widgetSelected( SelectionEvent arg0 ) {
        back();
      }
      @Override
      public void widgetDefaultSelected( SelectionEvent arg0 ) {
      }
    };
    SelectionListener selectionListenerForward = new SelectionListener() {
      @Override
      public void widgetSelected( SelectionEvent arg0 ) {
        forward();
      }
      @Override
      public void widgetDefaultSelected( SelectionEvent arg0 ) {
      }
    };
    SelectionListener selectionListenerRefresh = new SelectionListener() {
      @Override
      public void widgetSelected( SelectionEvent arg0 ) {
        refresh();
      }
      @Override
      public void widgetDefaultSelected( SelectionEvent arg0 ) {
      }
    };
    SelectionListener selectionListenerHome = new SelectionListener() {
      @Override
      public void widgetSelected( SelectionEvent arg0 ) {
        home();
      }
      @Override
      public void widgetDefaultSelected( SelectionEvent arg0 ) {
      }
    };
    SelectionListener selectionListenerPrint = new SelectionListener() {
      @Override
      public void widgetSelected( SelectionEvent arg0 ) {
        print();
      }
      @Override
      public void widgetDefaultSelected( SelectionEvent arg0 ) {
      }
    };
    tltmBack.addSelectionListener( selectionListenerBack );
    tltmForward.addSelectionListener( selectionListenerForward );
    tltmRefresh.addSelectionListener( selectionListenerRefresh );
    tltmHome.addSelectionListener( selectionListenerHome );
    tltmPrint.addSelectionListener( selectionListenerPrint );
  }

  /**
   * Progress listener: after a page finishes loading, fires the deferred print
   * script (if a print navigation is pending) and refreshes Back/Forward state.
   * Location listener: opens PDFs externally and mirrors the URL into the text field.
   */
  private void addProgressAndLocationListener() {
    ProgressListener progressListener = new ProgressListener() {
      @Override
      public void changed( ProgressEvent event ) {
      }
      @Override
      public void completed( ProgressEvent event ) {
        if ( fromPrint ) {
          // The printer-friendly page has loaded; trigger the print dialog once.
          wBrowser.execute( PRINT_SCRIPT );
          fromPrint = false;
        }
        setForwardBackEnable();
      }
    };
    LocationListener listener = new LocationListener() {
      @Override
      public void changing( LocationEvent event ) {
        if ( event.location.endsWith( ".pdf" ) ) {
          // PDFs are handed to the OS-registered viewer instead of the embedded browser.
          Program.launch( event.location );
          event.doit = false;
        }
      }
      @Override
      public void changed( LocationEvent event ) {
        textURL.setText( event.location );
      }
    };
    wBrowser.addProgressListener( progressListener );
    wBrowser.addLocationListener( listener );
  }

  private void addShellListener() {
    // Detect [X] or ALT-F4 or something that kills this window...
    shell.addShellListener( new ShellAdapter() {
      public void shellClosed( ShellEvent e ) {
        ok();
      }
    } );
  }

  /** Navigates one step back in the browser history. */
  private void back() {
    wBrowser.back();
  }

  /** Navigates one step forward in the browser history. */
  private void forward() {
    wBrowser.forward();
  }

  /** Reloads the current page. */
  private void refresh() {
    wBrowser.refresh();
  }

  /** Navigates to the documentation home page (or the initial url if DOC_URL failed to parse). */
  private void home() {
    wBrowser.setUrl( homeURL != null ? homeURL : url );
  }

  /**
   * Prints the current page. Help-site pages are first redirected to their
   * printer-friendly twin (PRINT_PREFIX host); the print script then runs from
   * the ProgressListener once that page loads. Other pages print directly.
   */
  private void print() {
    String printURL = wBrowser.getUrl();
    if ( printURL.startsWith( PREFIX ) ) {
      printURL = printURL.replace( PREFIX, PRINT_PREFIX );
      fromPrint = true;
      wBrowser.setUrl( printURL );
    } else {
      wBrowser.execute( PRINT_SCRIPT );
    }
  }

  /** Syncs Back/Forward button state with the browser's history. */
  private void setForwardBackEnable() {
    setBackEnable( wBrowser.isBackEnabled() );
    setForwardEnable( wBrowser.isForwardEnabled() );
  }

  private void setBackEnable( boolean enable ) {
    tltmBack.setEnabled( enable );
    toolBarBack.setCursor( enable ? cursorEnabled : cursorDisabled );
  }

  private void setForwardEnable( boolean enable ) {
    tltmForward.setEnabled( enable );
    toolBarForward.setCursor( enable ? cursorEnabled : cursorDisabled );
  }

  /** Disposes the shell, which ends the event loop in open(). */
  public void dispose() {
    shell.dispose();
  }

  private void ok() {
    dispose();
  }
}
|
|
/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jetbrains.python;
import com.intellij.codeInsight.controlflow.ControlFlow;
import com.intellij.codeInsight.controlflow.Instruction;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.VfsUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.jetbrains.python.codeInsight.controlflow.ControlFlowCache;
import com.jetbrains.python.codeInsight.controlflow.ScopeOwner;
import com.jetbrains.python.fixtures.LightMarkedTestCase;
import com.jetbrains.python.fixtures.PyTestCase;
import com.jetbrains.python.psi.LanguageLevel;
import com.jetbrains.python.psi.PyClass;
import com.jetbrains.python.psi.PyFile;
import com.jetbrains.python.psi.PyFunction;
import junit.framework.Assert;
import java.io.IOException;
/**
* @author oleg
*/
public class PyControlFlowBuilderTest extends LightMarkedTestCase {
@Override
public String getTestDataPath() {
return PythonTestUtil.getTestDataPath() + "/codeInsight/controlflow/";
}
private void doTest() {
final String testName = getTestName(false).toLowerCase();
configureByFile(testName + ".py");
final ControlFlow flow = ControlFlowCache.getControlFlow((PyFile)myFile);
final String fullPath = getTestDataPath() + testName + ".txt";
check(fullPath, flow);
}
public void testAssert() {
doTest();
}
public void testAssertFalse() {
doTest();
}
public void testFile() {
doTest();
}
public void testIf() {
doTest();
}
public void testFor() {
doTest();
}
public void testWhile() {
doTest();
}
public void testBreak() {
doTest();
}
public void testContinue() {
doTest();
}
public void testReturn() {
doTest();
}
public void testTry() {
doTest();
}
public void testImport() {
doTest();
}
public void testListComp() {
doTest();
}
public void testAssignment() {
doTest();
}
public void testAssignment2() {
doTest();
}
public void testAugAssignment() {
doTest();
}
public void testSliceAssignment() {
doTest();
}
public void testIfElseReturn() {
doTest();
}
public void testRaise() {
doTest();
}
public void testReturnFor() {
doTest();
}
public void testForIf() {
doTest();
}
public void testForReturn() {
doTest();
}
public void testForTryContinue() {
doTest();
}
public void testTryRaiseFinally() {
doTest();
}
public void testTryExceptElseFinally() {
doTest();
}
public void testTryFinally() {
doTest();
}
public void testDoubleTry() {
doTest();
}
public void testTryTry() {
doTest();
}
public void testIsinstance() {
doTest();
}
public void testLambda() {
doTest();
}
public void testManyIfs() {
doTest();
}
public void testSuperclass() {
doTest();
}
public void testDefaultParameterValue() {
doTest();
}
public void testLambdaDefaultParameter() {
doTest();
}
public void testDecorator() {
doTestFirstStatement();
}
public void testSetComprehension() {
doTest();
}
public void testTypeAnnotations() {
setLanguageLevel(LanguageLevel.PYTHON30);
try {
doTest();
}
finally {
setLanguageLevel(null);
}
}
public void testQualifiedSelfReference() {
final String testName = getTestName(false).toLowerCase();
configureByFile(testName + ".py");
final String fullPath = getTestDataPath() + testName + ".txt";
final PyClass pyClass = ((PyFile) myFile).getTopLevelClasses().get(0);
final ControlFlow flow = ControlFlowCache.getControlFlow(pyClass.getMethods()[0]);
check(fullPath, flow);
}
public void testSelf() {
final String testName = getTestName(false).toLowerCase();
configureByFile(testName + ".py");
final String fullPath = getTestDataPath() + testName + ".txt";
final PyClass pyClass = ((PyFile) myFile).getTopLevelClasses().get(0);
final ControlFlow flow = ControlFlowCache.getControlFlow(pyClass.getMethods()[0]);
check(fullPath, flow);
}
public void testTryBreak() {
final String testName = getTestName(false).toLowerCase();
configureByFile(testName + ".py");
final ControlFlow flow = ControlFlowCache.getControlFlow((PyFunction)((PyFile)myFile).getStatements().get(0));
final String fullPath = getTestDataPath() + testName + ".txt";
check(fullPath, flow);
}
public void testFunction() {
doTestFirstStatement();
}
// PY-7784
public void testAssertFalseArgument() {
doTest();
}
public void testConditionalExpression() {
doTest();
}
private void doTestFirstStatement() {
final String testName = getTestName(false).toLowerCase();
configureByFile(testName + ".py");
final String fullPath = getTestDataPath() + testName + ".txt";
final ControlFlow flow = ControlFlowCache.getControlFlow((ScopeOwner)((PyFile)myFile).getStatements().get(0));
check(fullPath, flow);
}
private static void check(final String fullPath, final ControlFlow flow) {
final StringBuffer buffer = new StringBuffer();
final Instruction[] instructions = flow.getInstructions();
for (Instruction instruction : instructions) {
buffer.append(instruction).append("\n");
}
final VirtualFile vFile = PyTestCase.getVirtualFileByName(fullPath);
try {
final String fileText = StringUtil.convertLineSeparators(VfsUtil.loadText(vFile), "\n");
Assert.assertEquals(fileText.trim(), buffer.toString().trim());
}
catch (IOException e) {
throw new RuntimeException(e);
}
}
}
|
|
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.devicefarm.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
* <p>
* Returns the transaction log of the specified offerings.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/ListOfferingTransactions"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
/**
 * Result object for the ListOfferingTransactions API call: the transaction
 * log of the specified offerings, plus an optional pagination token.
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/ListOfferingTransactions"
 *      target="_top">AWS API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ListOfferingTransactionsResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /** Audit log of subscriptions purchased and modified through AWS Device Farm. */
    private java.util.List<OfferingTransaction> offeringTransactions;

    /** Token returned by a previous call; identifies the next page of results. */
    private String nextToken;

    /**
     * @return the audit log of subscriptions you have purchased and modified
     *         through AWS Device Farm.
     */
    public java.util.List<OfferingTransaction> getOfferingTransactions() {
        return offeringTransactions;
    }

    /**
     * Replaces the transaction list with a defensive copy of the given
     * collection; {@code null} clears it.
     *
     * @param offeringTransactions the audit log of subscriptions, or {@code null}
     */
    public void setOfferingTransactions(java.util.Collection<OfferingTransaction> offeringTransactions) {
        this.offeringTransactions = offeringTransactions == null
                ? null
                : new java.util.ArrayList<OfferingTransaction>(offeringTransactions);
    }

    /**
     * Appends the given transactions, creating the backing list on first use,
     * and returns {@code this} for chaining.
     * <p>
     * <b>NOTE:</b> this method appends to any existing list. Use
     * {@link #setOfferingTransactions(java.util.Collection)} or
     * {@link #withOfferingTransactions(java.util.Collection)} to replace the
     * existing values instead.
     *
     * @param offeringTransactions transactions to append
     * @return this result, for call chaining
     */
    public ListOfferingTransactionsResult withOfferingTransactions(OfferingTransaction... offeringTransactions) {
        if (this.offeringTransactions == null) {
            this.offeringTransactions = new java.util.ArrayList<OfferingTransaction>(offeringTransactions.length);
        }
        java.util.Collections.addAll(this.offeringTransactions, offeringTransactions);
        return this;
    }

    /**
     * Replaces the transaction list (see
     * {@link #setOfferingTransactions(java.util.Collection)}) and returns
     * {@code this} for chaining.
     *
     * @param offeringTransactions the audit log of subscriptions
     * @return this result, for call chaining
     */
    public ListOfferingTransactionsResult withOfferingTransactions(java.util.Collection<OfferingTransaction> offeringTransactions) {
        setOfferingTransactions(offeringTransactions);
        return this;
    }

    /**
     * @param nextToken identifier returned from the previous call, used to
     *                  fetch the next set of items
     */
    public void setNextToken(String nextToken) {
        this.nextToken = nextToken;
    }

    /**
     * @return identifier returned from the previous call, used to fetch the
     *         next set of items
     */
    public String getNextToken() {
        return this.nextToken;
    }

    /**
     * Sets the pagination token and returns {@code this} for chaining.
     *
     * @param nextToken identifier returned from the previous call
     * @return this result, for call chaining
     */
    public ListOfferingTransactionsResult withNextToken(String nextToken) {
        setNextToken(nextToken);
        return this;
    }

    /**
     * Returns a string representation of this object, useful for testing and
     * debugging. Only non-null members are included.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        if (getOfferingTransactions() != null) {
            sb.append("OfferingTransactions: ").append(getOfferingTransactions()).append(",");
        }
        if (getNextToken() != null) {
            sb.append("NextToken: ").append(getNextToken());
        }
        return sb.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // instanceof is false for null, so no separate null check is needed.
        if (!(obj instanceof ListOfferingTransactionsResult)) {
            return false;
        }
        ListOfferingTransactionsResult other = (ListOfferingTransactionsResult) obj;
        return java.util.Objects.equals(getOfferingTransactions(), other.getOfferingTransactions())
                && java.util.Objects.equals(getNextToken(), other.getNextToken());
    }

    @Override
    public int hashCode() {
        // Objects.hash uses the same 31-based accumulation (seed 1) as the
        // previous hand-rolled loop, so values are unchanged.
        return java.util.Objects.hash(getOfferingTransactions(), getNextToken());
    }

    @Override
    public ListOfferingTransactionsResult clone() {
        try {
            return (ListOfferingTransactionsResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jclouds.compute.stub.config;
import static com.google.common.base.Predicates.in;
import static com.google.common.collect.Iterables.find;
import static com.google.common.collect.Maps.filterKeys;
import static org.jclouds.compute.util.ComputeServiceUtils.formatStatus;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.jclouds.Constants;
import org.jclouds.compute.JCloudsNativeComputeServiceAdapter;
import org.jclouds.compute.domain.Hardware;
import org.jclouds.compute.domain.Image;
import org.jclouds.compute.domain.ImageBuilder;
import org.jclouds.compute.domain.NodeMetadata;
import org.jclouds.compute.domain.NodeMetadata.Status;
import org.jclouds.compute.domain.NodeMetadataBuilder;
import org.jclouds.compute.domain.OperatingSystem;
import org.jclouds.compute.domain.OsFamily;
import org.jclouds.compute.domain.SecurityGroup;
import org.jclouds.compute.domain.Template;
import org.jclouds.compute.extensions.SecurityGroupExtension;
import org.jclouds.compute.predicates.ImagePredicates;
import org.jclouds.domain.Location;
import org.jclouds.domain.LoginCredentials;
import org.jclouds.location.suppliers.all.JustProvider;
import org.jclouds.rest.ResourceNotFoundException;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import com.google.common.util.concurrent.ListeningExecutorService;
/**
 * In-memory {@link JCloudsNativeComputeServiceAdapter} backing the stub compute provider used in
 * tests. Nodes are kept in a shared {@link ConcurrentMap} and lifecycle transitions (PENDING,
 * RUNNING, TERMINATED, ...) are simulated with short delays on a background executor.
 */
@Singleton
public class StubComputeServiceAdapter implements JCloudsNativeComputeServiceAdapter {
   private final Supplier<Location> location;
   private final ConcurrentMap<String, NodeMetadata> nodes;
   private final Multimap<String, SecurityGroup> groupsForNodes;
   private final ListeningExecutorService executor;
   private final Provider<Integer> idProvider;
   private final String publicIpPrefix;
   private final String privateIpPrefix;
   private final String passwordPrefix;
   private final Supplier<Set<? extends Location>> locationSupplier;
   private final Map<OsFamily, Map<String, String>> osToVersionMap;
   private final Optional<SecurityGroupExtension> securityGroupExtension;

   @Inject
   public StubComputeServiceAdapter(ConcurrentMap<String, NodeMetadata> nodes,
         @Named(Constants.PROPERTY_USER_THREADS) ListeningExecutorService executor, Supplier<Location> location,
         @Named("NODE_ID") Provider<Integer> idProvider, @Named("PUBLIC_IP_PREFIX") String publicIpPrefix,
         @Named("PRIVATE_IP_PREFIX") String privateIpPrefix, @Named("PASSWORD_PREFIX") String passwordPrefix,
         JustProvider locationSupplier, Map<OsFamily, Map<String, String>> osToVersionMap,
         Multimap<String, SecurityGroup> groupsForNodes, Optional<SecurityGroupExtension> securityGroupExtension) {
      this.nodes = nodes;
      this.executor = executor;
      this.location = location;
      this.idProvider = idProvider;
      this.publicIpPrefix = publicIpPrefix;
      this.privateIpPrefix = privateIpPrefix;
      this.passwordPrefix = passwordPrefix;
      this.locationSupplier = locationSupplier;
      this.osToVersionMap = osToVersionMap;
      this.groupsForNodes = groupsForNodes;
      this.securityGroupExtension = securityGroupExtension;
   }

   /** Replaces the stored node with a copy whose status is {@code status}. */
   protected void setStateOnNode(Status status, NodeMetadata node) {
      nodes.put(node.getId(), NodeMetadataBuilder.fromNodeMetadata(node).status(status).build());
   }

   /**
    * Applies a status change after {@code millis} milliseconds; when the delay is zero the change
    * is applied synchronously, otherwise it is scheduled on the executor.
    */
   protected void setStateOnNodeAfterDelay(final Status status, final NodeMetadata node, final long millis) {
      // use an uppercase long suffix: "0l" is easily misread as "01"
      if (millis == 0L)
         setStateOnNode(status, node);
      else
         executor.execute(new Runnable() {
            @Override
            public void run() {
               try {
                  Thread.sleep(millis);
               } catch (InterruptedException e) {
                  // restore the interrupt flag so the executor can observe the interruption
                  Thread.currentThread().interrupt();
                  throw Throwables.propagate(e);
               }
               setStateOnNode(status, node);
            }
         });
   }

   @Override
   public NodeWithInitialCredentials createNodeWithGroupEncodedIntoName(String group, String name, Template template) {
      NodeMetadataBuilder builder = new NodeMetadataBuilder();
      String id = idProvider.get() + "";
      builder.ids(id);
      builder.name(name);
      // using a predictable name so tests will pass
      builder.hostname(group);
      builder.tags(template.getOptions().getTags());
      builder.userMetadata(template.getOptions().getUserMetadata());
      builder.group(group);
      builder.location(location.get());
      builder.imageId(template.getImage().getId());
      builder.operatingSystem(template.getImage().getOperatingSystem());
      builder.status(Status.PENDING);
      builder.publicAddresses(ImmutableSet.<String> of(publicIpPrefix + id));
      builder.privateAddresses(ImmutableSet.<String> of(privateIpPrefix + id));
      builder.credentials(LoginCredentials.builder().user("root").password(passwordPrefix + id).build());
      NodeMetadata node = builder.build();
      nodes.put(node.getId(), node);
      // only consult the extension when it is actually bound; calling get() on an absent
      // Optional would throw IllegalStateException
      if (!template.getOptions().getGroups().isEmpty() && securityGroupExtension.isPresent()) {
         final String groupId = Iterables.getFirst(template.getOptions().getGroups(), "0");
         Optional<SecurityGroup> secGroup = Iterables.tryFind(securityGroupExtension.get().listSecurityGroups(),
               new Predicate<SecurityGroup>() {
                  @Override
                  public boolean apply(SecurityGroup input) {
                     return input.getId().equals(groupId);
                  }
               });
         if (secGroup.isPresent()) {
            groupsForNodes.put(node.getId(), secGroup.get());
         }
      }
      setStateOnNodeAfterDelay(Status.RUNNING, node, 100);
      return new NodeWithInitialCredentials(node);
   }

   @Override
   public Iterable<Hardware> listHardwareProfiles() {
      return ImmutableSet.<Hardware> of(StubComputeServiceDependenciesModule.stub("small", 1, 1740, 160),
            StubComputeServiceDependenciesModule.stub("medium", 4, 7680, 850),
            StubComputeServiceDependenciesModule.stub("large", 8, 15360, 1690));
   }

   @Override
   public Iterable<Image> listImages() {
      // initializing as a List, as ImmutableSet does not allow you to put
      // duplicates
      Builder<Image> images = ImmutableList.builder();
      int id = 1;
      for (boolean is64Bit : new boolean[] { true, false })
         for (Entry<OsFamily, Map<String, String>> osVersions : this.osToVersionMap.entrySet()) {
            for (String version : ImmutableSet.copyOf(osVersions.getValue().values())) {
               String desc = String.format("stub %s %s", osVersions.getKey(), is64Bit);
               images.add(new ImageBuilder().ids(id++ + "").name(osVersions.getKey().name()).location(location.get())
                     .operatingSystem(new OperatingSystem(osVersions.getKey(), desc, version, null, desc, is64Bit))
                     .description(desc).status(Image.Status.AVAILABLE).build());
            }
         }
      return images.build();
   }

   /** Returns the image with the given id, or {@code null} when not found. */
   @Override
   public Image getImage(String id) {
      return find(listImages(), ImagePredicates.idEquals(id), null);
   }

   @Override
   public Iterable<NodeMetadata> listNodes() {
      return nodes.values();
   }

   @Override
   public Iterable<NodeMetadata> listNodesByIds(Iterable<String> ids) {
      return filterKeys(nodes, in(ImmutableSet.copyOf(ids))).values();
   }

   @SuppressWarnings("unchecked")
   @Override
   public Iterable<Location> listLocations() {
      return (Iterable<Location>) locationSupplier.get();
   }

   @Override
   public NodeMetadata getNode(String id) {
      return nodes.get(id);
   }

   /**
    * Marks the node TERMINATED after a short delay and removes it from the map (and its security
    * groups) shortly afterwards; a no-op when the node is unknown.
    */
   @Override
   public void destroyNode(final String id) {
      NodeMetadata node = nodes.get(id);
      if (node == null)
         return;
      setStateOnNodeAfterDelay(Status.PENDING, node, 0);
      setStateOnNodeAfterDelay(Status.TERMINATED, node, 50);
      groupsForNodes.removeAll(id);
      executor.execute(new Runnable() {
         @Override
         public void run() {
            try {
               Thread.sleep(200);
            } catch (InterruptedException e) {
               // restore the interrupt flag before propagating; removal still happens in finally
               Thread.currentThread().interrupt();
               throw Throwables.propagate(e);
            } finally {
               nodes.remove(id);
            }
         }
      });
   }

   @Override
   public void rebootNode(String id) {
      NodeMetadata node = nodes.get(id);
      if (node == null)
         throw new ResourceNotFoundException("node not found: " + id);
      setStateOnNode(Status.PENDING, node);
      setStateOnNodeAfterDelay(Status.RUNNING, node, 50);
   }

   @Override
   public void resumeNode(String id) {
      NodeMetadata node = nodes.get(id);
      if (node == null)
         throw new ResourceNotFoundException("node not found: " + id);
      if (node.getStatus() == Status.RUNNING)
         return;
      if (node.getStatus() != Status.SUSPENDED)
         throw new IllegalStateException("to resume a node, it must be in suspended status, not: " + formatStatus(node));
      setStateOnNode(Status.PENDING, node);
      setStateOnNodeAfterDelay(Status.RUNNING, node, 50);
   }

   @Override
   public void suspendNode(String id) {
      NodeMetadata node = nodes.get(id);
      if (node == null)
         throw new ResourceNotFoundException("node not found: " + id);
      if (node.getStatus() == Status.SUSPENDED)
         return;
      if (node.getStatus() != Status.RUNNING)
         throw new IllegalStateException("to suspend a node, it must be in running status, not: " + formatStatus(node));
      setStateOnNode(Status.PENDING, node);
      setStateOnNodeAfterDelay(Status.SUSPENDED, node, 50);
   }
}
|
|
/*
* Copyright 2004-2006 Stefan Reuter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.asteriskjava.manager.event;
/**
 * Represents a dial state change reported by the Asterisk Manager interface. The properties mirror
 * the attributes of the manager event: a snapshot of the dialing channel plus a snapshot of the
 * dialed (destination) channel, prefixed with {@code dest}.
 */
public class DialStateEvent extends ManagerEvent
{
    private static final long serialVersionUID = 1L;
    // fields are private (instead of package-private) so state is only reachable
    // through the accessors below
    private String destAccountCode;
    private String destCallerIdName;
    private String destCallerIdNum;
    private String destChannel;
    private String destChannelState;
    private String destChannelStateDesc;
    private String destConnectedLineName;
    private String destConnectedLineNum;
    private String destContext;
    private String destExten;
    private String destLanguage;
    private String destLinkedId;
    private String destPriority;
    private String destUniqueId;
    private String dialStatus;
    private String privilege;
    private String uniqueId;
    private String linkedId;
    private String accountCode;
    private String language;
    private String channel;

    public DialStateEvent(Object source)
    {
        super(source);
    }

    /** Returns the account code of the destination channel. */
    public String getDestAccountCode()
    {
        return destAccountCode;
    }

    public void setDestAccountCode(String destAccountCode)
    {
        this.destAccountCode = destAccountCode;
    }

    /** Returns the caller id name of the destination channel. */
    public String getDestCallerIdName()
    {
        return destCallerIdName;
    }

    public void setDestCallerIdName(String destCallerIdName)
    {
        this.destCallerIdName = destCallerIdName;
    }

    /** Returns the caller id number of the destination channel. */
    public String getDestCallerIdNum()
    {
        return destCallerIdNum;
    }

    public void setDestCallerIdNum(String destCallerIdNum)
    {
        this.destCallerIdNum = destCallerIdNum;
    }

    /** Returns the name of the destination channel. */
    public String getDestChannel()
    {
        return destChannel;
    }

    public void setDestChannel(String destChannel)
    {
        this.destChannel = destChannel;
    }

    /** Returns the state of the destination channel. */
    public String getDestChannelState()
    {
        return destChannelState;
    }

    public void setDestChannelState(String destChannelState)
    {
        this.destChannelState = destChannelState;
    }

    /** Returns the textual description of the destination channel state. */
    public String getDestChannelStateDesc()
    {
        return destChannelStateDesc;
    }

    public void setDestChannelStateDesc(String destChannelStateDesc)
    {
        this.destChannelStateDesc = destChannelStateDesc;
    }

    /** Returns the connected line name of the destination channel. */
    public String getDestConnectedLineName()
    {
        return destConnectedLineName;
    }

    public void setDestConnectedLineName(String destConnectedLineName)
    {
        this.destConnectedLineName = destConnectedLineName;
    }

    /** Returns the connected line number of the destination channel. */
    public String getDestConnectedLineNum()
    {
        return destConnectedLineNum;
    }

    public void setDestConnectedLineNum(String destConnectedLineNum)
    {
        this.destConnectedLineNum = destConnectedLineNum;
    }

    /** Returns the dialplan context of the destination channel. */
    public String getDestContext()
    {
        return destContext;
    }

    public void setDestContext(String destContext)
    {
        this.destContext = destContext;
    }

    /** Returns the dialplan extension of the destination channel. */
    public String getDestExten()
    {
        return destExten;
    }

    public void setDestExten(String destExten)
    {
        this.destExten = destExten;
    }

    /** Returns the language of the destination channel. */
    public String getDestLanguage()
    {
        return destLanguage;
    }

    public void setDestLanguage(String destLanguage)
    {
        this.destLanguage = destLanguage;
    }

    /** Returns the linked id of the destination channel. */
    public String getDestLinkedId()
    {
        return destLinkedId;
    }

    public void setDestLinkedId(String destLinkedId)
    {
        this.destLinkedId = destLinkedId;
    }

    /** Returns the dialplan priority of the destination channel. */
    public String getDestPriority()
    {
        return destPriority;
    }

    public void setDestPriority(String destPriority)
    {
        this.destPriority = destPriority;
    }

    /** Returns the unique id of the destination channel. */
    public String getDestUniqueId()
    {
        return destUniqueId;
    }

    public void setDestUniqueId(String destUniqueId)
    {
        this.destUniqueId = destUniqueId;
    }

    /** Returns the dial status reported by the event. */
    public String getDialStatus()
    {
        return dialStatus;
    }

    public void setDialStatus(String dialStatus)
    {
        this.dialStatus = dialStatus;
    }

    /** Returns the AMI privilege of the event. */
    public String getPrivilege()
    {
        return privilege;
    }

    public void setPrivilege(String privilege)
    {
        this.privilege = privilege;
    }

    /** Returns the unique id of the dialing channel. */
    public String getUniqueId()
    {
        return uniqueId;
    }

    public void setUniqueId(String uniqueId)
    {
        this.uniqueId = uniqueId;
    }

    /** Returns the linked id of the dialing channel. */
    public String getLinkedId()
    {
        return linkedId;
    }

    public void setLinkedId(String linkedId)
    {
        this.linkedId = linkedId;
    }

    /** Returns the account code of the dialing channel. */
    public String getAccountCode()
    {
        return accountCode;
    }

    public void setAccountCode(String accountCode)
    {
        this.accountCode = accountCode;
    }

    /** Returns the language of the dialing channel. */
    public String getLanguage()
    {
        return language;
    }

    public void setLanguage(String language)
    {
        this.language = language;
    }

    /** Returns the name of the dialing channel. */
    public String getChannel()
    {
        return channel;
    }

    public void setChannel(String channel)
    {
        this.channel = channel;
    }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is based on source code from the Hadoop Project (http://hadoop.apache.org/), licensed by the Apache
* Software Foundation (ASF) under the Apache License, Version 2.0. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.apache.flink.core.fs.local;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.flink.core.fs.BlockLocation;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.util.OperatingSystem;
/**
 * The class <code>LocalFileSystem</code> provides an implementation of the {@link FileSystem} interface for the local
 * file system.
 *
 */
public class LocalFileSystem extends FileSystem {

   /**
    * Path pointing to the current working directory.
    */
   private Path workingDir = null;

   /**
    * The URI representing the local file system.
    */
   private final URI name = OperatingSystem.isWindows() ? URI.create("file:/") : URI.create("file:///");

   /**
    * The host name of this machine.
    */
   private final String hostName;

   private static final Logger LOG = LoggerFactory.getLogger(LocalFileSystem.class);

   /**
    * Constructs a new <code>LocalFileSystem</code> object.
    */
   public LocalFileSystem() {
      this.workingDir = new Path(System.getProperty("user.dir")).makeQualified(this);
      String tmp = "unknownHost";
      try {
         tmp = InetAddress.getLocalHost().getHostName();
      } catch (UnknownHostException e) {
         LOG.error("Could not resolve local host", e);
      }
      this.hostName = tmp;
   }

   /**
    * Local files always form a single "block" located on this host.
    */
   @Override
   public BlockLocation[] getFileBlockLocations(final FileStatus file, final long start, final long len)
         throws IOException {
      final BlockLocation[] blockLocations = new BlockLocation[1];
      blockLocations[0] = new LocalBlockLocation(this.hostName, file.getLen());
      return blockLocations;
   }

   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
      final File path = pathToFile(f);
      if (path.exists()) {
         // reuse the File already computed instead of converting the path a second time
         return new LocalFileStatus(path, this);
      } else {
         throw new FileNotFoundException("File " + f + " does not exist or the user running "
            + "Flink ('"+System.getProperty("user.name")+"') has insufficient permissions to access it.");
      }
   }

   @Override
   public URI getUri() {
      return name;
   }

   @Override
   public Path getWorkingDirectory() {
      return workingDir;
   }

   @Override
   public void initialize(final URI name) throws IOException { }

   @Override
   public FSDataInputStream open(final Path f, final int bufferSize) throws IOException {
      return open(f);
   }

   @Override
   public FSDataInputStream open(final Path f) throws IOException {
      final File file = pathToFile(f);
      return new LocalDataInputStream(file);
   }

   /** Converts the given path to a {@link File}, resolving relative paths against the working directory. */
   private File pathToFile(Path path) {
      if (!path.isAbsolute()) {
         path = new Path(getWorkingDirectory(), path);
      }
      return new File(path.toUri().getPath());
   }

   @Override
   public FileStatus[] listStatus(final Path f) throws IOException {
      final File localf = pathToFile(f);
      FileStatus[] results;
      if (!localf.exists()) {
         return null;
      }
      if (localf.isFile()) {
         return new FileStatus[] { new LocalFileStatus(localf, this) };
      }
      final String[] names = localf.list();
      if (names == null) {
         return null;
      }
      results = new FileStatus[names.length];
      for (int i = 0; i < names.length; i++) {
         results[i] = getFileStatus(new Path(f, names[i]));
      }
      return results;
   }

   @Override
   public boolean delete(final Path f, final boolean recursive) throws IOException {
      final File file = pathToFile(f);
      if (file.isFile()) {
         return file.delete();
      } else if ((!recursive) && file.isDirectory()) {
         // File.listFiles() returns null when the directory cannot be read;
         // guard against a NullPointerException before checking emptiness
         final File[] containedFiles = file.listFiles();
         if (containedFiles == null) {
            throw new IOException("Directory " + file.toString() + " could not be read");
         } else if (containedFiles.length != 0) {
            throw new IOException("Directory " + file.toString() + " is not empty");
         }
      }
      return delete(file);
   }

   /**
    * Deletes the given file or directory.
    *
    * @param f
    *        the file to be deleted
    * @return <code>true</code> if all files were deleted successfully, <code>false</code> otherwise
    * @throws IOException
    *         thrown if an error occurred while deleting the files/directories
    */
   private boolean delete(final File f) throws IOException {
      if (f.isDirectory()) {
         final File[] files = f.listFiles();
         // listFiles() returns null when the directory cannot be read; report failure
         // instead of throwing a NullPointerException
         if (files == null) {
            return false;
         }
         for (int i = 0; i < files.length; i++) {
            if (!delete(files[i])) {
               return false;
            }
         }
      } else {
         return f.delete();
      }
      // Now directory is empty
      return f.delete();
   }

   /**
    * Recursively creates the directory specified by the provided path.
    *
    * @return <code>true</code>if the directories either already existed or have been created successfully,
    *         <code>false</code> otherwise
    * @throws IOException
    *         thrown if an error occurred while creating the directory/directories
    */
   @Override
   public boolean mkdirs(final Path f) throws IOException {
      final Path parent = f.getParent();
      final File p2f = pathToFile(f);
      return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
   }

   @Override
   public FSDataOutputStream create(final Path f, final boolean overwrite, final int bufferSize,
         final short replication, final long blockSize) throws IOException {
      if (exists(f) && !overwrite) {
         throw new IOException("File already exists:" + f);
      }
      final Path parent = f.getParent();
      if (parent != null && !mkdirs(parent)) {
         throw new IOException("Mkdirs failed to create " + parent.toString());
      }
      final File file = pathToFile(f);
      return new LocalDataOutputStream(file);
   }

   @Override
   public FSDataOutputStream create(final Path f, final boolean overwrite) throws IOException {
      return create(f, overwrite, 0, (short) 0, 0);
   }

   @Override
   public boolean rename(final Path src, final Path dst) throws IOException {
      final File srcFile = pathToFile(src);
      final File dstFile = pathToFile(dst);
      return srcFile.renameTo(dstFile);
   }

   @Override
   public boolean isDistributedFS() {
      return false;
   }
}
|
|
/**
Copyright 2017 Andrea "Stock" Stocchero
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.pepstock.charba.client.annotation;
import org.pepstock.charba.client.annotation.callbacks.CalloutPositionCallback;
import org.pepstock.charba.client.annotation.callbacks.MarginCallback;
import org.pepstock.charba.client.annotation.callbacks.SideCallback;
import org.pepstock.charba.client.annotation.callbacks.StartCallback;
import org.pepstock.charba.client.annotation.enums.CalloutPosition;
import org.pepstock.charba.client.callbacks.DisplayCallback;
import org.pepstock.charba.client.callbacks.NativeCallback;
import org.pepstock.charba.client.callbacks.ScriptableFunctions.ProxyBooleanCallback;
import org.pepstock.charba.client.callbacks.ScriptableFunctions.ProxyIntegerCallback;
import org.pepstock.charba.client.callbacks.ScriptableFunctions.ProxyObjectCallback;
import org.pepstock.charba.client.callbacks.ScriptableFunctions.ProxyStringCallback;
import org.pepstock.charba.client.callbacks.ScriptableIntegerChecker;
import org.pepstock.charba.client.callbacks.ScriptableUtils;
import org.pepstock.charba.client.commons.AbstractNode;
import org.pepstock.charba.client.commons.CallbackPropertyHandler;
import org.pepstock.charba.client.commons.CallbackProxy;
import org.pepstock.charba.client.commons.Checker;
import org.pepstock.charba.client.commons.JsHelper;
import org.pepstock.charba.client.commons.Key;
import org.pepstock.charba.client.commons.NativeObject;
import org.pepstock.charba.client.enums.CapStyle;
import org.pepstock.charba.client.enums.JoinStyle;
import org.pepstock.charba.client.items.Undefined;
import org.pepstock.charba.client.utils.Utilities;
/**
* Implements a <b>CALLOUT</b> to apply on a LABEL annotation.
*
* @author Andrea "Stock" Stocchero
*
*/
public final class Callout extends AbstractNode implements IsDefaultsCallout, HasBorderOptions, HasExtendedBorderOptions {
/**
* Default callout display, <b>{@value DEFAULT_DISPLAY}</b>.
*/
public static final boolean DEFAULT_DISPLAY = false;
/**
* Default callout border width, <b>{@value DEFAULT_BORDER_WIDTH}</b>.
*/
public static final int DEFAULT_BORDER_WIDTH = 1;
/**
* Default callout border cap style, <b>{@link CapStyle#BUTT}</b>.
*/
public static final CapStyle DEFAULT_BORDER_CAP_STYLE = CapStyle.BUTT;
/**
* Default callout border join style, <b>{@link JoinStyle#MITER}</b>.
*/
public static final JoinStyle DEFAULT_BORDER_JOIN_STYLE = JoinStyle.MITER;
/**
* Default callout border dash offset, <b>{@value DEFAULT_BORDER_DASH_OFFSET}</b>.
*/
public static final int DEFAULT_BORDER_DASH_OFFSET = 0;
/**
* Default amount of pixels between the label and the callout separator, <b>{@value DEFAULT_MARGIN}</b>.
*/
public static final int DEFAULT_MARGIN = 5;
/**
* Default width of the starter line of callout pointer, <b>{@value DEFAULT_SIDE}</b>.
*/
public static final int DEFAULT_SIDE = 5;
/**
* Default the separator dimension in pixels to use as starting point for callout pointer, <b>{@link Undefined#INTEGER}</b>.
*/
public static final int DEFAULT_START = Undefined.INTEGER;
/**
* Default the percentage of the separator dimension to use as starting point for callout pointer, <b>{@value DEFAULT_START_AS_PERCENTAGE}</b>.
*/
public static final double DEFAULT_START_AS_PERCENTAGE = 0.5;
/**
 * Name of properties of native object.
 */
private enum Property implements Key
{
	// in the JS plugin this option is called "enabled", but Charba exposes it
	// as "display" because that name is more coherent with the option scope
	ENABLED("enabled"),
	MARGIN("margin"),
	POSITION("position"),
	SIDE("side"),
	START("start");

	// property name used in the native object
	private final String value;

	/**
	 * Creates the property with the value to use in the native object.
	 *
	 * @param value value of property name
	 */
	Property(String value) {
		this.value = value;
	}

	/*
	 * (non-Javadoc)
	 *
	 * @see org.pepstock.charba.client.commons.Key#value()
	 */
	@Override
	public String value() {
		return value;
	}
}
// ---------------------------
// -- CALLBACKS PROXIES ---
// ---------------------------
// callback proxy to invoke the display function
private final CallbackProxy<ProxyBooleanCallback> displayCallbackProxy = JsHelper.get().newCallbackProxy();
// callback proxy to invoke the margin function
private final CallbackProxy<ProxyIntegerCallback> marginCallbackProxy = JsHelper.get().newCallbackProxy();
// callback proxy to invoke the side function
private final CallbackProxy<ProxyIntegerCallback> sideCallbackProxy = JsHelper.get().newCallbackProxy();
// callback proxy to invoke the start function
private final CallbackProxy<ProxyObjectCallback> startCallbackProxy = JsHelper.get().newCallbackProxy();
// callback proxy to invoke the position function
private final CallbackProxy<ProxyStringCallback> positionCallbackProxy = JsHelper.get().newCallbackProxy();
// callback instance to handle display options
private static final CallbackPropertyHandler<DisplayCallback<AnnotationContext>> DISPLAY_PROPERTY_HANDLER = new CallbackPropertyHandler<>(Property.ENABLED);
// callback instance to handle margin options
private static final CallbackPropertyHandler<MarginCallback> MARGIN_PROPERTY_HANDLER = new CallbackPropertyHandler<>(Property.MARGIN);
// callback instance to handle side options
private static final CallbackPropertyHandler<SideCallback> SIDE_PROPERTY_HANDLER = new CallbackPropertyHandler<>(Property.SIDE);
// callback instance to handle start options
private static final CallbackPropertyHandler<StartCallback> START_PROPERTY_HANDLER = new CallbackPropertyHandler<>(Property.START);
// callback instance to handle position options
private static final CallbackPropertyHandler<CalloutPositionCallback> POSITION_PROPERTY_HANDLER = new CallbackPropertyHandler<>(Property.POSITION);
// label annotation parent instance
private final AbstractAnnotation parent;
// defaults options
private final IsDefaultsCallout defaultValues;
// border options handler
private final BorderOptionsHandler borderOptionsHandler;
// extended border options handler
private final ExtendedBorderOptionsHandler extendedBorderOptionsHandler;
/**
 * To avoid any user instantiation because it is always created inside the annotation options.
 *
 * @param parent {@link AbstractAnnotation} instance which contains the callout
 * @param nativeObject native object to wrap, with all properties of a callout
 * @param defaultValues default options instance
 */
Callout(AbstractAnnotation parent, NativeObject nativeObject, IsDefaultsCallout defaultValues) {
super(nativeObject);
// stores annotation parent
this.parent = parent;
// checks if default value is consistent
// stores default options
this.defaultValues = checkDefaultValuesArgument(defaultValues);
// stores incremental ID
setNewIncrementalId();
// creates border options handler
this.borderOptionsHandler = new BorderOptionsHandler(this.parent, this.defaultValues, getNativeObject());
// creates extended border options handler
this.extendedBorderOptionsHandler = new ExtendedBorderOptionsHandler(this.parent, this.defaultValues, getNativeObject());
// -------------------------------
// -- SET CALLBACKS to PROXIES ---
// -------------------------------
// sets function to proxy callback in order to invoke the java interface
this.displayCallbackProxy.setCallback(context -> ScriptableUtils.getOptionValue(new AnnotationContext(this.parent, context), getDisplayCallback(), this.defaultValues.isDisplay()));
// sets function to proxy callback in order to invoke the java interface
this.marginCallbackProxy.setCallback(context -> ScriptableUtils.getOptionValueAsNumber(new AnnotationContext(this.parent, context), getMarginCallback(), this.defaultValues.getMargin(), ScriptableIntegerChecker.POSITIVE_OR_DEFAULT).intValue());
// sets function to proxy callback in order to invoke the java interface
this.sideCallbackProxy.setCallback(context -> ScriptableUtils.getOptionValueAsNumber(new AnnotationContext(this.parent, context), getSideCallback(), this.defaultValues.getSide(), ScriptableIntegerChecker.POSITIVE_OR_DEFAULT).intValue());
// sets function to proxy callback in order to invoke the java interface
this.startCallbackProxy.setCallback(context -> onStart(new AnnotationContext(this.parent, context), this.defaultValues.getStart()));
// sets function to proxy callback in order to invoke the java interface
this.positionCallbackProxy.setCallback(context -> ScriptableUtils.getOptionValue(new AnnotationContext(this.parent, context), getPositionCallback(), this.defaultValues.getPosition()).value());
}
/*
 * (non-Javadoc)
 *
 * @see org.pepstock.charba.client.annotation.HasBorderOptions#getBorderOptionsHandler()
 */
@Override
public BorderOptionsHandler getBorderOptionsHandler() {
	// returns the handler created in the constructor
	return this.borderOptionsHandler;
}
/*
 * (non-Javadoc)
 *
 * @see org.pepstock.charba.client.annotation.HasExtendedBorderOptions#getExtendedBorderOptionsHandler()
 */
@Override
public ExtendedBorderOptionsHandler getExtendedBorderOptionsHandler() {
	// returns the handler created in the constructor
	return this.extendedBorderOptionsHandler;
}
/**
 * Sets <code>true</code> whether the callout should be displayed.
 *
 * @param display <code>true</code> whether the callout should be displayed
 */
public void setDisplay(boolean display) {
	// a static value is being set: any display callback
	// previously configured must be removed first
	setDisplay((DisplayCallback<AnnotationContext>) null);
	// then stores the flag
	setValue(Property.ENABLED, display);
}
/**
 * Returns <code>true</code> whether the callout should be displayed.
 *
 * @return <code>true</code> whether the callout should be displayed
 */
@Override
public boolean isDisplay() {
	// reads the property, falling back on the default when missing
	final boolean defaultDisplay = this.defaultValues.isDisplay();
	return getValue(Property.ENABLED, defaultDisplay);
}
/**
 * Sets the amount of pixels between the label and the callout separator.
 *
 * @param margin the amount of pixels between the label and the callout separator
 */
public void setMargin(int margin) {
	// a static value is being set: removes any margin callback first
	setMargin((MarginCallback) null);
	// normalizes the value to zero or positive and stores it
	final int checkedMargin = Checker.positiveOrZero(margin);
	setValue(Property.MARGIN, checkedMargin);
}
/**
 * Returns the amount of pixels between the label and the callout separator.
 *
 * @return the amount of pixels between the label and the callout separator.
 */
@Override
public int getMargin() {
	// reads the property, falling back on the default when missing
	final int defaultMargin = this.defaultValues.getMargin();
	return getValue(Property.MARGIN, defaultMargin);
}
/**
 * Sets the width of the starter line of callout pointer.
 *
 * @param side the width of the starter line of callout pointer
 */
public void setSide(int side) {
	// a static value is being set: removes any side callback first
	setSide((SideCallback) null);
	// normalizes the value to zero or positive and stores it
	final int checkedSide = Checker.positiveOrZero(side);
	setValue(Property.SIDE, checkedSide);
}
/**
 * Returns the width of the starter line of callout pointer.
 *
 * @return the width of the starter line of callout pointer
 */
@Override
public int getSide() {
	// reads the property, falling back on the default when missing
	final int defaultSide = this.defaultValues.getSide();
	return getValue(Property.SIDE, defaultSide);
}
/**
 * Sets the separator dimension in pixels to use as starting point for callout pointer.
 *
 * @param start the separator dimension in pixels to use as starting point for callout pointer
 */
public void setStart(int start) {
	// a static value is being set: removes any start callback first
	setStart((StartCallback) null);
	// normalizes the value to zero or positive and stores it
	final int checkedStart = Checker.positiveOrZero(start);
	setValue(Property.START, checkedStart);
}
/**
 * Returns the separator dimension in pixels to use as starting point for callout pointer.
 *
 * @return the separator dimension in pixels to use as starting point for callout pointer
 */
@Override
public int getStart() {
	// reads the property, falling back on the default when missing
	final int defaultStart = this.defaultValues.getStart();
	return getValue(Property.START, defaultStart);
}
/**
 * Sets the percentage of the separator dimension to use as starting point for callout pointer.
 *
 * @param start the percentage of the separator dimension to use as starting point for callout pointer
 */
public void setStartAsPercentage(double start) {
	// a static value is being set: removes any start callback first
	setStart((StartCallback) null);
	// stores the value formatted as a percentage
	setValue(Property.START, Utilities.getAsPercentage(start));
}
/**
 * Returns the percentage of the separator dimension to use as starting point for callout pointer.
 *
 * @return the percentage of the separator dimension to use as starting point for callout pointer
 */
@Override
public double getStartAsPercentage() {
	// reads the stored value as string and parses it as percentage,
	// falling back on the default when missing or not a percentage
	final String storedValue = getValue(Property.START, Undefined.STRING);
	return Utilities.getAsPercentage(storedValue, this.defaultValues.getStartAsPercentage());
}
/**
 * Sets the position of callout, with respect to the label.
 *
 * @param position the position of callout, with respect to the label
 */
public void setPosition(CalloutPosition position) {
	// a static value is being set: removes any position callback first
	setPosition((CalloutPositionCallback) null);
	// then stores the position
	setValue(Property.POSITION, position);
}
/**
 * Returns the position of callout, with respect to the label.
 *
 * @return the position of callout, with respect to the label.
 */
@Override
public CalloutPosition getPosition() {
	// reads the property as enum item, falling back on the default when missing
	final CalloutPosition defaultPosition = this.defaultValues.getPosition();
	return getValue(Property.POSITION, CalloutPosition.values(), defaultPosition);
}
// ---------------------
// CALLBACKS
// ---------------------
/**
 * Returns the callback called to set whether the callout should be displayed.
 *
 * @return the callback called to set whether the callout should be displayed
 */
@Override
public DisplayCallback<AnnotationContext> getDisplayCallback() {
	// retrieves the callback from the property handler, using the default callback as fallback
	final DisplayCallback<AnnotationContext> defaultCallback = this.defaultValues.getDisplayCallback();
	return DISPLAY_PROPERTY_HANDLER.getCallback(this, defaultCallback);
}
/**
 * Sets the callback to set whether the callout should be displayed.
 *
 * @param displayCallback to set whether the callout should be displayed
 */
public void setDisplay(DisplayCallback<AnnotationContext> displayCallback) {
	// stores the callback in the handler, binding the proxy for the annotation plugin
	DISPLAY_PROPERTY_HANDLER.setCallback(this, AnnotationPlugin.ID, displayCallback, displayCallbackProxy.getProxy());
}
/**
* Sets the callback to set whether the label should be displayed.
*
* @param displayCallback to set whether the label should be displayed
*/
public void setDisplay(NativeCallback displayCallback) {
    // resets the scriptable callback so the native callback set below takes effect
    setDisplay((DisplayCallback<AnnotationContext>) null);
    // stores the native callback under the DISPLAY property, matching the
    // property used by getDisplayCallback()/DISPLAY_PROPERTY_HANDLER.
    // BUG FIX: the original stored it under Property.ENABLED, unlike every
    // sibling setX(NativeCallback) method which stores under the matching
    // Property constant, so the callback was written to the wrong option key.
    setValueAndAddToParent(Property.DISPLAY, displayCallback);
}
/**
* Returns the callback to set the amount of pixels between the label and the callout separator.
*
* @return the callback to set the amount of pixels between the label and the callout separator
*/
@Override
public MarginCallback getMarginCallback() {
// delegates to the shared property handler, which returns the user callback
// previously registered via setMargin, or the default when none is set
return MARGIN_PROPERTY_HANDLER.getCallback(this, defaultValues.getMarginCallback());
}
/**
* Sets the callback to set the amount of pixels between the label and the callout separator.
*
* @param marginCallback the callback to set the amount of pixels between the label and the callout separator
*/
public void setMargin(MarginCallback marginCallback) {
// registers the callback (and its JS proxy) under the annotation plugin scope;
// passing null unregisters any previously set callback
MARGIN_PROPERTY_HANDLER.setCallback(this, AnnotationPlugin.ID, marginCallback, marginCallbackProxy.getProxy());
}
/**
* Sets the callback to set the amount of pixels between the label and the callout separator.
*
* @param marginCallback the callback to set the amount of pixels between the label and the callout separator
*/
public void setMargin(NativeCallback marginCallback) {
// resets callback, so the native callback stored below takes effect
setMargin((MarginCallback) null);
// stores values under the same property read by the margin getter
setValueAndAddToParent(Property.MARGIN, marginCallback);
}
/**
* Returns the callback to set the width of the starter line of callout pointer.
*
* @return the callback to set the width of the starter line of callout pointer
*/
@Override
public SideCallback getSideCallback() {
// delegates to the shared property handler, which returns the user callback
// previously registered via setSide, or the default when none is set
return SIDE_PROPERTY_HANDLER.getCallback(this, defaultValues.getSideCallback());
}
/**
* Sets the callback to set the width of the starter line of callout pointer.
*
* @param sideCallback the callback to set the width of the starter line of callout pointer
*/
public void setSide(SideCallback sideCallback) {
// registers the callback (and its JS proxy) under the annotation plugin scope;
// passing null unregisters any previously set callback
SIDE_PROPERTY_HANDLER.setCallback(this, AnnotationPlugin.ID, sideCallback, sideCallbackProxy.getProxy());
}
/**
* Sets the callback to set the width of the starter line of callout pointer.
*
* @param sideCallback the callback to set the width of the starter line of callout pointer
*/
public void setSide(NativeCallback sideCallback) {
// resets callback, so the native callback stored below takes effect
setSide((SideCallback) null);
// stores values under the same property read by the side getter
setValueAndAddToParent(Property.SIDE, sideCallback);
}
/**
* Returns the callback to set the separator dimension to use as starting point for callout pointer.
*
* @return the callback to set the separator dimension to use as starting point for callout pointer
*/
@Override
public StartCallback getStartCallback() {
// delegates to the shared property handler, which returns the user callback
// previously registered via setStart, or the default when none is set
return START_PROPERTY_HANDLER.getCallback(this, defaultValues.getStartCallback());
}
/**
* Sets the callback to set the separator dimension to use as starting point for callout pointer.
*
* @param startCallback the callback to set the separator dimension to use as starting point for callout pointer
*/
public void setStart(StartCallback startCallback) {
// registers the callback (and its JS proxy) under the annotation plugin scope;
// passing null unregisters any previously set callback
START_PROPERTY_HANDLER.setCallback(this, AnnotationPlugin.ID, startCallback, startCallbackProxy.getProxy());
}
/**
* Sets the callback to set the separator dimension to use as starting point for callout pointer.
*
* @param startCallback the callback to set the separator dimension to use as starting point for callout pointer
*/
public void setStart(NativeCallback startCallback) {
// resets callback, so the native callback stored below takes effect
setStart((StartCallback) null);
// stores values under the same property read by the start getter
setValueAndAddToParent(Property.START, startCallback);
}
/**
* Returns the callback to set the position of callout, with respect to the label.
*
* @return the callback to set the position of callout, with respect to the label
*/
@Override
public CalloutPositionCallback getPositionCallback() {
// delegates to the shared property handler, which returns the user callback
// previously registered via setPosition, or the default when none is set
return POSITION_PROPERTY_HANDLER.getCallback(this, defaultValues.getPositionCallback());
}
/**
* Sets the callback to set the position of callout, with respect to the label.
*
* @param positionCallback the callback to set the position of callout, with respect to the label
*/
public void setPosition(CalloutPositionCallback positionCallback) {
// registers the callback (and its JS proxy) under the annotation plugin scope;
// passing null unregisters any previously set callback
POSITION_PROPERTY_HANDLER.setCallback(this, AnnotationPlugin.ID, positionCallback, positionCallbackProxy.getProxy());
}
/**
* Sets the callback to set the position of callout, with respect to the label.
*
* @param positionCallback the callback to set the position of callout, with respect to the label
*/
public void setPosition(NativeCallback positionCallback) {
// resets callback, so the native callback stored below takes effect
setPosition((CalloutPositionCallback) null);
// stores values under the same property read by the position getter
setValueAndAddToParent(Property.POSITION, positionCallback);
}
// -----------------------
// INTERNALS for CALLBACKS
// -----------------------
/**
* Returns an object as string when the callback has been activated.
*
* @param context annotation context instance.
* @param defaultValue default value to apply if callback returns an inconsistent value
* @return an object as string
*/
private Object onStart(AnnotationContext context, double defaultValue) {
    // invokes the user callback, falling back to the passed default
    final Number callbackResult = ScriptableUtils.getOptionValue(context, getStartCallback(), defaultValue);
    // an Integer result is interpreted as an absolute amount of pixels
    if (callbackResult instanceof Integer) {
        return callbackResult.intValue();
    }
    // a Double result is interpreted as a percentage of the separator dimension
    if (callbackResult instanceof Double) {
        return Utilities.getAsPercentage(callbackResult.doubleValue());
    }
    // any other result (null or an unexpected Number subtype) falls back
    // to the default value, emitted as a percentage
    return Utilities.getAsPercentage(defaultValue);
}
}
|
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.index;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* Index request to index a typed JSON document into a specific index and make it searchable. Best
* created using {@link org.elasticsearch.client.Requests#indexRequest(String)}.
*
* The index requires the {@link #index()}, {@link #type(String)}, {@link #id(String)} and
* {@link #source(byte[], XContentType)} to be set.
*
* The source (content to index) can be set in its bytes form using ({@link #source(byte[], XContentType)}),
* its string form ({@link #source(String, XContentType)}) or using a {@link org.elasticsearch.common.xcontent.XContentBuilder}
* ({@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}).
*
* If the {@link #id(String)} is not set, it will be automatically generated.
*
* @see IndexResponse
* @see org.elasticsearch.client.Requests#indexRequest(String)
* @see org.elasticsearch.client.Client#index(IndexRequest)
*/
public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implements DocWriteRequest<IndexRequest>, CompositeIndicesRequest {
    /**
     * Max length of the source document to include into toString()
     *
     * @see ReplicationRequest#createTask(long, java.lang.String, java.lang.String, org.elasticsearch.tasks.TaskId)
     */
    static final int MAX_SOURCE_LENGTH_IN_TOSTRING = 2048;

    private String type;
    private String id;
    @Nullable
    private String routing;
    @Nullable
    private String parent;
    private BytesReference source;
    private OpType opType = OpType.INDEX;
    private long version = Versions.MATCH_ANY;
    private VersionType versionType = VersionType.INTERNAL;
    // content type of {@link #source}; required for parsing at index time
    private XContentType contentType;
    private String pipeline;

    /**
     * Value for {@link #getAutoGeneratedTimestamp()} if the document has an external
     * provided ID.
     */
    public static final int UNSET_AUTO_GENERATED_TIMESTAMP = -1;

    private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;
    private boolean isRetry = false;

    public IndexRequest() {
    }

    /**
     * Constructs a new index request against the specific index. The {@link #type(String)}
     * {@link #source(byte[], XContentType)} must be set.
     */
    public IndexRequest(String index) {
        this.index = index;
    }

    /**
     * Constructs a new index request against the specific index and type. The
     * {@link #source(byte[], XContentType)} must be set.
     */
    public IndexRequest(String index, String type) {
        this.index = index;
        this.type = type;
    }

    /**
     * Constructs a new index request against the index, type, id and using the source.
     *
     * @param index The index to index into
     * @param type  The type to index into
     * @param id    The id of document
     */
    public IndexRequest(String index, String type, String id) {
        this.index = index;
        this.type = type;
        this.id = id;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = super.validate();
        if (type == null) {
            validationException = addValidationError("type is missing", validationException);
        }
        if (source == null) {
            validationException = addValidationError("source is missing", validationException);
        }
        if (contentType == null) {
            validationException = addValidationError("content type is missing", validationException);
        }
        final long resolvedVersion = resolveVersionDefaults();
        if (opType() == OpType.CREATE) {
            if (versionType != VersionType.INTERNAL) {
                validationException = addValidationError("create operations only support internal versioning. use index instead", validationException);
                return validationException;
            }
            if (resolvedVersion != Versions.MATCH_DELETED) {
                validationException = addValidationError("create operations do not support explicit versions. use index instead", validationException);
                return validationException;
            }
        }
        if (opType() != OpType.INDEX && id == null) {
            // BUG FIX: the result of addValidationError was previously discarded
            // (the return value was not assigned back), silently dropping the
            // "an id is required" validation failure.
            validationException = addValidationError("an id is required for a " + opType() + " operation", validationException);
        }
        if (!versionType.validateVersionForWrites(resolvedVersion)) {
            validationException = addValidationError("illegal version value [" + resolvedVersion + "] for version type [" + versionType.name() + "]", validationException);
        }
        if (versionType == VersionType.FORCE) {
            validationException = addValidationError("version type [force] may no longer be used", validationException);
        }
        if (id != null && id.getBytes(StandardCharsets.UTF_8).length > 512) {
            validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
                            id.getBytes(StandardCharsets.UTF_8).length, validationException);
        }
        if (id == null && (versionType == VersionType.INTERNAL && resolvedVersion == Versions.MATCH_ANY) == false) {
            validationException = addValidationError("an id must be provided if version type or value are set", validationException);
        }
        return validationException;
    }

    /**
     * The content type. This will be used when generating a document from user provided objects like Maps and when parsing the
     * source at index time
     */
    public XContentType getContentType() {
        return contentType;
    }

    /**
     * The type of the indexed document.
     */
    @Override
    public String type() {
        return type;
    }

    /**
     * Sets the type of the indexed document.
     */
    public IndexRequest type(String type) {
        this.type = type;
        return this;
    }

    /**
     * The id of the indexed document. If not set, will be automatically generated.
     */
    @Override
    public String id() {
        return id;
    }

    /**
     * Sets the id of the indexed document. If not set, will be automatically generated.
     */
    public IndexRequest id(String id) {
        this.id = id;
        return this;
    }

    /**
     * Controls the shard routing of the request. Using this value to hash the shard
     * and not the id.
     */
    @Override
    public IndexRequest routing(String routing) {
        // an empty routing value is normalized to null (no custom routing)
        if (routing != null && routing.length() == 0) {
            this.routing = null;
        } else {
            this.routing = routing;
        }
        return this;
    }

    /**
     * Controls the shard routing of the request. Using this value to hash the shard
     * and not the id.
     */
    @Override
    public String routing() {
        return this.routing;
    }

    /**
     * Sets the parent id of this document.
     */
    public IndexRequest parent(String parent) {
        this.parent = parent;
        return this;
    }

    @Override
    public String parent() {
        return this.parent;
    }

    /**
     * Sets the ingest pipeline to be executed before indexing the document
     */
    public IndexRequest setPipeline(String pipeline) {
        this.pipeline = pipeline;
        return this;
    }

    /**
     * Returns the ingest pipeline to be executed before indexing the document
     */
    public String getPipeline() {
        return this.pipeline;
    }

    /**
     * The source of the document to index, recopied to a new array if it is unsafe.
     */
    public BytesReference source() {
        return source;
    }

    public Map<String, Object> sourceAsMap() {
        return XContentHelper.convertToMap(source, false, contentType).v2();
    }

    /**
     * Index the Map in {@link Requests#INDEX_CONTENT_TYPE} format
     *
     * @param source The map to index
     */
    public IndexRequest source(Map source) throws ElasticsearchGenerationException {
        return source(source, Requests.INDEX_CONTENT_TYPE);
    }

    /**
     * Index the Map as the provided content type.
     *
     * @param source The map to index
     */
    public IndexRequest source(Map source, XContentType contentType) throws ElasticsearchGenerationException {
        try {
            XContentBuilder builder = XContentFactory.contentBuilder(contentType);
            builder.map(source);
            return source(builder);
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
        }
    }

    /**
     * Sets the document source to index.
     *
     * Note, its preferable to either set it using {@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}
     * or using the {@link #source(byte[], XContentType)}.
     */
    public IndexRequest source(String source, XContentType xContentType) {
        return source(new BytesArray(source), xContentType);
    }

    /**
     * Sets the content source to index.
     */
    public IndexRequest source(XContentBuilder sourceBuilder) {
        return source(sourceBuilder.bytes(), sourceBuilder.contentType());
    }

    /**
     * Sets the content source to index using the default content type ({@link Requests#INDEX_CONTENT_TYPE})
     * <p>
     * <b>Note: the number of objects passed to this method must be an even
     * number. Also the first argument in each pair (the field name) must have a
     * valid String representation.</b>
     * </p>
     */
    public IndexRequest source(Object... source) {
        return source(Requests.INDEX_CONTENT_TYPE, source);
    }

    /**
     * Sets the content source to index.
     * <p>
     * <b>Note: the number of objects passed to this method as varargs must be an even
     * number. Also the first argument in each pair (the field name) must have a
     * valid String representation.</b>
     * </p>
     */
    public IndexRequest source(XContentType xContentType, Object... source) {
        if (source.length % 2 != 0) {
            throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]");
        }
        if (source.length == 2 && source[0] instanceof BytesReference && source[1] instanceof Boolean) {
            throw new IllegalArgumentException("you are using the removed method for source with bytes and unsafe flag, the unsafe flag was removed, please just use source(BytesReference)");
        }
        try {
            XContentBuilder builder = XContentFactory.contentBuilder(xContentType);
            builder.startObject();
            // consume the varargs as (field, value) pairs
            for (int i = 0; i < source.length; i++) {
                builder.field(source[i++].toString(), source[i]);
            }
            builder.endObject();
            return source(builder);
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("Failed to generate", e);
        }
    }

    /**
     * Sets the document to index in bytes form.
     */
    public IndexRequest source(BytesReference source, XContentType xContentType) {
        this.source = Objects.requireNonNull(source);
        this.contentType = Objects.requireNonNull(xContentType);
        return this;
    }

    /**
     * Sets the document to index in bytes form.
     */
    public IndexRequest source(byte[] source, XContentType xContentType) {
        return source(source, 0, source.length, xContentType);
    }

    /**
     * Sets the document to index in bytes form (assumed to be safe to be used from different
     * threads).
     *
     * @param source The source to index
     * @param offset The offset in the byte array
     * @param length The length of the data
     */
    public IndexRequest source(byte[] source, int offset, int length, XContentType xContentType) {
        return source(new BytesArray(source, offset, length), xContentType);
    }

    /**
     * Sets the type of operation to perform.
     */
    public IndexRequest opType(OpType opType) {
        if (opType != OpType.CREATE && opType != OpType.INDEX) {
            throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]");
        }
        this.opType = opType;
        return this;
    }

    /**
     * Sets a string representation of the {@link #opType(OpType)}. Can
     * be either "index" or "create".
     */
    public IndexRequest opType(String opType) {
        String op = opType.toLowerCase(Locale.ROOT);
        if (op.equals("create")) {
            opType(OpType.CREATE);
        } else if (op.equals("index")) {
            opType(OpType.INDEX);
        } else {
            throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]");
        }
        return this;
    }

    /**
     * Set to <tt>true</tt> to force this index to use {@link OpType#CREATE}.
     */
    public IndexRequest create(boolean create) {
        if (create) {
            return opType(OpType.CREATE);
        } else {
            return opType(OpType.INDEX);
        }
    }

    @Override
    public OpType opType() {
        return this.opType;
    }

    @Override
    public IndexRequest version(long version) {
        this.version = version;
        return this;
    }

    /**
     * Returns stored version. If currently stored version is {@link Versions#MATCH_ANY} and
     * opType is {@link OpType#CREATE}, returns {@link Versions#MATCH_DELETED}.
     */
    @Override
    public long version() {
        return resolveVersionDefaults();
    }

    /**
     * Resolves the version based on operation type {@link #opType()}.
     */
    private long resolveVersionDefaults() {
        if (opType == OpType.CREATE && version == Versions.MATCH_ANY) {
            return Versions.MATCH_DELETED;
        } else {
            return version;
        }
    }

    @Override
    public IndexRequest versionType(VersionType versionType) {
        this.versionType = versionType;
        return this;
    }

    @Override
    public VersionType versionType() {
        return this.versionType;
    }

    public void process(@Nullable MappingMetaData mappingMd, String concreteIndex) {
        if (mappingMd != null) {
            // might as well check for routing here
            if (mappingMd.routing().required() && routing == null) {
                throw new RoutingMissingException(concreteIndex, type, id);
            }
            if (parent != null && !mappingMd.hasParentField()) {
                throw new IllegalArgumentException("can't specify parent if no parent field has been configured");
            }
        } else {
            if (parent != null) {
                throw new IllegalArgumentException("can't specify parent if no parent field has been configured");
            }
        }
        if ("".equals(id)) {
            throw new IllegalArgumentException("if _id is specified it must not be empty");
        }
        // generate id if not already provided
        if (id == null) {
            // use the named constant rather than a magic -1 for clarity
            assert autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP : "timestamp has already been generated!";
            autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
            id(UUIDs.base64UUID());
        }
    }

    /* resolve the routing if needed */
    public void resolveRouting(MetaData metaData) {
        routing(metaData.resolveIndexRouting(parent, routing, index));
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        type = in.readOptionalString();
        id = in.readOptionalString();
        routing = in.readOptionalString();
        parent = in.readOptionalString();
        if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
            in.readOptionalString(); // timestamp
            in.readOptionalWriteable(TimeValue::new); // ttl
        }
        source = in.readBytesReference();
        opType = OpType.fromId(in.readByte());
        version = in.readLong();
        versionType = VersionType.fromValue(in.readByte());
        pipeline = in.readOptionalString();
        isRetry = in.readBoolean();
        autoGeneratedTimestamp = in.readLong();
        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
            contentType = in.readOptionalWriteable(XContentType::readFrom);
        } else {
            // pre-5.3 nodes do not send the content type; sniff it from the source bytes
            contentType = XContentFactory.xContentType(source);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeOptionalString(type);
        out.writeOptionalString(id);
        out.writeOptionalString(routing);
        out.writeOptionalString(parent);
        if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
            // Serialize a fake timestamp. 5.x expect this value to be set by the #process method so we can't use null.
            // On the other hand, indices created on 5.x do not index the timestamp field. Therefore passing a 0 (or any value) for
            // the transport layer OK as it will be ignored.
            out.writeOptionalString("0");
            out.writeOptionalWriteable(null);
        }
        out.writeBytesReference(source);
        out.writeByte(opType.getId());
        // ES versions below 5.1.2 don't know about resolveVersionDefaults but resolve the version eagerly (which messes with validation).
        if (out.getVersion().before(Version.V_5_1_2)) {
            out.writeLong(resolveVersionDefaults());
        } else {
            out.writeLong(version);
        }
        out.writeByte(versionType.getValue());
        out.writeOptionalString(pipeline);
        out.writeBoolean(isRetry);
        out.writeLong(autoGeneratedTimestamp);
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
            out.writeOptionalWriteable(contentType);
        }
    }

    @Override
    public String toString() {
        String sSource = "_na_";
        try {
            // truncate very large sources to keep toString() (and log lines) bounded
            if (source.length() > MAX_SOURCE_LENGTH_IN_TOSTRING) {
                sSource = "n/a, actual length: [" + new ByteSizeValue(source.length()).toString() + "], max length: " +
                        new ByteSizeValue(MAX_SOURCE_LENGTH_IN_TOSTRING).toString();
            } else {
                sSource = XContentHelper.convertToJson(source, false);
            }
        } catch (Exception e) {
            // ignore - toString() must never throw; fall back to "_na_"
        }
        return "index {[" + index + "][" + type + "][" + id + "], source[" + sSource + "]}";
    }

    /**
     * Returns <code>true</code> if this request has been sent to a shard copy more than once.
     */
    public boolean isRetry() {
        return isRetry;
    }

    @Override
    public void onRetry() {
        isRetry = true;
    }

    /**
     * Returns the timestamp the auto generated ID was created or {@value #UNSET_AUTO_GENERATED_TIMESTAMP} if the
     * document has no auto generated timestamp. This method will return a positive value iff the id was auto generated.
     */
    public long getAutoGeneratedTimestamp() {
        return autoGeneratedTimestamp;
    }

    /**
     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
     * use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
     */
    @Override
    public long primaryTerm() {
        throw new UnsupportedOperationException("primary term should never be set on IndexRequest");
    }

    /**
     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
     * use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
     */
    @Override
    public void primaryTerm(long term) {
        throw new UnsupportedOperationException("primary term should never be set on IndexRequest");
    }

    /**
     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
     * use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
     */
    @Override
    public IndexRequest setShardId(ShardId shardId) {
        throw new UnsupportedOperationException("shard id should never be set on IndexRequest");
    }
}
|
|
/*
* $Id$
* This file is a part of the Arakhne Foundation Classes, http://www.arakhne.org/afc
*
* Copyright (c) 2000-2012 Stephane GALLAND.
* Copyright (c) 2005-10, Multiagent Team, Laboratoire Systemes et Transports,
* Universite de Technologie de Belfort-Montbeliard.
* Copyright (c) 2013-2020 The original authors, and other authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.arakhne.afc.math.geometry.d2.i;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.eclipse.xtext.xbase.lib.Pure;
import org.arakhne.afc.math.geometry.d2.ai.MultiShape2ai;
import org.arakhne.afc.vmutil.asserts.AssertMessages;
/** Container for grouping of shapes.
*
* <p>The coordinates of the shapes inside the multishape are global. They are not relative to the multishape.
*
* <p>Caution: The multishape does not detect the bound change of the stored shapes.
*
* @param <T> the type of the shapes inside the multishape.
* @author $Author: tpiotrow$
* @author $Author: sgalland$
* @version $FullVersion$
* @mavengroupid $GroupId$
* @mavenartifactid $ArtifactId$
* @since 13.0
*/
public class MultiShape2i<T extends Shape2i<?>> extends AbstractShape2i<MultiShape2i<T>>
implements MultiShape2ai<Shape2i<?>, MultiShape2i<T>, T, PathElement2i, Point2i, Vector2i, Rectangle2i> {
private static final long serialVersionUID = -4727279807601027239L;
// Backing list that also wires/unwires geometry-change listeners on each stored shape.
private List<T> elements = new ListResponseModel();
// Lazily computed bounding box; null means "stale, recompute on next access".
private Rectangle2i bounds;
/**
 * Construct an empty multishape.
 */
public MultiShape2i() {
//
}
/** Construct a multishape with shapes inside.
 *
 * @param shapes the shapes to add into the multishape.
 */
public MultiShape2i(@SuppressWarnings("unchecked") T... shapes) {
assert shapes != null : AssertMessages.notNullParameter();
addAll(Arrays.asList(shapes));
}
/** Construct a multishape with shapes inside.
 *
 * @param shapes the shapes to add into the multishape.
 */
public MultiShape2i(Iterable<? extends T> shapes) {
assert shapes != null : AssertMessages.notNullParameter();
for (final T element : shapes) {
add(element);
}
}
@SuppressWarnings("unchecked")
@Override
public MultiShape2i<T> clone() {
final MultiShape2i<T> clone = super.clone();
// deep-copy the elements so the clone does not share shape instances
final List<T> clonedList = new ArrayList<>();
for (final T shape : this.elements) {
clonedList.add((T) shape.clone());
}
clone.elements = clonedList;
// NOTE(review): clone.elements is a plain ArrayList, not a ListResponseModel,
// so the clone presumably does not receive geometry-change notifications from
// its shapes - confirm this is intended.
if (this.bounds != null) {
clone.bounds = this.bounds.clone();
}
return clone;
}
@Override
@SuppressWarnings("checkstyle:equalshashcode")
public int hashCode() {
// hash is derived from the element list only
return this.elements.hashCode();
}
@Override
public void onBackendDataChange() {
// the element list changed: invalidate the cached bounds and notify listeners
this.bounds = null;
fireGeometryChange();
}
/** Invoked when the geometry of the content has changed.
 */
protected void onContentGeometryChange() {
// a stored shape changed its geometry: invalidate the cached bounds and notify listeners
this.bounds = null;
fireGeometryChange();
}
@Pure
@Override
public List<T> getBackendDataList() {
return this.elements;
}
@Pure
@Override
public Rectangle2i toBoundingBox() {
// recompute the bounding box only when the cache has been invalidated
if (this.bounds == null) {
this.bounds = getGeomFactory().newBox();
MultiShape2ai.super.toBoundingBox(this.bounds);
}
return this.bounds;
}
@Pure
@Override
public void toBoundingBox(Rectangle2i box) {
assert box != null : AssertMessages.notNullParameter();
if (this.bounds == null) {
this.bounds = getGeomFactory().newBox();
MultiShape2ai.super.toBoundingBox(this.bounds);
}
box.set(this.bounds);
}
@Override
public void translate(int dx, int dy) {
if (dx != 0 || dy != 0) {
// capture the cached bounds before the super call (which resets the cache via
// the change notifications) so it can be translated instead of recomputed
final Rectangle2i box = this.bounds;
MultiShape2ai.super.translate(dx, dy);
if (box != null) {
box.translate(dx, dy);
this.bounds = box;
}
fireGeometryChange();
}
}
/** List responsive model.
 *
 * @author $Author: sgalland$
 * @version $FullVersion$
 * @mavengroupid $GroupId$
 * @mavenartifactid $ArtifactId$
 * @since 13.0
 */
private class ListResponseModel extends AbstractList<T> implements ShapeGeometryChangeListener {
// Actual storage; this wrapper only adds listener management on mutation.
private List<T> delegate = new ArrayList<>();
/** Construct an empty model.
 */
ListResponseModel() {
//
}
@Override
public void add(int index, T element) {
assert element != null;
this.delegate.add(index, element);
// start listening to the new element so the multishape can invalidate its bounds
if (element instanceof AbstractShape2i<?>) {
((AbstractShape2i<?>) element).addShapeGeometryChangeListener(this);
}
}
@Override
public T remove(int index) {
final T element = this.delegate.remove(index);
// stop listening to the removed element
if (element instanceof AbstractShape2i<?>) {
((AbstractShape2i<?>) element).removeShapeGeometryChangeListener(this);
}
return element;
}
@Override
public T set(int index, T element) {
assert element != null;
final T oldElement = this.delegate.set(index, element);
// transfer the listener registration from the replaced element to the new one
if (oldElement instanceof AbstractShape2i<?>) {
((AbstractShape2i<?>) oldElement).removeShapeGeometryChangeListener(this);
}
if (element instanceof AbstractShape2i<?>) {
((AbstractShape2i<?>) element).addShapeGeometryChangeListener(this);
}
return oldElement;
}
@Override
public T get(int index) {
return this.delegate.get(index);
}
@Override
public int size() {
return this.delegate.size();
}
@Override
public void shapeGeometryChange(Shape2i<?> shape) {
// propagate a stored shape's geometry change to the enclosing multishape
onContentGeometryChange();
}
@Override
public boolean equals(Object obj) {
// equality and hashing are those of the backing list
return this.delegate.equals(obj);
}
@Override
public int hashCode() {
return this.delegate.hashCode();
}
}
}
|
|
/*
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl1.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.kra.protocol.actions.submit;
import org.kuali.kra.common.committee.meeting.CommitteeScheduleMinuteBase;
import org.kuali.kra.drools.brms.FactBean;
import org.kuali.kra.protocol.ProtocolBase;
import org.kuali.kra.protocol.ProtocolDao;
import org.kuali.rice.kim.api.identity.Person;
import org.kuali.rice.krad.service.BusinessObjectService;
import org.kuali.rice.krad.util.GlobalVariables;
import java.util.HashMap;
import java.util.Map;
/*
 * This class holds the condition attributes of the protocol action,
 * i.e., the protocol status, submission status, action type code, etc.
 */
public abstract class ProtocolActionMappingBase implements FactBean {
protected static final String PROTOCOL_NUMBER = "protocolNumber";
protected static final String SEQUENCE_NUMBER = "sequenceNumber";
protected static final String SUBMISSION_NUMBER = "submissionNumber";
protected BusinessObjectService businessObjectService;
protected ProtocolDao<? extends ProtocolBase> dao;
protected String submissionStatusCode;
protected String submissionTypeCode;
protected String protocolReviewTypeCode;
protected String actionTypeCode;
protected String protocolStatusCode;
protected String scheduleId;
protected ProtocolBase protocol;
protected Integer submissionNumber;
protected boolean allowed = false;
public ProtocolActionMappingBase(String actionTypeCode, String submissionStatusCode, String submissionTypeCode, String protocolReviewTypeCode, String protocolStatusCode, String scheduleId, Integer submissionNumber) {
super();
this.actionTypeCode = actionTypeCode;
this.submissionStatusCode = submissionStatusCode;
this.submissionTypeCode = submissionTypeCode;
this.protocolReviewTypeCode = protocolReviewTypeCode;
this.protocolStatusCode = protocolStatusCode;
this.scheduleId = scheduleId;
this.submissionNumber = submissionNumber;
}
public void setBusinessObjectService(BusinessObjectService businessObjectService) {
this.businessObjectService = businessObjectService;
}
public void setDao(ProtocolDao<? extends ProtocolBase> dao) {
this.dao = dao;
}
public void setProtocol(ProtocolBase protocol) {
this.protocol = protocol;
}
public String getProtocolSubmissionScheduleId() {
// TODO : should not need to retrieve from DB because protocol.getProtocolSubmission() is
// the same as the one retrieved. The positiveFieldValues are the pk in coeus.
// this is used in rule for null check.
if (protocol.getProtocolSubmission() == null) {
return null;
} else {
return protocol.getProtocolSubmission().getScheduleId();
}
}
/**
*
* This method if this submission has committee schedule minutes
* @return
*/
public boolean getMinutesCount() {
Map<String, Object> fieldValues = new HashMap<String, Object>();
fieldValues.put(PROTOCOL_NUMBER, protocol.getProtocolNumber());
fieldValues.put(SUBMISSION_NUMBER, protocol.getProtocolSubmission().getSubmissionNumber());
return businessObjectService.countMatching(getCommitteeScheduleMinuteBOClassHook(), fieldValues) > 0;
}
protected abstract Class<? extends CommitteeScheduleMinuteBase> getCommitteeScheduleMinuteBOClassHook();
public abstract boolean getSubmissionCount();
public abstract boolean getSubmissionCountCond2();
/**
*
* This method Check if there are any pending amendmends/renewals for this protocols
* @return
*/
public boolean getSubmissionCountCond3() {
return dao.getProtocolSubmissionCountFromProtocol(protocol.getProtocolNumber());
}
public abstract boolean getSubmissionCountCond4();
public abstract boolean getSubmissionCountCond5();
public abstract boolean getSubmissionCountForWithdraw();
/**
* This method finds number of reviewers tied to protocol submission. Implementation in lieu of following query
* SELECT count(OSP$PROTOCOL_REVIEWERS.PERSON_ID)
* INTO li_PersonCnt
* FROM OSP$PROTOCOL_REVIEWERS
* WHERE OSP$PROTOCOL_REVIEWERS.PROTOCOL_NUMBER = AS_PROTOCOL_NUMBER
* AND OSP$PROTOCOL_REVIEWERS.SUBMISSION_NUMBER = AS_SUBMISSION_NUMBER;
* @return
*/
public boolean getProtocolReviewerCountCond1() {
return protocol.getProtocolSubmission().getProtocolReviewers().size() > 0;
}
public String getActionTypeCode() {
return actionTypeCode;
}
public void setActionTypeCode(String actionTypeCode) {
this.actionTypeCode = actionTypeCode;
}
public String getSubmissionStatusCode() {
return submissionStatusCode;
}
public void setSubmissionStatusCode(String submissionStatusCode) {
this.submissionStatusCode = submissionStatusCode;
}
public String getSubmissionTypeCode() {
return submissionTypeCode;
}
public void setSubmissionTypeCode(String submissionTypeCode) {
this.submissionTypeCode = submissionTypeCode;
}
public String getProtocolReviewTypeCode() {
return protocolReviewTypeCode;
}
public void setProtocolReviewTypeCode(String protocolReviewTypeCode) {
this.protocolReviewTypeCode = protocolReviewTypeCode;
}
public String getProtocolStatusCode() {
return protocolStatusCode;
}
public void setProtocolStatusCode(String protocolStatusCode) {
this.protocolStatusCode = protocolStatusCode;
}
public String getScheduleId() {
return scheduleId;
}
public void setScheduleId(String scheduleId) {
this.scheduleId = scheduleId;
}
public Integer getSubmissionNumber() {
return submissionNumber;
}
public void setSubmissionNumber(Integer submissionNumber) {
this.submissionNumber = submissionNumber;
}
public boolean isAllowed() {
return allowed;
}
public void setAllowed(boolean allowed) {
this.allowed = allowed;
}
/**
* check if user is PI
*/
public boolean isPrincipalInvestigator() {
Person user = GlobalVariables.getUserSession().getPerson();
boolean isPi = false;
if (user.getPrincipalId().equals(protocol.getPrincipalInvestigator().getPersonId())) {
isPi = true;
}
return isPi;
}
}
|
|
/*
* Copyright 2000-2017 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
*/
package com.intellij.debugger.ui.tree.render;
import com.intellij.debugger.DebuggerManager;
import com.intellij.debugger.engine.*;
import com.intellij.debugger.engine.evaluation.EvaluateException;
import com.intellij.debugger.engine.evaluation.EvaluateExceptionUtil;
import com.intellij.debugger.engine.evaluation.EvaluationContext;
import com.intellij.debugger.engine.jdi.ThreadReferenceProxy;
import com.intellij.debugger.engine.managerThread.SuspendContextCommand;
import com.intellij.debugger.impl.DebuggerUtilsImpl;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.rt.debugger.BatchEvaluatorServer;
import com.intellij.util.containers.HashMap;
import com.sun.jdi.*;
import one.util.streamex.StreamEx;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
/**
 * Batches debugger toString() evaluations for one suspend context: instead of
 * one remote invocation per value, buffered {@link ToStringCommand}s are sent
 * in a single call to a BatchEvaluatorServer instance created in the debuggee
 * VM. Falls back to per-command evaluation when the server class is absent
 * (e.g. remote sessions) or the registry flag is off.
 */
public class BatchEvaluator {
    private static final Logger LOG = Logger.getInstance("#com.intellij.debugger.ui.tree.render.BatchEvaluator");

    private final DebugProcess myDebugProcess;
    // Lazily-resolved debuggee-side evaluator; reset when the process detaches.
    private boolean myBatchEvaluatorChecked;
    private ObjectReference myBatchEvaluatorObject;
    private Method myBatchEvaluatorMethod;

    private static final Key<BatchEvaluator> BATCH_EVALUATOR_KEY = new Key<>("BatchEvaluator");
    public static final Key<Boolean> REMOTE_SESSION_KEY = new Key<>("is_remote_session_key");

    // Commands buffered per suspend context until the flush command runs.
    private final HashMap<SuspendContext, List<ToStringCommand>> myBuffer = new HashMap<>();

    private BatchEvaluator(DebugProcess process) {
        myDebugProcess = process;
        myDebugProcess.addDebugProcessListener(new DebugProcessListener() {
            public void processDetached(DebugProcess process, boolean closedByUser) {
                // The cached evaluator lives in the debuggee VM; forget it so the
                // next session re-resolves from scratch.
                myBatchEvaluatorChecked = false;
                myBatchEvaluatorObject = null;
                myBatchEvaluatorMethod = null;
            }
        });
    }

    /**
     * Resolves (once per session) the BatchEvaluatorServer instance and its
     * evaluate method in the debuggee VM.
     *
     * @return true if batch evaluation is available for this process
     */
    @SuppressWarnings({"HardCodedStringLiteral"}) public boolean hasBatchEvaluator(EvaluationContext evaluationContext) {
        if (!myBatchEvaluatorChecked) {
            myBatchEvaluatorChecked = true;
            if (DebuggerUtilsImpl.isRemote(myDebugProcess)) {
                // optimization: for remote sessions the BatchEvaluator is not there for sure
                return false;
            }
            ThreadReferenceProxy thread = evaluationContext.getSuspendContext().getThread();
            if (thread == null) {
                return false;
            }
            ThreadReference threadReference = thread.getThreadReference();
            if (threadReference == null) {
                return false;
            }
            ClassType batchEvaluatorClass = null;
            try {
                batchEvaluatorClass = (ClassType)myDebugProcess.findClass(evaluationContext, BatchEvaluatorServer.class.getName(),
                                                                          evaluationContext.getClassLoader());
            }
            catch (EvaluateException e) {
                // ignored: class not loadable in the debuggee means no batching
            }
            if (batchEvaluatorClass != null) {
                Method constructor = batchEvaluatorClass.concreteMethodByName(JVMNameUtil.CONSTRUCTOR_NAME, "()V");
                if (constructor != null) {
                    ObjectReference evaluator = null;
                    try {
                        evaluator = myDebugProcess.newInstance(evaluationContext, batchEvaluatorClass, constructor, Collections.emptyList());
                    }
                    catch (Exception e) {
                        LOG.debug(e);
                    }
                    myBatchEvaluatorObject = evaluator;
                    if (myBatchEvaluatorObject != null) {
                        myBatchEvaluatorMethod = batchEvaluatorClass.concreteMethodByName("evaluate", "([Ljava/lang/Object;)[Ljava/lang/Object;");
                    }
                }
            }
        }
        // Availability is signalled by the resolved method, not just the object.
        return myBatchEvaluatorMethod != null;
    }

    /**
     * Queues a toString command. Without batching the command is dispatched
     * directly; with batching the first command per suspend context schedules
     * a flush that evaluates the whole buffer in one remote call.
     */
    public void invoke(ToStringCommand command) {
        LOG.assertTrue(DebuggerManager.getInstance(myDebugProcess.getProject()).isDebuggerManagerThread());
        final EvaluationContext evaluationContext = command.getEvaluationContext();
        final SuspendContext suspendContext = evaluationContext.getSuspendContext();
        if (!Registry.is("debugger.batch.evaluation") || !hasBatchEvaluator(evaluationContext)) {
            myDebugProcess.getManagerThread().invokeCommand(command);
        }
        else {
            List<ToStringCommand> toStringCommands = myBuffer.get(suspendContext);
            if (toStringCommands == null) {
                // First command for this context: create the buffer and schedule
                // the flush; later commands just append to the same list.
                final List<ToStringCommand> commands = new ArrayList<>();
                toStringCommands = commands;
                myBuffer.put(suspendContext, commands);
                myDebugProcess.getManagerThread().invokeCommand(new SuspendContextCommand() {
                    public SuspendContext getSuspendContext() {
                        return suspendContext;
                    }

                    public void action() {
                        myBuffer.remove(suspendContext);
                        // On batch failure, fall back to evaluating each command
                        // individually.
                        if (!doEvaluateBatch(commands, evaluationContext)) {
                            commands.forEach(ToStringCommand::action);
                        }
                    }

                    public void commandCancelled() {
                        myBuffer.remove(suspendContext);
                    }
                });
            }
            toStringCommands.add(command);
        }
    }

    /**
     * Returns the per-process singleton, creating and caching it in the
     * process user data on first use.
     */
    public static BatchEvaluator getBatchEvaluator(DebugProcess debugProcess) {
        BatchEvaluator batchEvaluator = debugProcess.getUserData(BATCH_EVALUATOR_KEY);
        if (batchEvaluator == null) {
            batchEvaluator = new BatchEvaluator(debugProcess);
            debugProcess.putUserData(BATCH_EVALUATOR_KEY, batchEvaluator);
        }
        return batchEvaluator;
    }

    /**
     * Sends all buffered values to the debuggee-side evaluator in one call and
     * distributes the resulting strings (or errors) back to the commands.
     *
     * @return true if the batch succeeded; false means the caller must fall
     *         back to per-command evaluation
     */
    @SuppressWarnings({"HardCodedStringLiteral"})
    private boolean doEvaluateBatch(List<ToStringCommand> requests, EvaluationContext evaluationContext) {
        try {
            DebugProcess debugProcess = evaluationContext.getDebugProcess();
            List<Value> values = StreamEx.of(requests).map(ToStringCommand::getValue).toList();
            ArrayType objectArrayClass = (ArrayType)debugProcess.findClass(
                evaluationContext,
                "java.lang.Object[]",
                evaluationContext.getClassLoader());
            if (objectArrayClass == null) {
                return false;
            }
            ArrayReference argArray = debugProcess.newInstance(objectArrayClass, values.size());
            ((SuspendContextImpl)evaluationContext.getSuspendContext()).keep(argArray); // to avoid ObjectCollectedException
            argArray.setValues(values);
            List argList = new ArrayList(1);
            argList.add(argArray);
            Value value = debugProcess.invokeMethod(evaluationContext, myBatchEvaluatorObject,
                                                    myBatchEvaluatorMethod, argList);
            if (value instanceof ArrayReference) {
                ((SuspendContextImpl)evaluationContext.getSuspendContext()).keep((ArrayReference)value); // to avoid ObjectCollectedException for both the array and its elements
                final ArrayReference strings = (ArrayReference)value;
                final List<Value> allValuesArray = strings.getValues();
                final Value[] allValues = allValuesArray.toArray(new Value[allValuesArray.size()]);
                int idx = 0;
                // Results arrive positionally: result i corresponds to request i.
                for (Iterator<ToStringCommand> iterator = requests.iterator(); iterator.hasNext(); idx++) {
                    ToStringCommand request = iterator.next();
                    final Value strValue = allValues[idx];
                    if (strValue == null || strValue instanceof StringReference) {
                        try {
                            String str = (strValue == null)? null : ((StringReference)strValue).value();
                            request.evaluationResult(str);
                        }
                        catch (ObjectCollectedException e) {
                            // ignored
                        }
                    }
                    else if (strValue instanceof ObjectReference) {
                        // A non-string object in the slot is the thrown exception
                        // from evaluating that element's toString().
                        request.evaluationError(EvaluateExceptionUtil.createEvaluateException(new InvocationException((ObjectReference)strValue)).getMessage());
                    }
                    else {
                        LOG.assertTrue(false);
                    }
                    request.setEvaluated();
                }
            }
            return true;
        }
        catch (ClassNotLoadedException | ObjectCollectedException | EvaluateException | InvalidTypeException e) {
            // ignored: any failure here triggers the per-command fallback below
        }
        return false;
    }
}
|
|
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2014.09.17 at 10:19:27 AM IST
//
package com.pacificmetrics.saaif.passage1;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlID;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
/**
 * JAXB binding for the XHTML {@code <sub>} (subscript) element: an
 * {@code Inline} extension carrying the common XHTML attribute group
 * (core, i18n and event attributes). Generated-schema equivalent:
 * an anonymous complexType extending {@code Inline} with attGroup
 * {@code attrs}.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "")
@XmlRootElement(name = "sub")
public class Sub
    extends Inline
{

    @XmlAttribute(name = "id")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlID
    @XmlSchemaType(name = "ID")
    protected String id;
    @XmlAttribute(name = "class")
    @XmlSchemaType(name = "NMTOKENS")
    protected List<String> clazz;
    @XmlAttribute(name = "style")
    protected String style;
    @XmlAttribute(name = "title")
    protected String title;
    @XmlAttribute(name = "onclick")
    protected String onclick;
    @XmlAttribute(name = "ondblclick")
    protected String ondblclick;
    @XmlAttribute(name = "onmousedown")
    protected String onmousedown;
    @XmlAttribute(name = "onmouseup")
    protected String onmouseup;
    @XmlAttribute(name = "onmouseover")
    protected String onmouseover;
    @XmlAttribute(name = "onmousemove")
    protected String onmousemove;
    @XmlAttribute(name = "onmouseout")
    protected String onmouseout;
    @XmlAttribute(name = "onkeypress")
    protected String onkeypress;
    @XmlAttribute(name = "onkeydown")
    protected String onkeydown;
    @XmlAttribute(name = "onkeyup")
    protected String onkeyup;
    @XmlAttribute(name = "lang")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    protected String langCode;
    @XmlAttribute(name = "lang", namespace = "http://www.w3.org/XML/1998/namespace")
    protected String lang;
    @XmlAttribute(name = "dir")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    protected String dir;

    /** Returns the {@code id} attribute, or {@code null} if unset. */
    public String getId() {
        return this.id;
    }

    /** Sets the {@code id} attribute. */
    public void setId(String value) {
        this.id = value;
    }

    /**
     * Returns the live, lazily-created list backing the {@code class}
     * attribute; mutations are reflected in this object, which is why no
     * setter exists. Add tokens via {@code getClazz().add(token)}.
     */
    public List<String> getClazz() {
        if (this.clazz == null) {
            this.clazz = new ArrayList<String>();
        }
        return this.clazz;
    }

    /** Returns the {@code style} attribute, or {@code null} if unset. */
    public String getStyle() {
        return this.style;
    }

    /** Sets the {@code style} attribute. */
    public void setStyle(String value) {
        this.style = value;
    }

    /** Returns the {@code title} attribute, or {@code null} if unset. */
    public String getTitle() {
        return this.title;
    }

    /** Sets the {@code title} attribute. */
    public void setTitle(String value) {
        this.title = value;
    }

    /** Returns the {@code onclick} event handler, or {@code null} if unset. */
    public String getOnclick() {
        return this.onclick;
    }

    /** Sets the {@code onclick} event handler. */
    public void setOnclick(String value) {
        this.onclick = value;
    }

    /** Returns the {@code ondblclick} event handler, or {@code null} if unset. */
    public String getOndblclick() {
        return this.ondblclick;
    }

    /** Sets the {@code ondblclick} event handler. */
    public void setOndblclick(String value) {
        this.ondblclick = value;
    }

    /** Returns the {@code onmousedown} event handler, or {@code null} if unset. */
    public String getOnmousedown() {
        return this.onmousedown;
    }

    /** Sets the {@code onmousedown} event handler. */
    public void setOnmousedown(String value) {
        this.onmousedown = value;
    }

    /** Returns the {@code onmouseup} event handler, or {@code null} if unset. */
    public String getOnmouseup() {
        return this.onmouseup;
    }

    /** Sets the {@code onmouseup} event handler. */
    public void setOnmouseup(String value) {
        this.onmouseup = value;
    }

    /** Returns the {@code onmouseover} event handler, or {@code null} if unset. */
    public String getOnmouseover() {
        return this.onmouseover;
    }

    /** Sets the {@code onmouseover} event handler. */
    public void setOnmouseover(String value) {
        this.onmouseover = value;
    }

    /** Returns the {@code onmousemove} event handler, or {@code null} if unset. */
    public String getOnmousemove() {
        return this.onmousemove;
    }

    /** Sets the {@code onmousemove} event handler. */
    public void setOnmousemove(String value) {
        this.onmousemove = value;
    }

    /** Returns the {@code onmouseout} event handler, or {@code null} if unset. */
    public String getOnmouseout() {
        return this.onmouseout;
    }

    /** Sets the {@code onmouseout} event handler. */
    public void setOnmouseout(String value) {
        this.onmouseout = value;
    }

    /** Returns the {@code onkeypress} event handler, or {@code null} if unset. */
    public String getOnkeypress() {
        return this.onkeypress;
    }

    /** Sets the {@code onkeypress} event handler. */
    public void setOnkeypress(String value) {
        this.onkeypress = value;
    }

    /** Returns the {@code onkeydown} event handler, or {@code null} if unset. */
    public String getOnkeydown() {
        return this.onkeydown;
    }

    /** Sets the {@code onkeydown} event handler. */
    public void setOnkeydown(String value) {
        this.onkeydown = value;
    }

    /** Returns the {@code onkeyup} event handler, or {@code null} if unset. */
    public String getOnkeyup() {
        return this.onkeyup;
    }

    /** Sets the {@code onkeyup} event handler. */
    public void setOnkeyup(String value) {
        this.onkeyup = value;
    }

    /** Returns the unqualified {@code lang} attribute, or {@code null} if unset. */
    public String getLangCode() {
        return this.langCode;
    }

    /** Sets the unqualified {@code lang} attribute. */
    public void setLangCode(String value) {
        this.langCode = value;
    }

    /** Returns the {@code xml:lang} attribute, or {@code null} if unset. */
    public String getLang() {
        return this.lang;
    }

    /** Sets the {@code xml:lang} attribute. */
    public void setLang(String value) {
        this.lang = value;
    }

    /** Returns the {@code dir} (text direction) attribute, or {@code null} if unset. */
    public String getDir() {
        return this.dir;
    }

    /** Sets the {@code dir} (text direction) attribute. */
    public void setDir(String value) {
        this.dir = value;
    }

}
|
|
/**
* Copyright Microsoft Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.microsoft.azure.storage.file;
import com.microsoft.azure.storage.NameValidator;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageErrorCodeStrings;
import com.microsoft.azure.storage.StorageEvent;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.TestRunners.CloudTests;
import com.microsoft.azure.storage.TestRunners.DevFabricTests;
import com.microsoft.azure.storage.TestRunners.DevStoreTests;
import com.microsoft.azure.storage.TestRunners.SlowTests;
import com.microsoft.azure.storage.core.SR;
import com.microsoft.azure.storage.core.UriQueryBuilder;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URISyntaxException;
import java.util.Calendar;
import java.util.Date;
import java.util.EnumSet;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.TimeZone;
import static org.junit.Assert.*;
/**
* File Share Tests
*/
@Category({ DevFabricTests.class, DevStoreTests.class, CloudTests.class })
public class CloudFileShareTests {
protected static CloudFileClient client;
protected CloudFileShare share;
@Before
public void fileShareTestMethodSetUp() throws StorageException, URISyntaxException {
    // Each test gets a fresh, uniquely-named share reference (not yet created
    // on the service); tearDown deletes it if the test created it.
    this.share = FileTestHelper.getRandomShareReference();
}
@After
public void fileShareTestMethodTearDown() throws StorageException {
    // Best-effort cleanup, including any snapshots taken during the test;
    // deleteIfExists is a no-op when the share was never created.
    this.share.deleteIfExists(DeleteShareSnapshotsOption.INCLUDE_SNAPSHOTS, null, null, null);
}
/**
* Test share name validation.
*/
/**
 * Share name validation: valid names pass, each class of invalid name is
 * rejected with the documented message.
 */
@Test
public void testCloudShareNameValidation()
{
    // Valid names: lowercase alphanumerics and single dashes.
    for (String validName : new String[] { "alpha", "4lphanum3r1c", "middle-dash" }) {
        NameValidator.validateShareName(validName);
    }

    final String namingMessage = "Invalid share name. Check MSDN for more information about valid naming.";
    final String lengthMessage = "Invalid share name length. The name must be between 3 and 63 characters long.";

    invalidShareTestHelper(null, "Null not allowed.", "Invalid share name. The name may not be null, empty, or whitespace only.");
    invalidShareTestHelper("$root", "Alphanumeric or dashes only.", namingMessage);
    invalidShareTestHelper("double--dash", "No double dash.", namingMessage);
    invalidShareTestHelper("CapsLock", "Lowercase only.", namingMessage);
    invalidShareTestHelper("illegal$char", "Alphanumeric or dashes only.", namingMessage);
    invalidShareTestHelper("illegal!char", "Alphanumeric or dashes only.", namingMessage);
    invalidShareTestHelper("white space", "Alphanumeric or dashes only.", namingMessage);
    invalidShareTestHelper("2c", "Between 3 and 63 characters.", lengthMessage);
    // 64 'n' characters: one past the maximum length.
    invalidShareTestHelper(new String(new char[64]).replace("\0", "n"), "Between 3 and 63 characters.", lengthMessage);
}
/**
 * Asserts that {@code NameValidator.validateShareName(shareName)} throws an
 * {@link IllegalArgumentException} carrying exactly {@code exceptionMessage};
 * fails with {@code failMessage} if no exception is thrown.
 */
private void invalidShareTestHelper(String shareName, String failMessage, String exceptionMessage)
{
    try {
        NameValidator.validateShareName(shareName);
        fail(failMessage);
    } catch (IllegalArgumentException e) {
        assertEquals(exceptionMessage, e.getMessage());
    }
}
/**
* Validate share references
*
* @throws StorageException
* @throws URISyntaxException
*/
/**
 * Navigating from nested directories back to their share yields the same
 * storage URI as the original share reference.
 */
@Test
public void testCloudFileShareReference() throws StorageException, URISyntaxException {
    CloudFileClient fileClient = FileTestHelper.createCloudFileClient();
    CloudFileShare shareRef = fileClient.getShareReference("share");
    CloudFileDirectory outerDir = shareRef.getRootDirectoryReference().getDirectoryReference("directory3");
    CloudFileDirectory innerDir = outerDir.getDirectoryReference("directory4");

    String expectedUri = shareRef.getStorageUri().toString();
    assertEquals(expectedUri, outerDir.getShare().getStorageUri().toString());
    assertEquals(expectedUri, innerDir.getShare().getStorageUri().toString());
    assertEquals(expectedUri, innerDir.getParent().getShare().getStorageUri().toString());
}
/**
* Try to create a share after it is created
*
* @throws StorageException
*/
/**
 * Creating a share that already exists must fail with error code
 * {@code ShareAlreadyExists} and HTTP 409.
 */
@Test
public void testCloudFileShareCreate() throws StorageException {
    this.share.create();
    assertTrue(this.share.exists());
    try {
        this.share.create();
        fail("Share already existed but was created anyway.");
    }
    catch (StorageException e) {
        // Fix: JUnit's assertEquals takes (expected, actual); the original had
        // the arguments reversed, producing misleading failure messages.
        assertEquals("ShareAlreadyExists", e.getErrorCode());
        assertEquals(409, e.getHttpStatusCode());
        assertEquals("The specified share already exists.", e.getMessage());
    }
}
/**
* CreateIfNotExists test.
*
* @throws StorageException
*/
/**
 * createIfNotExists returns true only on the call that actually creates
 * the share.
 */
@Test
public void testCloudFileShareCreateIfNotExists() throws StorageException {
    boolean createdFirstTime = this.share.createIfNotExists();
    assertTrue(createdFirstTime);
    assertTrue(this.share.exists());
    // Second call is a no-op and must report that nothing was created.
    boolean createdSecondTime = this.share.createIfNotExists();
    assertFalse(createdSecondTime);
}
/**
* DeleteIfExists test.
*
* @throws StorageException
*/
/**
 * deleteIfExists returns true only when a share was actually deleted.
 */
@Test
public void testCloudFileShareDeleteIfExists() throws StorageException {
    // Nothing created yet, so nothing to delete.
    assertFalse(this.share.deleteIfExists());
    this.share.create();
    // Now the share exists, so deletion succeeds exactly once.
    assertTrue(this.share.deleteIfExists());
    assertFalse(this.share.exists());
    assertFalse(this.share.deleteIfExists());
}
/**
* Check a share's existence
*
* @throws StorageException
*/
/**
 * exists() tracks the share's service-side lifecycle, and a created share
 * exposes an ETag.
 */
@Test
public void testCloudFileShareExists() throws StorageException {
    assertFalse(this.share.exists());

    this.share.create();
    assertTrue(this.share.exists());
    assertNotNull(this.share.getProperties().getEtag());

    this.share.delete();
    assertFalse(this.share.exists());
}
/**
* Set and delete share permissions
*
* @throws URISyntaxException
* @throws StorageException
* @throws InterruptedException
*/
/**
 * Uploading shared-access policies is reflected on a second reference to the
 * same share, and clearing them propagates as well. Slow: waits 30s after
 * each upload for service-side propagation.
 */
@Test
@Category({ SlowTests.class, DevFabricTests.class, DevStoreTests.class })
public void testCloudFileShareSetPermissions()
        throws StorageException, InterruptedException, URISyntaxException {
    CloudFileClient fileClient = FileTestHelper.createCloudFileClient();
    this.share.create();

    FileSharePermissions permissions = this.share.downloadPermissions();
    assertEquals(0, permissions.getSharedAccessPolicies().size());

    // Policy window: [now, now + 30 minutes].
    final Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
    final Date startTime = calendar.getTime();
    calendar.add(Calendar.MINUTE, 30);
    final Date expiryTime = calendar.getTime();

    SharedAccessFilePolicy policy = new SharedAccessFilePolicy();
    policy.setPermissions(EnumSet.of(SharedAccessFilePermissions.LIST, SharedAccessFilePermissions.CREATE));
    policy.setSharedAccessStartTime(startTime);
    policy.setSharedAccessExpiryTime(expiryTime);
    permissions.getSharedAccessPolicies().put("key1", policy);

    // Set permissions and wait for them to propagate
    this.share.uploadPermissions(permissions);
    Thread.sleep(30000);

    // Check if permissions were set
    CloudFileShare share2 = fileClient.getShareReference(this.share.getName());
    assertPermissionsEqual(permissions, share2.downloadPermissions());

    // Clear permissions and wait for them to propagate
    permissions.getSharedAccessPolicies().clear();
    this.share.uploadPermissions(permissions);
    Thread.sleep(30000);

    // Check if permissions were cleared
    assertPermissionsEqual(permissions, share2.downloadPermissions());
}
/**
* Get permissions from string
*/
/**
 * setPermissionsFromString parses each permission letter (r, c, w, d, l)
 * into the corresponding enum constant.
 */
@Test
@Category({ DevFabricTests.class, DevStoreTests.class })
public void testCloudFileSharePermissionsFromString() {
    SharedAccessFilePolicy policy = new SharedAccessFilePolicy();

    policy.setPermissionsFromString("rcwdl");
    EnumSet<SharedAccessFilePermissions> all = EnumSet.of(
            SharedAccessFilePermissions.READ, SharedAccessFilePermissions.CREATE,
            SharedAccessFilePermissions.WRITE, SharedAccessFilePermissions.DELETE,
            SharedAccessFilePermissions.LIST);
    assertEquals(all, policy.getPermissions());

    policy.setPermissionsFromString("rwdl");
    assertEquals(EnumSet.of(SharedAccessFilePermissions.READ, SharedAccessFilePermissions.WRITE,
            SharedAccessFilePermissions.DELETE, SharedAccessFilePermissions.LIST), policy.getPermissions());

    policy.setPermissionsFromString("rwl");
    assertEquals(EnumSet.of(SharedAccessFilePermissions.READ, SharedAccessFilePermissions.WRITE,
            SharedAccessFilePermissions.LIST), policy.getPermissions());

    // Order of letters in the input string is irrelevant.
    policy.setPermissionsFromString("wr");
    assertEquals(EnumSet.of(SharedAccessFilePermissions.WRITE, SharedAccessFilePermissions.READ),
            policy.getPermissions());

    policy.setPermissionsFromString("d");
    assertEquals(EnumSet.of(SharedAccessFilePermissions.DELETE), policy.getPermissions());
}
/**
* Write permission to string
*/
/**
 * permissionsToString renders permission sets in the canonical "rcwdl"
 * letter order, independent of how the EnumSet was built.
 */
@Test
@Category({ DevFabricTests.class, DevStoreTests.class })
public void testCloudFileSharePermissionsToString() {
    SharedAccessFilePolicy policy = new SharedAccessFilePolicy();

    policy.setPermissions(EnumSet.of(SharedAccessFilePermissions.READ, SharedAccessFilePermissions.CREATE,
            SharedAccessFilePermissions.WRITE, SharedAccessFilePermissions.DELETE,
            SharedAccessFilePermissions.LIST));
    assertEquals("rcwdl", policy.permissionsToString());

    policy.setPermissions(EnumSet.of(SharedAccessFilePermissions.READ, SharedAccessFilePermissions.WRITE,
            SharedAccessFilePermissions.DELETE, SharedAccessFilePermissions.LIST));
    assertEquals("rwdl", policy.permissionsToString());

    policy.setPermissions(EnumSet.of(SharedAccessFilePermissions.READ, SharedAccessFilePermissions.WRITE,
            SharedAccessFilePermissions.LIST));
    assertEquals("rwl", policy.permissionsToString());

    // Built WRITE-first, still rendered in canonical order.
    policy.setPermissions(EnumSet.of(SharedAccessFilePermissions.WRITE, SharedAccessFilePermissions.READ));
    assertEquals("rw", policy.permissionsToString());

    policy.setPermissions(EnumSet.of(SharedAccessFilePermissions.DELETE));
    assertEquals("d", policy.permissionsToString());
}
/**
* Check uploading/downloading share metadata.
*
* @throws StorageException
* @throws URISyntaxException
*/
/**
 * Share metadata round-trip: metadata set at create time, added via
 * uploadMetadata, visible through a second reference and through share
 * listing with METADATA details, and removable by uploading a cleared map.
 */
@Test
public void testCloudFileShareUploadMetadata() throws StorageException, URISyntaxException {
    this.share.getMetadata().put("key1", "value1");
    this.share.create();
    assertEquals(1, this.share.getMetadata().size());
    assertEquals("value1", this.share.getMetadata().get("key1"));

    // A second reference must observe the metadata set at creation.
    CloudFileShare share2 = this.share.getServiceClient().getShareReference(this.share.getName());
    share2.downloadAttributes();
    assertEquals(1, share2.getMetadata().size());
    assertEquals("value1", share2.getMetadata().get("key1"));

    this.share.getMetadata().put("key2", "value2");
    assertEquals(2, this.share.getMetadata().size());
    assertEquals("value1", this.share.getMetadata().get("key1"));
    assertEquals("value2", this.share.getMetadata().get("key2"));

    this.share.uploadMetadata();
    assertEquals(2, this.share.getMetadata().size());
    assertEquals("value1", this.share.getMetadata().get("key1"));
    assertEquals("value2", this.share.getMetadata().get("key2"));

    share2.downloadAttributes();
    // Fix: the original asserted against this.share here, which trivially
    // passes; the point is that SHARE2 picked up the uploaded metadata.
    assertEquals(2, share2.getMetadata().size());
    assertEquals("value1", share2.getMetadata().get("key1"));
    assertEquals("value2", share2.getMetadata().get("key2"));

    Iterable<CloudFileShare> shares = this.share.getServiceClient().listShares(this.share.getName(),
            EnumSet.of(ShareListingDetails.METADATA), null, null);
    for (CloudFileShare share3 : shares) {
        assertEquals(2, share3.getMetadata().size());
        assertEquals("value1", share3.getMetadata().get("key1"));
        // Fix: copy-paste bug — the original checked this.share instead of
        // the listed share3.
        assertEquals("value2", share3.getMetadata().get("key2"));
    }

    // Uploading an empty map clears the metadata on the service.
    this.share.getMetadata().clear();
    this.share.uploadMetadata();
    share2.downloadAttributes();
    assertEquals(0, share2.getMetadata().size());
}
/**
 * Check uploading/downloading invalid share metadata.
 */
@Test
public void testCloudFileShareInvalidMetadata() {
    // Client-side validation must reject invalid keys (null/empty/whitespace-only)...
    final String[] invalidKeys = { null, "", " ", "\n \t" };
    for (String invalidKey : invalidKeys) {
        testMetadataFailures(this.share, invalidKey, "value1", true);
    }
    // ...and likewise invalid values for an otherwise valid key.
    final String[] invalidValues = { null, "", " ", "\n \t" };
    for (String invalidValue : invalidValues) {
        testMetadataFailures(this.share, "key1", invalidValue, false);
    }
}
/**
 * Asserts that uploading metadata containing the given key/value pair fails
 * client-side with the expected validation message, then removes the pair so
 * the share's metadata map is left unchanged.
 *
 * @param share  share whose metadata map is temporarily modified
 * @param key    metadata key under test
 * @param value  metadata value under test
 * @param badKey true if the key is expected to be invalid, false if the value is
 */
private static void testMetadataFailures(CloudFileShare share, String key, String value, boolean badKey) {
    share.getMetadata().put(key, value);
    // BUGFIX: the fail() message previously always claimed METADATA_KEY_INVALID,
    // which was misleading for the invalid-value cases.
    final String expectedMessage = badKey ? SR.METADATA_KEY_INVALID : SR.METADATA_VALUE_INVALID;
    try {
        share.uploadMetadata();
        fail(expectedMessage);
    }
    catch (StorageException e) {
        assertEquals(expectedMessage, e.getMessage());
    }
    share.getMetadata().remove(key);
}
/**
 * Tests whether Share Stats can be updated and downloaded.
 *
 * @throws StorageException
 * @throws IOException
 * @throws URISyntaxException
 */
@Test
@Category({ CloudTests.class })
public void testGetShareStats() throws StorageException, IOException, URISyntaxException {
    share.createIfNotExists();

    // A freshly created share reports zero usage.
    ShareStats initialStats = share.getStats();
    assertNotNull(initialStats);
    assertEquals(0, initialStats.getUsage());

    // After uploading a 512-byte file the reported usage becomes 1.
    FileTestHelper.uploadNewFile(share, 512, null);
    ShareStats updatedStats = share.getStats();
    assertNotNull(updatedStats);
    assertEquals(1, updatedStats.getUsage());
}
/**
 * Test that Share Quota can be set, but only to allowable values.
 *
 * @throws StorageException
 * @throws URISyntaxException
 */
@Test
public void testCloudFileShareQuota() throws StorageException, URISyntaxException {
// Share quota defaults to 5120
this.share.createIfNotExists();
this.share.downloadAttributes();
assertNotNull(this.share.getProperties().getShareQuota());
// The default quota equals FileConstants.MAX_SHARE_QUOTA per the comment above.
int shareQuota = FileConstants.MAX_SHARE_QUOTA;
assertEquals(shareQuota, this.share.getProperties().getShareQuota().intValue());
// Upload new share quota
shareQuota = 8;
this.share.getProperties().setShareQuota(shareQuota);
this.share.uploadProperties();
// Re-download attributes to confirm the new quota round-trips through the service.
this.share.downloadAttributes();
assertNotNull(this.share.getProperties().getShareQuota());
assertEquals(shareQuota, this.share.getProperties().getShareQuota().intValue());
this.share.delete();
// Create a share with quota already set
shareQuota = 16;
this.share = FileTestHelper.getRandomShareReference();
this.share.getProperties().setShareQuota(shareQuota);
this.share.create();
// Quota must be visible on the local object immediately after create()...
assertNotNull(this.share.getProperties().getShareQuota());
assertEquals(shareQuota, this.share.getProperties().getShareQuota().intValue());
// ...and again after a fresh downloadAttributes().
this.share.downloadAttributes();
assertNotNull(this.share.getProperties().getShareQuota());
assertEquals(shareQuota, this.share.getProperties().getShareQuota().intValue());
// Attempt to set illegal share quota
try {
shareQuota = FileConstants.MAX_SHARE_QUOTA + 1;
this.share.getProperties().setShareQuota(shareQuota);
fail();
} catch (IllegalArgumentException e) {
// setShareQuota validates client-side, so the failure occurs before any service call.
assertEquals(String.format(SR.PARAMETER_NOT_IN_RANGE, "Share Quota", 1, FileConstants.MAX_SHARE_QUOTA),
e.getMessage());
}
}
/**
 * Test that a share quota set at creation time is populated on the shares
 * returned by listShares(). (The previous javadoc was copy-pasted from the
 * quota round-trip test above.)
 *
 * @throws StorageException
 * @throws URISyntaxException
 */
@Test
public void testCloudFileShareQuotaListing() throws StorageException, URISyntaxException {
    final int expectedQuota = 16;
    this.share.getProperties().setShareQuota(expectedQuota);
    this.share.createIfNotExists();

    // Every share listed under this share's name must carry the quota we set.
    Iterable<CloudFileShare> listedShares = this.share.getServiceClient().listShares(this.share.getName());
    for (CloudFileShare listedShare : listedShares) {
        assertEquals(expectedQuota, listedShare.getProperties().getShareQuota().intValue());
    }
}
/**
 * Test specific deleteIfExists case.
 *
 * <p>Verifies deleteIfExists() still returns true when the share is deleted
 * out-of-band while the client's own DELETE request is being sent (simulated
 * via a sending-request listener on the OperationContext).
 *
 * @throws StorageException
 */
@Test
public void testCloudFileShareDeleteIfExistsErrorCode() throws StorageException {
// Deleting a share that was never created must fail with SHARE_NOT_FOUND.
try {
this.share.delete();
fail("Share should not already exist.");
}
catch (StorageException e) {
assertEquals(StorageErrorCodeStrings.SHARE_NOT_FOUND, e.getErrorCode());
}
// Hook the outgoing DELETE request and delete the share first, so the
// client's own DELETE races against an already-deleted share.
OperationContext ctx = new OperationContext();
ctx.getSendingRequestEventHandler().addListener(new StorageEvent<SendingRequestEvent>() {
@Override
public void eventOccurred(SendingRequestEvent eventArg) {
if (((HttpURLConnection) eventArg.getConnectionObject()).getRequestMethod().equals("DELETE")) {
try {
CloudFileShareTests.this.share.delete();
assertFalse(CloudFileShareTests.this.share.exists());
}
catch (StorageException e) {
fail("Delete should succeed.");
}
}
}
});
this.share.create();
// Share deletes succeed before garbage collection occurs.
assertTrue(this.share.deleteIfExists(null, null, ctx));
}
/**
 * Exercises share snapshots end to end: snapshotting a share that contains a
 * directory and file with metadata, verifying that exists() and
 * downloadAttributes() populate snapshot metadata, creating a snapshot with
 * its own metadata, and addressing a snapshot via a sharesnapshot URI query.
 *
 * @throws StorageException
 * @throws URISyntaxException
 * @throws IOException
 */
@Test
public void testCreateShareSnapshot() throws StorageException, URISyntaxException, IOException {
// create share with metadata
this.share.create();
assertTrue(this.share.exists());
HashMap<String, String> shareMeta = new HashMap<String, String>();
shareMeta.put("key1", "value1");
this.share.setMetadata(shareMeta);
this.share.uploadMetadata();
// Populate the share with a directory containing a 1 KB file.
CloudFileDirectory dir1 = this.share.getRootDirectoryReference().getDirectoryReference("dir1");
dir1.create();
CloudFile file1 = dir1.getFileReference("file1");
file1.create(1024);
ByteArrayInputStream srcStream = FileTestHelper.getRandomDataStream(1024);
file1.upload(srcStream, 1024);
// create directory with metadata
HashMap<String, String> dirMeta = new HashMap<String, String>();
dirMeta.put("key2", "value2");
dir1.setMetadata(dirMeta);
dir1.uploadMetadata();
// verify that exists() call on snapshot populates metadata
CloudFileShare snapshot = this.share.createSnapshot();
CloudFileClient client = FileTestHelper.createCloudFileClient();
CloudFileShare snapshotRef = client.getShareReference(snapshot.name, snapshot.snapshotID);
assertTrue(snapshotRef.exists());
assertTrue(snapshotRef.getMetadata().size() == 1 && snapshotRef.getMetadata().get("key1").equals("value1"));
// verify that downloadAttributes() populates metadata
CloudFileShare snapshotRef2 = client.getShareReference(snapshot.name, snapshot.snapshotID);
snapshotRef2.downloadAttributes();
snapshot.downloadAttributes();
assertTrue(snapshotRef2.getMetadata().size() == 1 && snapshotRef2.getMetadata().get("key1").equals("value1"));
assertTrue(snapshot.getMetadata().size() == 1 && snapshot.getMetadata().get("key1").equals("value1"));
// verify that exists() populates the metadata
CloudFileDirectory snapshotDir1 = snapshot.getRootDirectoryReference().getDirectoryReference("dir1");
snapshotDir1.exists();
assertTrue(snapshotDir1.getMetadata().size() == 1 && snapshotDir1.getMetadata().get("key2").equals("value2"));
// verify that downloadAttributes() populates the metadata
CloudFileDirectory snapshotDir2 = snapshot.getRootDirectoryReference().getDirectoryReference("dir1");
snapshotDir2.downloadAttributes();
assertTrue(snapshotDir2.getMetadata().size() == 1 && snapshotDir2.getMetadata().get("key2").equals("value2"));
// create snapshot with metadata
HashMap<String, String> shareMeta2 = new HashMap<String, String>();
shareMeta2.put("abc", "def");
CloudFileShare snapshotRef3 = this.share.createSnapshot(shareMeta2, null, null, null);
CloudFileShare snapshotRef4 = client.getShareReference(snapshotRef3.name, snapshotRef3.snapshotID);
assertTrue(snapshotRef4.exists());
// Snapshot-time metadata overrides the share's metadata for that snapshot.
assertTrue(snapshotRef4.getMetadata().size() == 1 && snapshotRef4.getMetadata().get("abc").equals("def"));
// Address the first snapshot by appending sharesnapshot=<id> to the share URI.
final UriQueryBuilder uriBuilder = new UriQueryBuilder();
uriBuilder.add("sharesnapshot", snapshot.snapshotID);
CloudFileShare snapshotRef5 = new CloudFileShare(uriBuilder.addToURI(this.share.getUri()),
this.share.getServiceClient().getCredentials());
assertEquals(snapshot.snapshotID, snapshotRef5.snapshotID);
assertTrue(snapshotRef5.exists());
snapshot.delete();
}
/**
 * Verifies that a share with snapshots cannot be deleted outright, and that
 * DeleteShareSnapshotsOption.INCLUDE_SNAPSHOTS removes both the share and its
 * snapshots.
 *
 * @throws StorageException
 * @throws URISyntaxException
 * @throws IOException
 */
@Test
public void testDeleteShareSnapshotOptions() throws StorageException, URISyntaxException, IOException {
    // Create a share and take a snapshot of it.
    this.share.create();
    assertTrue(this.share.exists());
    CloudFileShare snapshot = this.share.createSnapshot();
    CloudFileClient client = FileTestHelper.createCloudFileClient();
    CloudFileShare snapshotRef = client.getShareReference(snapshot.name, snapshot.snapshotID);
    assertTrue(snapshotRef.exists());

    // A plain delete must be rejected while snapshots exist.
    try {
        share.delete();
        // BUGFIX: this fail() was missing, so the test silently passed even if
        // the service allowed deleting a share that still had snapshots.
        fail("Deleting a share with snapshots should fail.");
    }
    catch (final StorageException e) {
        assertEquals(StorageErrorCodeStrings.SHARE_HAS_SNAPSHOTS, e.getErrorCode());
    }

    // Deleting with INCLUDE_SNAPSHOTS removes the share and its snapshots.
    share.delete(DeleteShareSnapshotsOption.INCLUDE_SNAPSHOTS, null, null, null);
    assertFalse(share.exists());
    assertFalse(snapshot.exists());
}
/**
 * Verifies that listing files and directories against a share snapshot
 * reflects the share's contents at snapshot time.
 *
 * @throws StorageException
 * @throws URISyntaxException
 */
@Test
public void testListFilesAndDirectoriesWithinShareSnapshot() throws StorageException, URISyntaxException {
    this.share.create();

    // Populate the share: mydir/myfile (1 KB) and mydir/yourDir.
    CloudFileDirectory myDir = this.share.getRootDirectoryReference().getDirectoryReference("mydir");
    myDir.create();
    myDir.getFileReference("myfile").create(1024);
    myDir.getDirectoryReference("yourDir").create();
    assertTrue(this.share.exists());

    CloudFileShare snapshot = this.share.createSnapshot();
    CloudFileClient client = FileTestHelper.createCloudFileClient();
    CloudFileShare snapshotRef = client.getShareReference(snapshot.name, snapshot.snapshotID);

    // The snapshot's root should contain exactly one entry: "mydir".
    int topLevelCount = 0;
    for (ListFileItem item : snapshotRef.getRootDirectoryReference().listFilesAndDirectories()) {
        topLevelCount++;
        assertEquals("mydir", ((CloudFileDirectory) item).getName());
    }
    assertEquals(1, topLevelCount);

    // "mydir" should contain one directory ("yourDir") and one file ("myfile").
    int nestedCount = 0;
    Iterable<ListFileItem> nestedItems =
            snapshotRef.getRootDirectoryReference().getDirectoryReference("mydir").listFilesAndDirectories();
    for (ListFileItem item : nestedItems) {
        nestedCount++;
        if (item instanceof CloudFileDirectory) {
            assertEquals("yourDir", ((CloudFileDirectory) item).getName());
        }
        else {
            assertEquals("myfile", ((CloudFile) item).getName());
        }
    }
    assertEquals(2, nestedCount);

    snapshot.delete();
}
/**
 * Verifies that write-style operations (createSnapshot, downloadPermissions,
 * getStats, uploadMetadata, uploadPermissions, uploadProperties) are rejected
 * client-side with INVALID_OPERATION_FOR_A_SHARE_SNAPSHOT when invoked on a
 * share snapshot.
 *
 * @throws StorageException
 * @throws URISyntaxException
 */
@Test
public void testUnsupportedApisShareSnapshot() throws StorageException, URISyntaxException {
    // NOTE: an unused CloudFileClient local was removed here.
    this.share.create();
    this.share.downloadPermissions();
    CloudFileShare snapshot = this.share.createSnapshot();
    try {
        snapshot.createSnapshot();
        fail("Shouldn't get here");
    }
    catch (IllegalArgumentException e) {
        assertEquals(SR.INVALID_OPERATION_FOR_A_SHARE_SNAPSHOT, e.getMessage());
    }
    try {
        snapshot.downloadPermissions();
        fail("Shouldn't get here");
    }
    catch (IllegalArgumentException e) {
        assertEquals(SR.INVALID_OPERATION_FOR_A_SHARE_SNAPSHOT, e.getMessage());
    }
    try {
        snapshot.getStats();
        fail("Shouldn't get here");
    }
    catch (IllegalArgumentException e) {
        assertEquals(SR.INVALID_OPERATION_FOR_A_SHARE_SNAPSHOT, e.getMessage());
    }
    try {
        snapshot.uploadMetadata();
        fail("Shouldn't get here");
    }
    catch (IllegalArgumentException e) {
        assertEquals(SR.INVALID_OPERATION_FOR_A_SHARE_SNAPSHOT, e.getMessage());
    }
    try {
        FileSharePermissions permissions = new FileSharePermissions();
        snapshot.uploadPermissions(permissions);
        fail("Shouldn't get here");
    }
    catch (IllegalArgumentException e) {
        assertEquals(SR.INVALID_OPERATION_FOR_A_SHARE_SNAPSHOT, e.getMessage());
    }
    try {
        snapshot.uploadProperties();
        fail("Shouldn't get here");
    }
    catch (IllegalArgumentException e) {
        assertEquals(SR.INVALID_OPERATION_FOR_A_SHARE_SNAPSHOT, e.getMessage());
    }
    snapshot.delete();
}
/**
 * Asserts that two FileSharePermissions objects hold the same shared-access
 * policies: same count, same names, and per-policy equal permissions, start
 * time, and expiry time (compared via their string representations).
 */
private static void assertPermissionsEqual(FileSharePermissions expected, FileSharePermissions actual) {
    HashMap<String, SharedAccessFilePolicy> expectedPolicies = expected.getSharedAccessPolicies();
    HashMap<String, SharedAccessFilePolicy> actualPolicies = actual.getSharedAccessPolicies();
    assertEquals("SharedAccessPolicies.Count", expectedPolicies.size(), actualPolicies.size());

    for (String policyName : expectedPolicies.keySet()) {
        assertTrue("Key" + policyName + " doesn't exist", actualPolicies.containsKey(policyName));

        SharedAccessFilePolicy expectedPolicy = expectedPolicies.get(policyName);
        SharedAccessFilePolicy actualPolicy = actualPolicies.get(policyName);

        assertEquals("Policy: " + policyName + "\tPermissions\n",
                expectedPolicy.getPermissions().toString(), actualPolicy.getPermissions().toString());
        assertEquals("Policy: " + policyName + "\tStartDate\n",
                expectedPolicy.getSharedAccessStartTime().toString(),
                actualPolicy.getSharedAccessStartTime().toString());
        assertEquals("Policy: " + policyName + "\tExpireDate\n",
                expectedPolicy.getSharedAccessExpiryTime().toString(),
                actualPolicy.getSharedAccessExpiryTime().toString());
    }
}
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.sql.parser;
import org.apache.flink.sql.parser.ddl.SqlCreateTable;
import org.apache.flink.sql.parser.error.SqlValidateException;
import org.apache.flink.sql.parser.impl.FlinkSqlParserImpl;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParserImplFactory;
import org.apache.calcite.sql.parser.SqlParserTest;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeDiagnosingMatcher;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
/** FlinkSqlParserImpl tests. **/
public class FlinkSqlParserImplTest extends SqlParserTest {
/** Supplies Flink's parser implementation to the inherited Calcite test suite. */
@Override
protected SqlParserImplFactory parserImplFactory() {
return FlinkSqlParserImpl.FACTORY;
}
/** Parses SHOW CATALOGS. */
@Test
public void testShowCatalogs() {
    final String sql = "show catalogs";
    final String expected = "SHOW CATALOGS";
    sql(sql).ok(expected);
}
/** Parses SHOW CURRENT CATALOG. */
@Test
public void testShowCurrentCatalog() {
    final String sql = "show current catalog";
    final String expected = "SHOW CURRENT CATALOG";
    sql(sql).ok(expected);
}
/** Parses DESCRIBE CATALOG; the identifier is upper-cased and quoted. */
@Test
public void testDescribeCatalog() {
    final String sql = "describe catalog a";
    final String expected = "DESCRIBE CATALOG `A`";
    sql(sql).ok(expected);
}
/**
 * Here we override the super method to avoid test error from `describe schema` supported in original calcite.
 */
@Ignore
@Test
public void testDescribeSchema() {
// Intentionally empty: disables the inherited Calcite test case.
}
/** Parses USE CATALOG. */
@Test
public void testUseCatalog() {
    final String sql = "use catalog a";
    final String expected = "USE CATALOG `A`";
    sql(sql).ok(expected);
}
/** Parses CREATE CATALOG with a WITH properties clause. */
@Test
public void testCreateCatalog() {
    final String sql = "create catalog c1\n" +
            " WITH (\n" +
            "  'key1'='value1',\n" +
            "  'key2'='value2'\n" +
            " )\n";
    final String expected = "CREATE CATALOG `C1` " +
            "WITH (\n" +
            "  'key1' = 'value1',\n" +
            "  'key2' = 'value2'\n" +
            ")";
    sql(sql).ok(expected);
}
/** Parses DROP CATALOG. */
@Test
public void testDropCatalog() {
    final String sql = "drop catalog c1";
    final String expected = "DROP CATALOG `C1`";
    sql(sql).ok(expected);
}
/** Parses SHOW DATABASES. */
@Test
public void testShowDataBases() {
    final String sql = "show databases";
    final String expected = "SHOW DATABASES";
    sql(sql).ok(expected);
}
/** Parses SHOW CURRENT DATABASE. */
@Test
public void testShowCurrentDatabase() {
    final String sql = "show current database";
    final String expected = "SHOW CURRENT DATABASE";
    sql(sql).ok(expected);
}
/** Parses USE with a plain and a catalog-qualified database name. */
@Test
public void testUseDataBase() {
    final String[][] cases = {
            { "use default_db", "USE `DEFAULT_DB`" },
            { "use defaultCatalog.default_db", "USE `DEFAULTCATALOG`.`DEFAULT_DB`" },
    };
    for (String[] c : cases) {
        sql(c[0]).ok(c[1]);
    }
}
/**
 * Parses CREATE DATABASE variants: plain, IF NOT EXISTS, catalog-qualified,
 * with a COMMENT clause, and with a COMMENT plus WITH properties clause.
 */
@Test
public void testCreateDatabase() {
sql("create database db1").ok("CREATE DATABASE `DB1`");
sql("create database if not exists db1").ok("CREATE DATABASE IF NOT EXISTS `DB1`");
sql("create database catalog1.db1").ok("CREATE DATABASE `CATALOG1`.`DB1`");
final String sql = "create database db1 comment 'test create database'";
final String expected = "CREATE DATABASE `DB1`\n"
+ "COMMENT 'test create database'";
sql(sql).ok(expected);
final String sql1 = "create database db1 comment 'test create database'"
+ "with ( 'key1' = 'value1', 'key2.a' = 'value2.a')";
final String expected1 = "CREATE DATABASE `DB1`\n"
+ "COMMENT 'test create database' WITH (\n"
+ "  'key1' = 'value1',\n"
+ "  'key2.a' = 'value2.a'\n"
+ ")";
sql(sql1).ok(expected1);
}
/** Parses DROP DATABASE; the default drop behavior unparses as RESTRICT. */
@Test
public void testDropDatabase() {
    final String[][] cases = {
            { "drop database db1", "DROP DATABASE `DB1` RESTRICT" },
            { "drop database catalog1.db1", "DROP DATABASE `CATALOG1`.`DB1` RESTRICT" },
            { "drop database db1 RESTRICT", "DROP DATABASE `DB1` RESTRICT" },
            { "drop database db1 CASCADE", "DROP DATABASE `DB1` CASCADE" },
    };
    for (String[] c : cases) {
        sql(c[0]).ok(c[1]);
    }
}
/** Parses ALTER DATABASE ... SET with a property list. */
@Test
public void testAlterDatabase() {
    final String ddl = "alter database db1 set ('key1' = 'value1','key2.a' = 'value2.a')";
    final String expectedUnparse = "ALTER DATABASE `DB1` SET (\n"
            + "  'key1' = 'value1',\n"
            + "  'key2.a' = 'value2.a'\n"
            + ")";
    sql(ddl).ok(expectedUnparse);
}
/** Parses DESCRIBE DATABASE: plain, catalog-qualified, and EXTENDED forms. */
@Test
public void testDescribeDatabase() {
    final String[][] cases = {
            { "describe database db1", "DESCRIBE DATABASE `DB1`" },
            { "describe database catlog1.db1", "DESCRIBE DATABASE `CATLOG1`.`DB1`" },
            { "describe database extended db1", "DESCRIBE DATABASE EXTENDED `DB1`" },
    };
    for (String[] c : cases) {
        sql(c[0]).ok(c[1]);
    }
}
/** Parses ALTER FUNCTION variants: plain, TEMPORARY, TEMPORARY SYSTEM, and LANGUAGE clauses. */
@Test
public void testAlterFunction() {
    final String[][] cases = {
            { "alter function function1 as 'org.apache.fink.function.function1'",
                    "ALTER FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1'" },
            { "alter temporary function function1 as 'org.apache.fink.function.function1'",
                    "ALTER TEMPORARY FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1'" },
            { "alter temporary function function1 as 'org.apache.fink.function.function1' language scala",
                    "ALTER TEMPORARY FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1' LANGUAGE SCALA" },
            { "alter temporary system function function1 as 'org.apache.fink.function.function1'",
                    "ALTER TEMPORARY SYSTEM FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1'" },
            { "alter temporary system function function1 as 'org.apache.fink.function.function1' language java",
                    "ALTER TEMPORARY SYSTEM FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1' LANGUAGE JAVA" },
    };
    for (String[] c : cases) {
        sql(c[0]).ok(c[1]);
    }
}
/**
 * Parses SHOW FUNCTIONS with optional database qualifier.
 * NOTE(review): method name has a typo ("Funtions"); kept to avoid churn.
 */
@Test
public void testShowFuntions() {
    final String[][] cases = {
            { "show functions", "SHOW FUNCTIONS" },
            { "show functions db1", "SHOW FUNCTIONS `DB1`" },
            { "show functions catalog1.db1", "SHOW FUNCTIONS `CATALOG1`.`DB1`" },
    };
    for (String[] c : cases) {
        sql(c[0]).ok(c[1]);
    }
}
/** Parses SHOW TABLES. */
@Test
public void testShowTables() {
    final String sql = "show tables";
    final String expected = "SHOW TABLES";
    sql(sql).ok(expected);
}
/** Parses DESCRIBE for tables: plain, fully qualified, and EXTENDED forms. */
@Test
public void testDescribeTable() {
    final String[][] cases = {
            { "describe tbl", "DESCRIBE `TBL`" },
            { "describe catlog1.db1.tbl", "DESCRIBE `CATLOG1`.`DB1`.`TBL`" },
            { "describe extended db1", "DESCRIBE EXTENDED `DB1`" },
    };
    for (String[] c : cases) {
        sql(c[0]).ok(c[1]);
    }
}
/**
 * Here we override the super method to avoid test error from `describe statement` supported in original calcite.
 */
@Ignore
@Test
public void testDescribeStatement() {
// Intentionally empty: disables the inherited Calcite test case.
}
/**
 * Parses ALTER TABLE variants: RENAME TO, SET properties, ADD CONSTRAINT
 * (primary key / unique), and DROP CONSTRAINT.
 */
@Test
public void testAlterTable() {
sql("alter table t1 rename to t2").ok("ALTER TABLE `T1` RENAME TO `T2`");
sql("alter table c1.d1.t1 rename to t2").ok("ALTER TABLE `C1`.`D1`.`T1` RENAME TO `T2`");
final String sql0 = "alter table t1 set ('key1'='value1')";
final String expected0 = "ALTER TABLE `T1` SET (\n"
+ "  'key1' = 'value1'\n"
+ ")";
sql(sql0).ok(expected0);
final String sql1 = "alter table t1 "
+ "add constraint ct1 primary key(a, b) not enforced";
final String expected1 = "ALTER TABLE `T1` "
+ "ADD CONSTRAINT `CT1` PRIMARY KEY (`A`, `B`) NOT ENFORCED";
sql(sql1).ok(expected1);
final String sql2 = "alter table t1 "
+ "add unique(a, b)";
final String expected2 = "ALTER TABLE `T1` "
+ "ADD UNIQUE (`A`, `B`)";
sql(sql2).ok(expected2);
final String sql3 = "alter table t1 drop constraint ct1";
final String expected3 = "ALTER TABLE `T1` DROP CONSTRAINT `CT1`";
sql(sql3).ok(expected3);
}
/**
 * Parses CREATE TABLE with physical, computed, and metadata columns (plain,
 * FROM alias, and VIRTUAL), a composite primary key, PARTITIONED BY, and a
 * WITH clause. Note the duplicate `meta`/`my_meta` column names are accepted
 * at the parse level — presumably rejected later during validation (TODO confirm).
 */
@Test
public void testCreateTable() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a bigint,\n" +
"  h varchar, \n" +
"  g as 2 * (a + 1), \n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'), \n" +
"  b varchar,\n" +
"  proc as PROCTIME(), \n" +
"  meta STRING METADATA, \n" +
"  my_meta STRING METADATA FROM 'meta', \n" +
"  my_meta STRING METADATA FROM 'meta' VIRTUAL, \n" +
"  meta STRING METADATA VIRTUAL, \n" +
"  PRIMARY KEY (a, b)\n" +
")\n" +
"PARTITIONED BY (a, h)\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` BIGINT,\n" +
"  `H` VARCHAR,\n" +
"  `G` AS (2 * (`A` + 1)),\n" +
"  `TS` AS `TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  `B` VARCHAR,\n" +
"  `PROC` AS `PROCTIME`(),\n" +
"  `META` STRING METADATA,\n" +
"  `MY_META` STRING METADATA FROM 'meta',\n" +
"  `MY_META` STRING METADATA FROM 'meta' VIRTUAL,\n" +
"  `META` STRING METADATA VIRTUAL,\n" +
"  PRIMARY KEY (`A`, `B`)\n" +
")\n" +
"PARTITIONED BY (`A`, `H`)\n" +
"WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses CREATE TABLE IF NOT EXISTS with computed columns, a primary key,
 * PARTITIONED BY, and a WITH clause.
 */
@Test
public void testCreateTableIfNotExists() {
final String sql = "CREATE TABLE IF NOT EXISTS tbl1 (\n" +
"  a bigint,\n" +
"  h varchar, \n" +
"  g as 2 * (a + 1), \n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'), \n" +
"  b varchar,\n" +
"  proc as PROCTIME(), \n" +
"  PRIMARY KEY (a, b)\n" +
")\n" +
"PARTITIONED BY (a, h)\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE IF NOT EXISTS `TBL1` (\n" +
"  `A` BIGINT,\n" +
"  `H` VARCHAR,\n" +
"  `G` AS (2 * (`A` + 1)),\n" +
"  `TS` AS `TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  `B` VARCHAR,\n" +
"  `PROC` AS `PROCTIME`(),\n" +
"  PRIMARY KEY (`A`, `B`)\n" +
")\n" +
"PARTITIONED BY (`A`, `H`)\n" +
"WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses CREATE TABLE with column comments, metadata-column comments, and a
 * table-level COMMENT clause; verifies comment placement in the unparse.
 */
@Test
public void testCreateTableWithComment() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a bigint comment 'test column comment AAA.',\n" +
"  h varchar, \n" +
"  g as 2 * (a + 1), \n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'), \n" +
"  b varchar,\n" +
"  proc as PROCTIME(), \n" +
"  meta STRING METADATA COMMENT 'c1', \n" +
"  my_meta STRING METADATA FROM 'meta' COMMENT 'c2', \n" +
"  my_meta STRING METADATA FROM 'meta' VIRTUAL COMMENT 'c3', \n" +
"  meta STRING METADATA VIRTUAL COMMENT 'c4', \n" +
"  PRIMARY KEY (a, b)\n" +
")\n" +
"comment 'test table comment ABC.'\n" +
"PARTITIONED BY (a, h)\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` BIGINT COMMENT 'test column comment AAA.',\n" +
"  `H` VARCHAR,\n" +
"  `G` AS (2 * (`A` + 1)),\n" +
"  `TS` AS `TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  `B` VARCHAR,\n" +
"  `PROC` AS `PROCTIME`(),\n" +
"  `META` STRING METADATA COMMENT 'c1',\n" +
"  `MY_META` STRING METADATA FROM 'meta' COMMENT 'c2',\n" +
"  `MY_META` STRING METADATA FROM 'meta' VIRTUAL COMMENT 'c3',\n" +
"  `META` STRING METADATA VIRTUAL COMMENT 'c4',\n" +
"  PRIMARY KEY (`A`, `B`)\n" +
")\n" +
"COMMENT 'test table comment ABC.'\n" +
"PARTITIONED BY (`A`, `H`)\n" +
"WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses CREATE TABLE where a computed column carries its own COMMENT clause;
 * the comment must follow the computed expression in the unparse.
 */
@Test
public void testCreateTableWithCommentOnComputedColumn() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a bigint comment 'test column comment AAA.',\n" +
"  h varchar, \n" +
"  g as 2 * (a + 1) comment 'test computed column.', \n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'), \n" +
"  b varchar,\n" +
"  proc as PROCTIME(), \n" +
"  PRIMARY KEY (a, b)\n" +
")\n" +
"comment 'test table comment ABC.'\n" +
"PARTITIONED BY (a, h)\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` BIGINT COMMENT 'test column comment AAA.',\n" +
"  `H` VARCHAR,\n" +
"  `G` AS (2 * (`A` + 1)) COMMENT 'test computed column.',\n" +
"  `TS` AS `TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  `B` VARCHAR,\n" +
"  `PROC` AS `PROCTIME`(),\n" +
"  PRIMARY KEY (`A`, `B`)\n" +
")\n" +
"COMMENT 'test table comment ABC.'\n" +
"PARTITIONED BY (`A`, `H`)\n" +
"WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses table-level PRIMARY KEY and UNIQUE constraints inside CREATE TABLE.
 */
@Test
public void testTableConstraints() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a bigint,\n" +
"  h varchar, \n" +
"  g as 2 * (a + 1),\n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  b varchar,\n" +
"  proc as PROCTIME(),\n" +
"  PRIMARY KEY (a, b),\n" +
"  UNIQUE (h, g)\n" +
") with (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` BIGINT,\n" +
"  `H` VARCHAR,\n" +
"  `G` AS (2 * (`A` + 1)),\n" +
"  `TS` AS `TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  `B` VARCHAR,\n" +
"  `PROC` AS `PROCTIME`(),\n" +
"  PRIMARY KEY (`A`, `B`),\n" +
"  UNIQUE (`H`, `G`)\n" +
") WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Same DDL as {@code testTableConstraints}, but checked through the
 * validated-node matcher: after validation the primary-key columns `a` and
 * `b` unparse with NOT NULL (see expected output below).
 */
@Test
public void testTableConstraintsValidated() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a bigint,\n" +
"  h varchar, \n" +
"  g as 2 * (a + 1),\n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  b varchar,\n" +
"  proc as PROCTIME(),\n" +
"  PRIMARY KEY (a, b),\n" +
"  UNIQUE (h, g)\n" +
") with (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` BIGINT NOT NULL,\n" +
"  `H` VARCHAR,\n" +
"  `G` AS (2 * (`A` + 1)),\n" +
"  `TS` AS `TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  `B` VARCHAR NOT NULL,\n" +
"  `PROC` AS `PROCTIME`(),\n" +
"  PRIMARY KEY (`A`, `B`),\n" +
"  UNIQUE (`H`, `G`)\n" +
") WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).node(validated(expected));
}
/**
 * Parses column-level constraints with explicit enforcement: PRIMARY KEY
 * ENFORCED, named CONSTRAINT ... UNIQUE [NOT ENFORCED], and a table-level
 * UNIQUE ... NOT ENFORCED.
 */
@Test
public void testTableConstraintsWithEnforcement() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a bigint primary key enforced comment 'test column comment AAA.',\n" +
"  h varchar constraint ct1 unique not enforced,\n" +
"  g as 2 * (a + 1), \n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  b varchar constraint ct2 unique,\n" +
"  proc as PROCTIME(),\n" +
"  unique (g, ts) not enforced" +
") with (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` BIGINT PRIMARY KEY ENFORCED COMMENT 'test column comment AAA.',\n" +
"  `H` VARCHAR CONSTRAINT `CT1` UNIQUE NOT ENFORCED,\n" +
"  `G` AS (2 * (`A` + 1)),\n" +
"  `TS` AS `TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  `B` VARCHAR CONSTRAINT `CT2` UNIQUE,\n" +
"  `PROC` AS `PROCTIME`(),\n" +
"  UNIQUE (`G`, `TS`) NOT ENFORCED\n" +
") WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Declaring both a column-level and a table-level primary key must be
 * rejected with "Duplicate primary key definition" by the custom matcher.
 */
@Test
public void testDuplicatePk() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a bigint comment 'test column comment AAA.',\n" +
"  h varchar constraint ct1 primary key,\n" +
"  g as 2 * (a + 1), \n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'),\n" +
"  b varchar,\n" +
"  proc as PROCTIME(),\n" +
"  constraint ct2 primary key (b, h)" +
") with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
sql(sql).node(new ValidationMatcher()
.fails("Duplicate primary key definition"));
}
/**
 * Parses a WATERMARK declaration on a physical timestamp column with an
 * interval-subtraction strategy expression.
 */
@Test
public void testCreateTableWithWatermark() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  ts timestamp(3),\n" +
"  id varchar, \n" +
"  watermark FOR ts AS ts - interval '3' second\n" +
")\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `TS` TIMESTAMP(3),\n" +
"  `ID` VARCHAR,\n" +
"  WATERMARK FOR `TS` AS (`TS` - INTERVAL '3' SECOND)\n" +
") WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses a WATERMARK declared on a computed column (ts derived via
 * to_timestamp) rather than a physical column.
 */
@Test
public void testCreateTableWithWatermarkOnComputedColumn() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  log_ts varchar,\n" +
"  ts as to_timestamp(log_ts), \n" +
"  WATERMARK FOR ts AS ts + interval '1' second\n" +
")\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `LOG_TS` VARCHAR,\n" +
"  `TS` AS `TO_TIMESTAMP`(`LOG_TS`),\n" +
"  WATERMARK FOR `TS` AS (`TS` + INTERVAL '1' SECOND)\n" +
") WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses a WATERMARK declared on a nested ROW field (f1.q2.t1).
 */
@Test
public void testCreateTableWithWatermarkOnNestedField() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  f1 row<q1 bigint, q2 row<t1 timestamp, t2 varchar>, q3 boolean>,\n" +
"  WATERMARK FOR f1.q2.t1 AS NOW()\n" +
")\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `F1` ROW< `Q1` BIGINT, `Q2` ROW< `T1` TIMESTAMP, `T2` VARCHAR >, `Q3` BOOLEAN >,\n" +
"  WATERMARK FOR `F1`.`Q2`.`T1` AS `NOW`()\n" +
") WITH (\n" +
"  'connector' = 'kafka',\n" +
"  'kafka.topic' = 'log.test'\n" +
")";
sql(sql).ok(expected);
}
/**
 * A second WATERMARK declaration must be rejected; the ^...^ markers pinpoint
 * the expected error position for the parser-test harness.
 */
@Test
public void testCreateTableWithMultipleWatermark() {
String sql = "CREATE TABLE tbl1 (\n" +
"  f0 bigint,\n" +
"  f1 varchar,\n" +
"  f2 boolean,\n" +
"  WATERMARK FOR f0 AS NOW(),\n" +
"  ^WATERMARK^ FOR f1 AS NOW()\n" +
")\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
sql(sql)
.fails("Multiple WATERMARK statements is not supported yet.");
}
/**
 * A sub-query used as the watermark strategy expression must be rejected;
 * the ^...^ marker pinpoints the expected error position.
 */
@Test
public void testCreateTableWithQueryWatermarkExpression() {
String sql = "CREATE TABLE tbl1 (\n" +
"  f0 bigint,\n" +
"  f1 varchar,\n" +
"  f2 boolean,\n" +
"  WATERMARK FOR f0 AS ^(^SELECT f1 FROM tbl1)\n" +
")\n" +
"  with (\n" +
"    'connector' = 'kafka', \n" +
"    'kafka.topic' = 'log.test'\n" +
")\n";
sql(sql)
.fails("Query expression encountered in illegal context");
}
/**
 * Parses collection column types: ARRAY, MAP, ROW, and MULTISET.
 */
@Test
public void testCreateTableWithComplexType() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a ARRAY<bigint>, \n" +
"  b MAP<int, varchar>,\n" +
"  c ROW<cc0 int, cc1 float, cc2 varchar>,\n" +
"  d MULTISET<varchar>,\n" +
"  PRIMARY KEY (a, b) \n" +
") with (\n" +
"  'x' = 'y', \n" +
"  'asd' = 'data'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` ARRAY< BIGINT >,\n" +
"  `B` MAP< INTEGER, VARCHAR >,\n" +
"  `C` ROW< `CC0` INTEGER, `CC1` FLOAT, `CC2` VARCHAR >,\n" +
"  `D` MULTISET< VARCHAR >,\n" +
"  PRIMARY KEY (`A`, `B`)\n" +
") WITH (\n" +
"  'x' = 'y',\n" +
"  'asd' = 'data'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses nested collection types: ARRAY of ARRAY, MAP keyed by MAP,
 * ROW containing ARRAY, and MULTISET of ARRAY.
 */
@Test
public void testCreateTableWithNestedComplexType() {
final String sql = "CREATE TABLE tbl1 (\n" +
"  a ARRAY<ARRAY<bigint>>, \n" +
"  b MAP<MAP<int, varchar>, ARRAY<varchar>>,\n" +
"  c ROW<cc0 ARRAY<int>, cc1 float, cc2 varchar>,\n" +
"  d MULTISET<ARRAY<int>>,\n" +
"  PRIMARY KEY (a, b) \n" +
") with (\n" +
"  'x' = 'y', \n" +
"  'asd' = 'data'\n" +
")\n";
final String expected = "CREATE TABLE `TBL1` (\n" +
"  `A` ARRAY< ARRAY< BIGINT > >,\n" +
"  `B` MAP< MAP< INTEGER, VARCHAR >, ARRAY< VARCHAR > >,\n" +
"  `C` ROW< `CC0` ARRAY< INTEGER >, `CC1` FLOAT, `CC2` VARCHAR >,\n" +
"  `D` MULTISET< ARRAY< INTEGER > >,\n" +
"  PRIMARY KEY (`A`, `B`)\n" +
") WITH (\n" +
"  'x' = 'y',\n" +
"  'asd' = 'data'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Parses user-defined (catalog- and database-qualified) column types.
 */
@Test
public void testCreateTableWithUserDefinedType() {
final String sql = "create table t(\n" +
"  a catalog1.db1.MyType1,\n" +
"  b db2.MyType2\n" +
") with (\n" +
"  'k1' = 'v1',\n" +
"  'k2' = 'v2'\n" +
")";
final String expected = "CREATE TABLE `T` (\n" +
"  `A` `CATALOG1`.`DB1`.`MYTYPE1`,\n" +
"  `B` `DB2`.`MYTYPE2`\n" +
") WITH (\n" +
"  'k1' = 'v1',\n" +
"  'k2' = 'v2'\n" +
")";
sql(sql).ok(expected);
}
/**
 * Two invalid computed-column forms must fail: a bare function call without
 * "AS" (parse error at the ^(^ marker), and a sub-query as the computed
 * expression.
 */
@Test
public void testInvalidComputedColumn() {
final String sql0 = "CREATE TABLE t1 (\n" +
"  a bigint, \n" +
"  b varchar,\n" +
"  toTimestamp^(^b, 'yyyy-MM-dd HH:mm:ss'), \n" +
"  PRIMARY KEY (a, b) \n" +
") with (\n" +
"  'x' = 'y', \n" +
"  'asd' = 'data'\n" +
")\n";
final String expect0 = "(?s).*Encountered \"\\(\" at line 4, column 14.\n" +
"Was expecting one of:\n" +
"    \"AS\" ...\n" +
"    \"STRING\" ...\n" +
".*";
sql(sql0).fails(expect0);
// Sub-query computed column expression is forbidden.
final String sql1 = "CREATE TABLE t1 (\n" +
"  a bigint, \n" +
"  b varchar,\n" +
"  c as ^(^select max(d) from t2), \n" +
"  PRIMARY KEY (a, b) \n" +
") with (\n" +
"  'x' = 'y', \n" +
"  'asd' = 'data'\n" +
")\n";
final String expect1 = "(?s).*Query expression encountered in illegal context.*";
sql(sql1).fails(expect1);
}
/**
 * Verifies the SQL string produced for the column list (physical plus
 * computed columns with AS aliases) via the custom ValidationMatcher.
 */
@Test
public void testColumnSqlString() {
final String sql = "CREATE TABLE sls_stream (\n" +
"  a bigint, \n" +
"  f as a + 1, \n" +
"  b varchar,\n" +
"  ts as toTimestamp(b, 'yyyy-MM-dd HH:mm:ss'), \n" +
"  proc as PROCTIME(),\n" +
"  c int,\n" +
"  PRIMARY KEY (a, b) \n" +
") with (\n" +
"  'x' = 'y', \n" +
"  'asd' = 'data'\n" +
")\n";
final String expected = "`A`, (`A` + 1) AS `F`, `B`, "
+ "`TOTIMESTAMP`(`B`, 'yyyy-MM-dd HH:mm:ss') AS `TS`, "
+ "`PROCTIME`() AS `PROC`, `C`";
sql(sql).node(new ValidationMatcher()
.expectColumnSql(expected));
}
@Test
public void testCreateTableWithMinusInOptionKey() {
final String sql = "create table source_table(\n" +
" a int,\n" +
" b bigint,\n" +
" c string\n" +
") with (\n" +
" 'a-b-c-d124' = 'ab',\n" +
" 'a.b.1.c' = 'aabb',\n" +
" 'a.b-c-connector.e-f.g' = 'ada',\n" +
" 'a.b-c-d.e-1231.g' = 'ada',\n" +
" 'a.b-c-d.*' = 'adad')\n";
final String expected = "CREATE TABLE `SOURCE_TABLE` (\n" +
" `A` INTEGER,\n" +
" `B` BIGINT,\n" +
" `C` STRING\n" +
") WITH (\n" +
" 'a-b-c-d124' = 'ab',\n" +
" 'a.b.1.c' = 'aabb',\n" +
" 'a.b-c-connector.e-f.g' = 'ada',\n" +
" 'a.b-c-d.e-1231.g' = 'ada',\n" +
" 'a.b-c-d.*' = 'adad'\n" +
")";
sql(sql).ok(expected);
}
@Test
public void testCreateTableWithOptionKeyAsIdentifier() {
final String sql = "create table source_table(\n" +
" a int,\n" +
" b bigint,\n" +
" c string\n" +
") with (\n" +
" ^a^.b.c = 'ab',\n" +
" a.b.c1 = 'aabb')\n";
sql(sql).fails("(?s).*Encountered \"a\" at line 6, column 3.\n.*");
}
/** CREATE TABLE ... LIKE with INCLUDING/EXCLUDING/OVERWRITING merge strategies round-trips. */
@Test
public void testCreateTableWithLikeClause() {
final String sql = "create table source_table(\n" +
" a int,\n" +
" b bigint,\n" +
" c string\n" +
")\n" +
"LIKE parent_table (\n" +
" INCLUDING ALL\n" +
" OVERWRITING OPTIONS\n" +
" EXCLUDING PARTITIONS\n" +
" INCLUDING GENERATED\n" +
" INCLUDING METADATA\n" +
")";
final String expected = "CREATE TABLE `SOURCE_TABLE` (\n" +
" `A` INTEGER,\n" +
" `B` BIGINT,\n" +
" `C` STRING\n" +
")\n" +
"LIKE `PARENT_TABLE` (\n" +
" INCLUDING ALL\n" +
" OVERWRITING OPTIONS\n" +
" EXCLUDING PARTITIONS\n" +
" INCLUDING GENERATED\n" +
" INCLUDING METADATA\n" +
")";
sql(sql).ok(expected);
}
/** TEMPORARY modifier on CREATE TABLE is preserved. */
@Test
public void testCreateTemporaryTable() {
final String sql = "create temporary table source_table(\n" +
" a int,\n" +
" b bigint,\n" +
" c string\n" +
") with (\n" +
" 'x' = 'y',\n" +
" 'abc' = 'def'\n" +
")";
final String expected = "CREATE TEMPORARY TABLE `SOURCE_TABLE` (\n" +
" `A` INTEGER,\n" +
" `B` BIGINT,\n" +
" `C` STRING\n" +
") WITH (\n" +
" 'x' = 'y',\n" +
" 'abc' = 'def'\n" +
")";
sql(sql).ok(expected);
}
/** A column list is optional when a WITH clause is present. */
@Test
public void testCreateTableWithNoColumns() {
final String sql = "create table source_table with (\n" +
" 'x' = 'y',\n" +
" 'abc' = 'def'\n" +
")";
final String expected = "CREATE TABLE `SOURCE_TABLE` WITH (\n" +
" 'x' = 'y',\n" +
" 'abc' = 'def'\n" +
")";
sql(sql).ok(expected);
}
/** Plain DROP TABLE with a fully-qualified name. */
@Test
public void testDropTable() {
final String sql = "DROP table catalog1.db1.tbl1";
final String expected = "DROP TABLE `CATALOG1`.`DB1`.`TBL1`";
sql(sql).ok(expected);
}
/** DROP TABLE IF EXISTS variant. */
@Test
public void testDropIfExists() {
final String sql = "DROP table IF EXISTS catalog1.db1.tbl1";
final String expected = "DROP TABLE IF EXISTS `CATALOG1`.`DB1`.`TBL1`";
sql(sql).ok(expected);
}
/** DROP TEMPORARY TABLE variant. */
@Test
public void testTemporaryDropTable() {
final String sql = "DROP temporary table catalog1.db1.tbl1";
final String expected = "DROP TEMPORARY TABLE `CATALOG1`.`DB1`.`TBL1`";
sql(sql).ok(expected);
}
/** DROP TEMPORARY TABLE IF EXISTS variant. */
@Test
public void testDropTemporaryIfExists() {
final String sql = "DROP temporary table IF EXISTS catalog1.db1.tbl1";
final String expected = "DROP TEMPORARY TABLE IF EXISTS `CATALOG1`.`DB1`.`TBL1`";
sql(sql).ok(expected);
}
/** INSERT with static PARTITION specs, including mixed static/dynamic column lists. */
@Test
public void testInsertPartitionSpecs() {
final String sql1 = "insert into emps(x,y) partition (x='ab', y='bc') select * from emps";
final String expected = "INSERT INTO `EMPS` (`X`, `Y`)\n"
+ "PARTITION (`X` = 'ab', `Y` = 'bc')\n"
+ "(SELECT *\n"
+ "FROM `EMPS`)";
sql(sql1).ok(expected);
final String sql2 = "insert into emp (empno, ename, job, mgr, hiredate,\n"
+ " sal, comm, deptno, slacker)\n"
+ "partition(empno='1', job='job')\n"
+ "select 'nom', 0, timestamp '1970-01-01 00:00:00',\n"
+ " 1, 1, 1, false\n"
+ "from (values 'a')";
sql(sql2).ok("INSERT INTO `EMP` (`EMPNO`, `ENAME`, `JOB`, `MGR`, `HIREDATE`, `SAL`," +
" `COMM`, `DEPTNO`, `SLACKER`)\n"
+ "PARTITION (`EMPNO` = '1', `JOB` = 'job')\n"
+ "(SELECT 'nom', 0, TIMESTAMP '1970-01-01 00:00:00', 1, 1, 1, FALSE\n"
+ "FROM (VALUES (ROW('a'))))");
final String sql3 = "insert into empnullables (empno, ename)\n"
+ "partition(ename='b')\n"
+ "select 1 from (values 'a')";
sql(sql3).ok("INSERT INTO `EMPNULLABLES` (`EMPNO`, `ENAME`)\n"
+ "PARTITION (`ENAME` = 'b')\n"
+ "(SELECT 1\n"
+ "FROM (VALUES (ROW('a'))))");
}
/** Double-quoted identifiers keep their case in partition specs. */
@Test
public void testInsertCaseSensitivePartitionSpecs() {
final String expected = "INSERT INTO `emps` (`x`, `y`)\n"
+ "PARTITION (`x` = 'ab', `y` = 'bc')\n"
+ "(SELECT *\n"
+ "FROM `EMPS`)";
sql("insert into \"emps\"(\"x\",\"y\") "
+ "partition (\"x\"='ab', \"y\"='bc') select * from emps")
.ok(expected);
}
/** An EXTEND column may be used as a static partition column. */
@Test
public void testInsertExtendedColumnAsStaticPartition1() {
final String expected = "INSERT INTO `EMPS` EXTEND (`Z` BOOLEAN) (`X`, `Y`)\n"
+ "PARTITION (`Z` = 'ab')\n"
+ "(SELECT *\n"
+ "FROM `EMPS`)";
sql("insert into emps(z boolean)(x,y) partition (z='ab') select * from emps")
.ok(expected);
}
/** Extended columns inside the plain column list are rejected under the current conformance. */
@Test(expected = SqlParseException.class)
public void testInsertExtendedColumnAsStaticPartition2() {
sql("insert into emps(x, y, z boolean) partition (z='ab') select * from emps")
.node(new ValidationMatcher()
.fails("Extended columns not allowed under the current SQL conformance level"));
}
/** INSERT OVERWRITE for both non-partitioned and partitioned targets. */
@Test
public void testInsertOverwrite() {
// non-partitioned
final String sql = "INSERT OVERWRITE myDB.myTbl SELECT * FROM src";
final String expected = "INSERT OVERWRITE `MYDB`.`MYTBL`\n"
+ "(SELECT *\n"
+ "FROM `SRC`)";
sql(sql).ok(expected);
// partitioned
final String sql1 = "INSERT OVERWRITE myTbl PARTITION (p1='v1',p2='v2') SELECT * FROM src";
final String expected1 = "INSERT OVERWRITE `MYTBL`\n"
+ "PARTITION (`P1` = 'v1', `P2` = 'v2')\n"
+ "(SELECT *\n"
+ "FROM `SRC`)";
sql(sql1).ok(expected1);
}
/** OVERWRITE is only legal with INSERT, not UPSERT. */
@Test
public void testInvalidUpsertOverwrite() {
sql("UPSERT ^OVERWRITE^ myDB.myTbl SELECT * FROM src")
.fails("OVERWRITE expression is only used with INSERT statement.");
}
/** Basic CREATE VIEW round-trip. */
@Test
public void testCreateView() {
final String sql = "create view v as select col1 from tbl";
final String expected = "CREATE VIEW `V`\n" +
"AS\n" +
"SELECT `COL1`\n" +
"FROM `TBL`";
sql(sql).ok(expected);
}
/** An empty field list in CREATE VIEW is a parse error. */
@Test
public void testCreateViewWithInvalidFieldList() {
final String expected = "(?s).*Encountered \"\\)\" at line 1, column 15.\n" +
"Was expecting one of:\n" +
".*\n" +
".*\n" +
".*\n" +
".*\n" +
".*";
sql("CREATE VIEW V(^)^ AS SELECT * FROM TBL")
.fails(expected);
}
/** COMMENT clause on CREATE VIEW is preserved. */
@Test
public void testCreateViewWithComment() {
final String sql = "create view v COMMENT 'this is a view' as select col1 from tbl";
final String expected = "CREATE VIEW `V`\n" +
"COMMENT 'this is a view'\n" +
"AS\n" +
"SELECT `COL1`\n" +
"FROM `TBL`";
sql(sql).ok(expected);
}
/** Explicit field names on CREATE VIEW round-trip. */
@Test
public void testCreateViewWithFieldNames() {
final String sql = "create view v(col1, col2) as select col3, col4 from tbl";
final String expected = "CREATE VIEW `V` (`COL1`, `COL2`)\n" +
"AS\n" +
"SELECT `COL3`, `COL4`\n" +
"FROM `TBL`";
sql(sql).ok(expected);
}
/** '*' is not a valid view field name. */
@Test
public void testCreateViewWithInvalidName() {
final String sql = "create view v(^*^) COMMENT 'this is a view' as select col1 from tbl";
final String expected = "(?s).*Encountered \"\\*\" at line 1, column 15.*";
sql(sql).fails(expected);
}
/** TEMPORARY modifier on CREATE VIEW is preserved. */
@Test
public void testCreateTemporaryView() {
final String sql = "create temporary view v as select col1 from tbl";
final String expected = "CREATE TEMPORARY VIEW `V`\n" +
"AS\n" +
"SELECT `COL1`\n" +
"FROM `TBL`";
sql(sql).ok(expected);
}
/** TEMPORARY + IF NOT EXISTS combination on CREATE VIEW. */
@Test
public void testCreateTemporaryViewIfNotExists() {
final String sql = "create temporary view if not exists v as select col1 from tbl";
final String expected = "CREATE TEMPORARY VIEW IF NOT EXISTS `V`\n" +
"AS\n" +
"SELECT `COL1`\n" +
"FROM `TBL`";
sql(sql).ok(expected);
}
/** IF NOT EXISTS on a non-temporary CREATE VIEW. */
@Test
public void testCreateViewIfNotExists() {
final String sql = "create view if not exists v as select col1 from tbl";
final String expected = "CREATE VIEW IF NOT EXISTS `V`\n" +
"AS\n" +
"SELECT `COL1`\n" +
"FROM `TBL`";
sql(sql).ok(expected);
}
/** DROP VIEW IF EXISTS round-trip. */
@Test
public void testDropView() {
final String sql = "DROP VIEW IF EXISTS view_name";
final String expected = "DROP VIEW IF EXISTS `VIEW_NAME`";
sql(sql).ok(expected);
}
/** DROP TEMPORARY VIEW IF EXISTS round-trip. */
@Test
public void testDropTemporaryView() {
final String sql = "DROP TEMPORARY VIEW IF EXISTS view_name";
final String expected = "DROP TEMPORARY VIEW IF EXISTS `VIEW_NAME`";
sql(sql).ok(expected);
}
/** SHOW VIEWS statement round-trip. */
@Test
public void testShowViews() {
sql("show views").ok("SHOW VIEWS");
}
// Override the test because our ROW field type default is nullable,
// which is different with Calcite.
/** CAST to ROW types: NOT NULL is kept, an explicit NULL is dropped (nullable is the default). */
@Test
public void testCastAsRowType() {
final String expr = "cast(a as row(f0 int, f1 varchar))";
final String expected = "CAST(`A` AS ROW(`F0` INTEGER, `F1` VARCHAR))";
expr(expr).ok(expected);
final String expr1 = "cast(a as row(f0 int not null, f1 varchar null))";
final String expected1 = "CAST(`A` AS ROW(`F0` INTEGER NOT NULL, `F1` VARCHAR))";
expr(expr1).ok(expected1);
// nested rows
final String expr2 = "cast(a as row(f0 row(ff0 int not null, ff1 varchar null) null,"
+ " f1 timestamp not null))";
final String expected2 = "CAST(`A` AS ROW(`F0` ROW(`FF0` INTEGER NOT NULL, `FF1` VARCHAR),"
+ " `F1` TIMESTAMP NOT NULL))";
expr(expr2).ok(expected2);
// row wrapped in collection types
final String expr3 = "cast(a as row(f0 bigint not null, f1 decimal null) array)";
final String expected3 = "CAST(`A` AS ROW(`F0` BIGINT NOT NULL, `F1` DECIMAL) ARRAY)";
expr(expr3).ok(expected3);
final String expr4 = "cast(a as row(f0 varchar not null, f1 timestamp null) multiset)";
final String expected4 = "CAST(`A` AS ROW(`F0` VARCHAR NOT NULL, `F1` TIMESTAMP) MULTISET)";
expr(expr4).ok(expected4);
}
/** CREATE TABLE with only a name parses; validation outcome is checked by the matcher. */
@Test
public void testCreateTableWithNakedTableName() {
String sql = "CREATE TABLE tbl1";
sql(sql).node(new ValidationMatcher());
}
/** A view may project a bare literal with no field list. */
@Test
public void testCreateViewWithEmptyFields() {
String sql = "CREATE VIEW v1 AS SELECT 1";
sql(sql).ok(
"CREATE VIEW `V1`\n"
+ "AS\n"
+ "SELECT 1"
);
}
/** CREATE [TEMPORARY] [SYSTEM] FUNCTION variants, incl. LANGUAGE clause and error cases. */
@Test
public void testCreateFunction() {
sql("create function catalog1.db1.function1 as 'org.apache.fink.function.function1'")
.ok("CREATE FUNCTION `CATALOG1`.`DB1`.`FUNCTION1` AS 'org.apache.fink.function.function1'");
sql("create temporary function catalog1.db1.function1 as 'org.apache.fink.function.function1'")
.ok("CREATE TEMPORARY FUNCTION `CATALOG1`.`DB1`.`FUNCTION1` AS 'org.apache.fink.function.function1'");
sql("create temporary function db1.function1 as 'org.apache.fink.function.function1'")
.ok("CREATE TEMPORARY FUNCTION `DB1`.`FUNCTION1` AS 'org.apache.fink.function.function1'");
sql("create temporary function function1 as 'org.apache.fink.function.function1'")
.ok("CREATE TEMPORARY FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1'");
sql("create temporary function if not exists catalog1.db1.function1 as 'org.apache.fink.function.function1'")
.ok("CREATE TEMPORARY FUNCTION IF NOT EXISTS `CATALOG1`.`DB1`.`FUNCTION1` AS 'org.apache.fink.function.function1'");
sql("create temporary function function1 as 'org.apache.fink.function.function1' language java")
.ok("CREATE TEMPORARY FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1' LANGUAGE JAVA");
sql("create temporary system function function1 as 'org.apache.fink.function.function1' language scala")
.ok("CREATE TEMPORARY SYSTEM FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1' LANGUAGE SCALA");
// Temporary system function always belongs to the system and current session.
sql("create temporary system function catalog1^.^db1.function1 as 'org.apache.fink.function.function1'")
.fails("(?s).*Encountered \".\" at.*");
// TODO: FLINK-17957: Forbidden syntax "CREATE SYSTEM FUNCTION" for sql parser
sql("create system function function1 as 'org.apache.fink.function.function1'")
.ok("CREATE SYSTEM FUNCTION `FUNCTION1` AS 'org.apache.fink.function.function1'");
}
/** DROP TEMPORARY [SYSTEM] FUNCTION [IF EXISTS] variants. */
@Test
public void testDropTemporaryFunction() {
sql("drop temporary function catalog1.db1.function1")
.ok("DROP TEMPORARY FUNCTION `CATALOG1`.`DB1`.`FUNCTION1`");
sql("drop temporary system function catalog1.db1.function1")
.ok("DROP TEMPORARY SYSTEM FUNCTION `CATALOG1`.`DB1`.`FUNCTION1`");
sql("drop temporary function if exists catalog1.db1.function1")
.ok("DROP TEMPORARY FUNCTION IF EXISTS `CATALOG1`.`DB1`.`FUNCTION1`");
sql("drop temporary system function if exists catalog1.db1.function1")
.ok("DROP TEMPORARY SYSTEM FUNCTION IF EXISTS `CATALOG1`.`DB1`.`FUNCTION1`");
}
/**
 * Matcher that validates an {@link ExtendedSqlNode} and then compares its SQL string
 * representation against {@code validatedSql}.
 *
 * @param validatedSql expected SQL string of the node after successful validation
 * @return a diagnosing matcher; it does not match non-{@code ExtendedSqlNode} items
 */
public static BaseMatcher<SqlNode> validated(String validatedSql) {
return new TypeSafeDiagnosingMatcher<SqlNode>() {
@Override
protected boolean matchesSafely(SqlNode item, Description mismatchDescription) {
if (item instanceof ExtendedSqlNode) {
try {
((ExtendedSqlNode) item).validate();
} catch (SqlValidateException e) {
mismatchDescription.appendText("Could not validate the node. Exception: \n");
mismatchDescription.appendValue(e);
// Fix: a node that fails validation must not match. The original fell
// through and still compared the SQL string, so a failing node whose
// string form happened to equal the expectation matched anyway.
return false;
}
String actual = item.toSqlString(null, true).getSql();
return actual.equals(validatedSql);
}
mismatchDescription.appendText("This matcher can be applied only to ExtendedSqlNode.");
return false;
}
@Override
public void describeTo(Description description) {
description.appendText("The validated node string representation should be equal to: \n");
description.appendText(validatedSql);
}
};
}
/** Matcher that invokes the #validate() of the {@link ExtendedSqlNode} instance. **/
private static class ValidationMatcher extends BaseMatcher<SqlNode> {
// Expected result of SqlCreateTable#getColumnSqlString(); null disables the check.
private String expectedColumnSql;
// Expected SqlValidateException message; null means validation is expected to pass.
private String failMsg;
// Fluent setter: assert the derived column SQL string.
public ValidationMatcher expectColumnSql(String s) {
this.expectedColumnSql = s;
return this;
}
// Fluent setter: assert that validate() throws with exactly this message.
public ValidationMatcher fails(String failMsg) {
this.failMsg = failMsg;
return this;
}
@Override
public void describeTo(Description description) {
description.appendText("test");
}
// Assertion-driven: mismatches surface as JUnit assertion failures inside matches(),
// not through the usual matcher-description mechanism.
@Override
public boolean matches(Object item) {
if (item instanceof ExtendedSqlNode) {
ExtendedSqlNode createTable = (ExtendedSqlNode) item;
if (failMsg != null) {
try {
createTable.validate();
fail("expected exception");
} catch (SqlValidateException e) {
assertEquals(failMsg, e.getMessage());
}
}
if (expectedColumnSql != null && item instanceof SqlCreateTable) {
assertEquals(expectedColumnSql,
((SqlCreateTable) createTable).getColumnSqlString());
}
return true;
} else {
return false;
}
}
}
}
|
|
/*
* Copyright 2018 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.rpc.client;
import com.navercorp.pinpoint.common.util.Assert;
import com.navercorp.pinpoint.rpc.DefaultFuture;
import com.navercorp.pinpoint.rpc.Future;
import com.navercorp.pinpoint.rpc.PinpointSocketException;
import com.navercorp.pinpoint.rpc.ResponseMessage;
import com.navercorp.pinpoint.rpc.cluster.ClusterOption;
import com.navercorp.pinpoint.rpc.packet.RequestPacket;
import com.navercorp.pinpoint.rpc.stream.ClientStreamChannel;
import com.navercorp.pinpoint.rpc.stream.ClientStreamChannelContext;
import com.navercorp.pinpoint.rpc.stream.ClientStreamChannelMessageListener;
import com.navercorp.pinpoint.rpc.stream.StreamChannelContext;
import com.navercorp.pinpoint.rpc.stream.StreamChannelStateChangeEventHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.SocketAddress;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
/**
* @author Woonduk Kang(emeroad)
*/
public class DefaultPinpointClient implements PinpointClient {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
private volatile PinpointClientHandler pinpointClientHandler;
private volatile boolean closed;
private List<PinpointClientReconnectEventListener> reconnectEventListeners = new CopyOnWriteArrayList<PinpointClientReconnectEventListener>();
public DefaultPinpointClient(PinpointClientHandler pinpointClientHandler) {
this.pinpointClientHandler = Assert.requireNonNull(pinpointClientHandler, "pinpointClientHandler");
pinpointClientHandler.setPinpointClient(this);
}
@Override
public void reconnectSocketHandler(PinpointClientHandler pinpointClientHandler) {
Assert.requireNonNull(pinpointClientHandler, "pinpointClientHandler");
if (closed) {
logger.warn("reconnectClientHandler(). pinpointClientHandler force close.");
pinpointClientHandler.close();
return;
}
logger.warn("reconnectClientHandler:{}", pinpointClientHandler);
this.pinpointClientHandler = pinpointClientHandler;
notifyReconnectEvent();
}
/*
because reconnectEventListener's constructor contains Dummy and can't be access through setter,
guarantee it is not null.
*/
@Override
public boolean addPinpointClientReconnectEventListener(PinpointClientReconnectEventListener eventListener) {
if (eventListener == null) {
return false;
}
return this.reconnectEventListeners.add(eventListener);
}
@Override
public boolean removePinpointClientReconnectEventListener(PinpointClientReconnectEventListener eventListener) {
if (eventListener == null) {
return false;
}
return this.reconnectEventListeners.remove(eventListener);
}
private void notifyReconnectEvent() {
for (PinpointClientReconnectEventListener eachListener : this.reconnectEventListeners) {
eachListener.reconnectPerformed(this);
}
}
@Override
public void sendSync(byte[] bytes) {
ensureOpen();
pinpointClientHandler.sendSync(bytes);
}
@Override
public Future sendAsync(byte[] bytes) {
ensureOpen();
return pinpointClientHandler.sendAsync(bytes);
}
@Override
public void send(byte[] bytes) {
ensureOpen();
pinpointClientHandler.send(bytes);
}
@Override
public Future<ResponseMessage> request(byte[] bytes) {
if (pinpointClientHandler == null) {
return returnFailureFuture();
}
return pinpointClientHandler.request(bytes);
}
@Override
public void response(int requestId, byte[] payload) {
ensureOpen();
pinpointClientHandler.response(requestId, payload);
}
@Override
public ClientStreamChannelContext openStream(byte[] payload, ClientStreamChannelMessageListener messageListener) {
return openStream(payload, messageListener, null);
}
@Override
public ClientStreamChannelContext openStream(byte[] payload, ClientStreamChannelMessageListener messageListener, StreamChannelStateChangeEventHandler<ClientStreamChannel> stateChangeListener) {
// StreamChannel must be changed into interface in order to throw the StreamChannel that returns failure.
// fow now throw just exception
ensureOpen();
return pinpointClientHandler.openStream(payload, messageListener, stateChangeListener);
}
@Override
public SocketAddress getRemoteAddress() {
return pinpointClientHandler.getRemoteAddress();
}
@Override
public ClusterOption getLocalClusterOption() {
return pinpointClientHandler.getLocalClusterOption();
}
@Override
public ClusterOption getRemoteClusterOption() {
return pinpointClientHandler.getRemoteClusterOption();
}
@Override
public StreamChannelContext findStreamChannel(int streamChannelId) {
ensureOpen();
return pinpointClientHandler.findStreamChannel(streamChannelId);
}
private Future<ResponseMessage> returnFailureFuture() {
DefaultFuture<ResponseMessage> future = new DefaultFuture<ResponseMessage>();
future.setFailure(new PinpointSocketException("pinpointClientHandler is null"));
return future;
}
private void ensureOpen() {
if (pinpointClientHandler == null) {
throw new PinpointSocketException("pinpointClientHandler is null");
}
}
/**
* write ping packet on tcp channel
* PinpointSocketException throws when writing fails.
*
*/
@Override
public void sendPing() {
PinpointClientHandler pinpointClientHandler = this.pinpointClientHandler;
if (pinpointClientHandler == null) {
return;
}
pinpointClientHandler.sendPing();
}
@Override
public void close() {
synchronized (this) {
if (closed) {
return;
}
closed = true;
}
PinpointClientHandler pinpointClientHandler = this.pinpointClientHandler;
if (pinpointClientHandler == null) {
return;
}
pinpointClientHandler.close();
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public boolean isConnected() {
return this.pinpointClientHandler.isConnected();
}
}
|
|
/*
* File: CitrusTestLinkUtils.java
*
* Copyright (c) 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* last modified: Friday, June 29, 2012 (22:55) by: Matthias Beil
*/
package de.eimb.testlink.synchronize.citrus.utils;
import java.text.SimpleDateFormat;
import java.util.Date;
import com.consol.citrus.TestCase;
import de.eimb.testlink.synchronize.citrus.CitrusTestLinkBean;
import de.eimb.testlink.synchronize.citrus.CitrusTestLinkEnum;
/**
* Utility class for handling CITRUS to TestLink functionality.
*
* @author Matthias Beil
* @since TestLink-Synchronize 1.0.0
*/
public abstract class CitrusTestLinkUtils {

    // ~ Static fields/initializers --------------------------------------------------------------

    /**
     * DATE_FORMAT used for the execution timestamp in the result note.
     * Fix: the original pattern {@code yyyy-MM-dd'T'HH:mm:SSS} glued the milliseconds
     * directly onto the minutes and omitted the seconds field entirely, producing
     * timestamps such as {@code 12:34:123}. It now renders a full
     * {@code HH:mm:ss.SSS} time.
     */
    private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";

    // ~ Constructors ----------------------------------------------------------------------------

    /**
     * Constructor for {@code CitrusTestLinkUtils} class. Private: utility class,
     * never instantiated.
     */
    private CitrusTestLinkUtils() {

        super();
    }

    // ~ Methods ---------------------------------------------------------------------------------

    /**
     * Check if this CITRUS test case should write his result to TestLink. Use a boolean for this so
     * in CITRUS this can be set by some means of global variable.
     *
     * @param citrusCase CITRUS test case holding the test case variables.
     *
     * @return {@code True} in case the {@link CitrusTestLinkEnum#WriteToTestLink} value is defined
     *         and is set to {@code true}. In all other case {@code false} is returned.
     */
    public static final boolean writeToTestLink(final TestCase citrusCase) {

        // make sure there are some test case variables
        if ((null != citrusCase) && (null != citrusCase.getTestContext()) &&
                (null != citrusCase.getTestContext().getVariables())) {

            // check if write to TestLink variable is defined
            if (citrusCase.getTestContext().getVariables().containsKey(
                    CitrusTestLinkEnum.WriteToTestLink.getKey())) {

                // get value and convert it to a Boolean
                final Object obj = citrusCase.getTestContext().getVariables().get(
                        CitrusTestLinkEnum.WriteToTestLink.getKey());
                final Boolean write = ConvertUtils.convertToBoolean(obj);

                if (null != write) {

                    return write.booleanValue();
                }
            }
        }

        // variable missing, not convertible, or false
        return false;
    }

    /**
     * Build the ID of the CITRUS test case. This ID is made up of the package and test case name.
     *
     * @param citrusCase CITRUS test case.
     *
     * @return The CITRUS ID allowing to identify this test case uniquely, or {@code null}
     *         when no test case is given.
     */
    public static final String buildId(final TestCase citrusCase) {

        if (null == citrusCase) {

            return null;
        }

        // "<package>.<name>" uniquely identifies the test case
        final StringBuilder builder = new StringBuilder(citrusCase.getPackageName());
        builder.append(".");
        builder.append(citrusCase.getName());

        return builder.toString();
    }

    /**
     * Create a new CITRUS to TestLink bean, which must hold all data needed to write the result to
     * TestLink.
     *
     * @param citrusCase CITRUS test case.
     * @param url        TestLink URL coming from the properties of the TestLink listener, if
     *                   provided.
     * @param key        TestLink development key from the properties of the TestLink listener, if
     *                   provided.
     *
     * @return Newly create CITRUS to TestLink bean holding all predefined values. In case of an
     *         error {@code null} is returned.
     */
    public static final CitrusTestLinkBean createCitrusBean(final TestCase citrusCase,
            final String url, final String key) {

        // get ID to allow to identify this bean
        final String id = buildId(citrusCase);

        if ((null == id) || (id.isEmpty())) {

            return null;
        }

        final CitrusTestLinkBean bean = new CitrusTestLinkBean();

        // preset with values from test listener, if they are defined
        bean.setId(id);
        bean.setUrl(url);
        bean.setKey(key);

        return bean;
    }

    /**
     * Build note and assign it depending on the success information. If there is no success
     * information available, no notes will be set.
     *
     * @param bean CITRUS TestLink bean.
     */
    public static final void buildNotes(final CitrusTestLinkBean bean) {

        // make sure there is some success / failure information
        if ((null == bean) || (null == bean.getSuccess())) {

            return;
        }

        final StringBuilder builder = new StringBuilder();

        // always add the execution duration
        builder.append("Execution [ ");

        // SimpleDateFormat is not thread-safe, so a fresh instance is created per call
        final SimpleDateFormat sdf = new SimpleDateFormat(DATE_FORMAT);
        builder.append(sdf.format(new Date(bean.getEndTime())));

        builder.append(" ] duration for CITRUS test case [ ");
        builder.append(bean.getId());
        builder.append(" ] was [ ");
        builder.append(bean.getEndTime() - bean.getStartTime());
        builder.append(" ] milliseconds.");

        if (bean.getSuccess().booleanValue()) {

            // handle success note; keep any pre-existing success note after the duration line
            if ((null != bean.getNotesSuccess()) && (!bean.getNotesSuccess().isEmpty())) {

                builder.append("\n");
                builder.append(bean.getNotesSuccess());
            }

            bean.setNotesSuccess(builder.toString());
        } else {

            // handle failure note; keep any pre-existing failure note and append the cause
            if ((null != bean.getNotesFailure()) && (!bean.getNotesFailure().isEmpty())) {

                builder.append("\n");
                builder.append(bean.getNotesFailure());
            }

            if (null != bean.getCause()) {

                builder.append("\nFailure due to [ \n");
                builder.append(ConvertUtils.throwableToString(bean.getCause()));
                builder.append("\n ]");
            }

            bean.setNotesFailure(builder.toString());
        }
    }
}
|
|
package sample;
import org.apache.commons.dbcp2.BasicDataSource;
import org.h2.tools.Server;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
/**
* single thread, individual update, standalone: 24467ms
* single thread, batch update, standalone: 16270ms
* multi thread, individual update, standalone: 23325ms
* multi thread, batch update, standalone: 15499ms
* single thread, individual update, server: 649822ms
* single thread, batch update, server: 287157ms
* multi thread, individual update, server: 257172ms
* multi thread, batch update, server: 144632ms
*/
/**
 * Micro-benchmark for H2 insert throughput. The three flags below select
 * single/multi thread, individual/batch update and standalone/server mode;
 * measured timings for each combination are listed in the preceding comment.
 */
public class TestH2InsertPerformance {
    private static final boolean singleThread = false;
    private static final boolean batchUpdate = true;
    private static final boolean standalone = true;

    private static final String INSERT_SQL = "insert into logs values (?, ?, ?, ?, ?)";

    private static Server server;
    private static DataSource dataSource;

    public static void main(String[] args) throws Exception {
        if (singleThread) {
            System.out.print("single thread, ");
        } else {
            System.out.print(" multi thread, ");
        }
        if (batchUpdate) {
            System.out.print("     batch update, ");
        } else {
            System.out.print("individual update, ");
        }
        if (standalone) {
            System.out.print("standalone: ");
            initStandaloneDatabase();
        } else {
            System.out.print("    server: ");
            initServerDatabase();
        }
        recreateTable();
        System.out.println();

        stopWatch(() -> {
            if (singleThread) {
                test_single_thread();
            } else {
                test_multi_thread();
            }
        });

        if (server != null) {
            server.stop();
        }
    }

    // Embedded (same-process) H2 database behind a small DBCP pool.
    private static void initStandaloneDatabase() {
        BasicDataSource dataSource = new BasicDataSource();
        dataSource.setUrl("jdbc:h2:./build/database");
        dataSource.setUsername("sa");
        dataSource.setPassword("");
        dataSource.setInitialSize(4);
        dataSource.setMaxTotal(4);
        TestH2InsertPerformance.dataSource = dataSource;
    }

    // H2 in TCP server mode, accessed over loopback.
    private static void initServerDatabase() throws SQLException {
        server = Server.createTcpServer();
        server.start();

        BasicDataSource dataSource = new BasicDataSource();
        dataSource.setUrl("jdbc:h2:tcp://localhost/./build/database");
        dataSource.setUsername("sa");
        dataSource.setPassword("");
        dataSource.setInitialSize(4);
        dataSource.setMaxTotal(4);
        TestH2InsertPerformance.dataSource = dataSource;
    }

    // Drops and re-creates the benchmark table.
    private static void recreateTable() throws SQLException {
        try (Connection con = dataSource.getConnection()) {
            // Fix: "drop table logs" failed with an SQLException on a fresh database
            // where the table does not exist yet; IF EXISTS makes the drop idempotent.
            Database.update(con, "drop table if exists logs");
            Database.update(con, "create table logs (" +
                    "    user_name nvarchar2(64) not null," +
                    "    date_time timestamp not null," +
                    "    sequence_number integer not null," +
                    "    ap_name nvarchar2(64) not null," +
                    "    message nvarchar(256) not null" +
                    ")");
        }
    }

    // Three workers insert concurrently, one connection each.
    private static void test_multi_thread() {
        AtomicInteger counter = new AtomicInteger(0);
        List<Callable<Void>> tasks = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            String ap = "ap" + i;
            tasks.add(() -> {
                try (Database database = new Database(dataSource.getConnection(), INSERT_SQL)) {
                    insertLog(database, ap, counter);
                }
                return null;
            });
        }
        ExecutorService service = Executors.newFixedThreadPool(3);
        try {
            // Fix: invokeAll() captures worker exceptions inside the returned futures;
            // the original never inspected them, so an SQLException in a worker was
            // silently swallowed and the benchmark reported a bogus timing.
            List<Future<Void>> futures = service.invokeAll(tasks);
            for (Future<Void> future : futures) {
                future.get();
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause());
        } finally {
            service.shutdown();
        }
    }

    // Single worker inserts for all three "ap" prefixes on one connection.
    private static void test_single_thread() throws SQLException {
        AtomicInteger counter = new AtomicInteger(0);
        try (Database database = new Database(dataSource.getConnection(), INSERT_SQL)) {
            for (int i = 0; i < 3; i++) {
                String ap = "ap" + i;
                insertLog(database, ap, counter);
            }
        }
    }

    // Runs the block and prints the elapsed wall-clock time.
    private static void stopWatch(TestBlock runnable) throws Exception {
        System.out.println("start");
        long start = System.currentTimeMillis();
        runnable.execute();
        long end = System.currentTimeMillis();
        System.out.println();
        System.out.println((end - start) + "ms");
    }

    private static LocalDateTime now = LocalDateTime.now();

    // Inserts 900k rows; prints a progress dot every 10k rows across all workers.
    private static void insertLog(Database database, String ap, AtomicInteger counter) throws SQLException {
        for (int i = 0; i < 900_000; i++) {
            String user = "user" + (i % 4);
            database.update(user, now.plus(i, ChronoUnit.MILLIS), i, ap, UUID.randomUUID().toString());
            int count = counter.incrementAndGet();
            if (count % 10000 == 0) {
                System.out.print(".");
            }
        }
    }

    /**
     * Thin JDBC helper. In batch mode it accumulates adds and flushes every 10k rows
     * (re-preparing the statement after each flush); otherwise each update runs in
     * its own statement. Closing flushes any pending batch and closes the connection.
     */
    private static class Database implements AutoCloseable {
        private final Connection con;
        private final String sql;
        private PreparedStatement ps;
        private int batchCounter;

        // One-shot update with its own statement (used for DDL).
        private static void update(Connection con, String sql, Object... parameters) {
            try (PreparedStatement ps = con.prepareStatement(sql)) {
                setParameter(ps, parameters);
                ps.executeUpdate();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
        }

        private Database(Connection con, String sql) {
            this.con = con;
            this.sql = sql;
        }

        private void update(Object... parameters) throws SQLException {
            if (batchUpdate) {
                if (ps == null) {
                    ps = con.prepareStatement(sql);
                }
                setParameter(ps, parameters);
                ps.addBatch();
                batchCounter++;
                if (batchCounter % 10000 == 0) {
                    ps.executeBatch();
                    ps.close();
                    ps = con.prepareStatement(sql);
                    batchCounter = 0;
                }
            } else {
                try (PreparedStatement ps = con.prepareStatement(sql)) {
                    setParameter(ps, parameters);
                    ps.executeUpdate();
                }
            }
        }

        // Binds parameters 1..n; LocalDateTime is mapped explicitly to Timestamp.
        private static void setParameter(PreparedStatement ps, Object... parameters) throws SQLException {
            for (int i = 1; i <= parameters.length; i++) {
                Object param = parameters[i - 1];
                if (param instanceof LocalDateTime) {
                    Timestamp timestamp = Timestamp.valueOf(((LocalDateTime) param));
                    ps.setTimestamp(i, timestamp);
                } else {
                    ps.setObject(i, param);
                }
            }
        }

        @Override
        public void close() {
            try {
                if (ps != null) {
                    // Flush any pending batch, then close the statement.
                    // Fix: the original only closed ps when batchCounter > 0, leaking
                    // the re-prepared statement whenever the row count was an exact
                    // multiple of the 10k flush size.
                    if (0 < batchCounter) {
                        ps.executeBatch();
                        batchCounter = 0;
                    }
                    ps.close();
                }
                con.close();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
        }
    }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sun.jini.test.spec.loader.pref.preferredClassLoader;
import java.util.logging.Level;
// com.sun.jini.qa.harness
import com.sun.jini.qa.harness.TestException;
import com.sun.jini.qa.harness.QAConfig;
// com.sun.jini.qa
import com.sun.jini.qa.harness.QATest;
import com.sun.jini.qa.harness.QAConfig;
// java.io
import java.io.IOException;
// java.net
import java.net.URL;
// java.util.logging
import java.util.logging.Logger;
import java.util.logging.Level;
// davis packages
import net.jini.loader.pref.PreferredClassLoader;
// instrumented preferred class loader
import com.sun.jini.test.spec.loader.util.Item;
import com.sun.jini.test.spec.loader.util.Util;
import com.sun.jini.test.spec.loader.util.QATestPreferredClassLoader;
// test base class
import com.sun.jini.test.spec.loader.pref.AbstractTestBase;
/**
* <b>Purpose</b><br><br>
*
* This test verifies the behavior of the<br>
* <code>protected Class loadClass(String name, boolean resolve)</code>
* method of the<br>
* <code>net.jini.loader.pref.PreferredClassLoader</code> class:
*
* <br><blockquote>
* If the name parameter is not preferred, the search is the same as with
* <code>ClassLoader.loadClass()</code>. If the name is preferred, then this
* method will call <code>findClass()</code> to load the class and will not
* delegate to the parent class loader.
* </blockquote>
* <ul><lh>Parameters:</lh>
* <li>name - the name of the class</li>
* <li>resolve - if true then resolve the class</li>
* </ul>
*
* <b>Test Description</b><br><br>
*
* This test iterates over a set of various parameters passing to
* {@link QATestPreferredClassLoader} constructors.
* All parameters are passing to the {@link #testCase} method.
* <ul><lh>Possible parameters are:</lh>
* <li>URL[] urls: http or file based url to qa1-loader-pref.jar file</li>
* <li>ClassLoader parent: ClassLoader.getSystemClassLoader()</li>
* <li>String exportAnnotation: <code>null</code>,
* "Any export annotation string"</li>
* <li>boolean requireDlPerm: <code>true</code>, <code>false</code></li>
* </ul>
*
* Each {@link #testCase} iterates over a set of preferred/non-preferred
* classes.
* There are two sets of classes with the same names there. The first set of
* classes can be found in the executing VM's classpath. The second set of
* classes are placed in the qa1-loader-pref.jar file and can be downloaded
* using http or file based url.
* <br><br>
* Class {@link Util} has a statically defined lists of all resources
* placed in the qa1-loader-pref.jar file. {@link Util#listClasses},
* {@link Util#listResources}, {@link Util#listLocalClasses},
* {@link Util#listLocalResources} define names and preferred status of
* these resources.
* <br><br>
* For each preferred/non-preferred class the testCase will try to execute
* <code>Class.forName</code> passing {@link QATestPreferredClassLoader}
* object and will try to execute <code>Class.forName</code> passing the
* <code>ClassLoader.getSystemClassLoader()</code>.
* <br><br>
* Then testCase will verify class identity using <code>equals</code> method.
* Loaded classes should be equal for non-preferred classes and should be
* not equal for preferred classes.
* <br><br>
*
* <b>Infrastructure</b><br><br>
*
* <ol><lh>This test requires the following infrastructure:</lh>
* <li> {@link QATestPreferredClassLoader} is an instrumented
* PreferredClassLoader using for davis.loader's and davis.loader.pref's
* testing.</li>
* <li> <code>META-INF/PREFERRED.LIST</code> with a set of
* preferred/non-preferred resources. <code>META-INF/PREFERRED.LIST</code>
* should be placed in the qa1-loader-pref.jar file.</li>
* <li> A first set of resources should be placed in the qa1.jar file, so these
* resource can be found in the executing VM's classpath.</li>
* <li> A second set of resources should be placed in the qa1-loader-pref.jar,
* so these resources can be downloaded.</li>
* </ol>
*
* <br>
*
* <b>Actions</b><br><br>
* <ol>
* <li> construct a {@link QATestPreferredClassLoader} with a single URL to
* the qa1-loader-pref.jar file and appropriate parameters.
* </li>
* <li> for each preferred/non-preferred class do the following:
* <ul>
* <li> Call {@link QATestPreferredClassLoader#clearAllFlags}
* method.</li>
* <li> invoke Class.forName method passing
* {@link QATestPreferredClassLoader},
* invoke Class.forName method passing system class loader and
* verify that returned classes are equal for non-preferred classes
* and are not equal for preferred classes.</li>
* <li> verify that {@link QATestPreferredClassLoader#loadClass}
* is called for all classes and
* {@link QATestPreferredClassLoader#findClass} is called
* for preferred classes only.</li>
* </ul>
* </li>
* <li> for classes that can be found in the parent class loader (such as in
* the class path) but cannot be found in the preferred class loader
* do the following:
* <ul>
* <li> Call {@link QATestPreferredClassLoader#clearAllFlags} method.</li>
* <li> invoke Class.forName method passing
* {@link QATestPreferredClassLoader},
* invoke Class.forName method passing system class loader and
* verify that returned classes are equal for all classes.</li>
* <li> verify that {@link QATestPreferredClassLoader#loadClass} is
* called for all classes and
* {@link QATestPreferredClassLoader#findClass} is not called.</li>
* </ul>
* </li>
* </ol>
*
*/
public class LoadClasses extends AbstractTestBase {
    /** String to format message string */
    static final String str1 = "preferred class";
    /** String to format message string */
    static final String str2 = "non-preferred class";
    /** String that indicates fail status; failures append here, and most paths
     *  also throw immediately ("fast fail"), so run() rechecks it as a net for
     *  the break-on-ClassNotFoundException path. */
    String message = "";
    /** System class loader */
    ClassLoader parent = Util.systemClassLoader();
    /**
     * Run the test according <b>Test Description</b>
     */
    public void run() throws Exception {
        String annotation = super.annotation;
        // Exercise all four combinations of http/file URLs and
        // null/non-null export annotation.
        testCase(true, null);
        testCase(true, annotation);
        testCase(false, null);
        testCase(false, annotation);
        if (message.length() > 0) {
            throw new TestException(message);
        }
    }
    /**
     * Reset setup parameters by passing parameters and create
     * {@link QATestPreferredClassLoader}.
     * <br><br>
     * Then run the test case according <b>Test Description</b>
     *
     * @param isHttp flag to define whether http or file url will be used
     *               for download preferred classes and resources
     * @param annotation the exportAnnotation string
     *
     * @throws TestException if could not create instrumented preferred class
     *         loader
     */
    public void testCase(boolean isHttp, String annotation)
            throws TestException {
        /*
         * Reset setup parameters by passing parameters.
         */
        super.isHttp = isHttp;
        super.annotation = annotation;
        /*
         * 1) construct a QATestPreferredClassLoader according setup parameters
         *    with a single URL to the qa1-loader-pref.jar file.
         */
        createLoader(Util.PREFERREDJarFile);
        /*
         * 2) for each preferred/non-preferred class do the following:
         *    a) Call QATestPreferredClassLoader.clearAllFlags() method.
         *    b) invoke Class.forName method passing
         *       QATestPreferredClassLoader, invoke Class.forName method
         *       passing system class loader and assert that returned
         *       classes are equals for non-preferred classes and are not
         *       equals for preferred classes.
         *    c) assert that QATestPreferredClassLoader.loadClass() is called
         *       for all classes and QATestPreferredClassLoader.findClass()
         *       is called for preferred classes only.
         */
        for (int item = 0; item < Util.listClasses.length; item++) {
            loader.clearAllFlags();
            String name = Util.listClasses[item].name;
            Class classDefault = null;
            Class classPreferred = null;
            try {
                // Load via the system loader first, then via the instrumented
                // preferred loader; resolve=false in both cases.
                classDefault = Class.forName(name, false, parent);
                classPreferred = Class.forName(name, false, loader);
            } catch (ClassNotFoundException e) {
                // Record and stop this loop; run() surfaces the message later.
                message += "\nClass not found: " + name;
                break;
            } catch (SecurityException se) {
                // Do not expect SecurityException.
                // Tests with expected SecurityException
                // are GetPermissionsHttpSecurityException and
                // GetPermissionsFileSecurityException in the
                // GetPermissions test case.
                message += "\nClass.forName("
                        + name + ", false, loader)\n"
                        + "  throws: " + se.toString() + "\n"
                        + "  expected: returned class";
                // Fast fail approach
                throw new TestException(message);
            }
            // expected == true means the two loads should yield the SAME class
            // (i.e. the class is non-preferred and delegation happened).
            boolean expected = !Util.listClasses[item].pref;
            boolean returned = classDefault.equals(classPreferred);
            if (expected != returned) {
                message += "\nClass.forName("
                        + name + ", false, PreferredClassLoader)\n"
                        + "  returned:" + (expected ? str1 : str2) + "\n"
                        + "  expected:" + (expected ? str2 : str1);
                // Fast fail approach
                throw new TestException(message);
            } else {
                String msg = "Class.forName(" + name
                        + ", false, PreferredClassLoader)"
                        + "  returned " + (expected ? str2 : str1)
                        + "  as expected";
                logger.log(Level.FINE, msg);
            }
            // Instrumentation checks: loadClass must always be invoked ...
            if (!loader.loadClassIsInvoked) {
                message += "\nClass.forName("
                        + name + ", false, PreferredClassLoader) "
                        + "does not invoke PreferredClassLoader.loadClass()";
                // Fast fail approach
                throw new TestException(message);
            }
            // ... and findClass exactly when the class is preferred.
            if (!expected && !loader.findClassIsInvoked) {
                message += "\nClass.forName("
                        + name + ", false, PreferredClassLoader) "
                        + "does not invoke PreferredClassLoader.findClass()"
                        + " for preferred class";
                // Fast fail approach
                throw new TestException(message);
            }
            if (expected && loader.findClassIsInvoked) {
                message += "\nClass.forName("
                        + name + ", false, PreferredClassLoader) "
                        + "invoke PreferredClassLoader.findClass()"
                        + " for non-preferred class";
                // Fast fail approach
                throw new TestException(message);
            }
        }
        /*
         * 3) for classes that can be found in the parent class loader
         *    (such as in the class path) but cannot be found in the
         *    preferred class loader do the following:
         *    a) Call QATestPreferredClassLoader.clearAllFlags() method.
         *    b) invoke Class.forName method passing
         *       QATestPreferredClassLoader, invoke Class.forName
         *       method passing system class loader and assert that
         *       returned classes are equals for all classes.
         *    c) assert that QATestPreferredClassLoader.loadClass() is
         *       called for all classes and
         *       QATestPreferredClassLoader.findClass() is not called.
         */
        for (int item = 0; item < Util.listLocalClasses.length; item++) {
            loader.clearAllFlags();
            String name = Util.listLocalClasses[item].name;
            Class classDefault = null;
            Class classPreferred = null;
            try {
                classDefault = Class.forName(name, false, parent);
                classPreferred = Class.forName(name, false, loader);
            } catch (ClassNotFoundException e) {
                message += "\nClass not found: " + name;
                break;
            }
            // Local-only classes must always delegate, so equality is expected.
            boolean expected = true;
            boolean returned = classDefault.equals(classPreferred);
            if (expected != returned) {
                message += "\nClass.forName("
                        + name + ", false, PreferredClassLoader)\n"
                        + "  returned:" + (expected ? str1 : str2) + "\n"
                        + "  expected:" + (expected ? str2 : str1);
                // Fast fail approach
                throw new TestException(message);
            } else {
                String msg = "Class.forName(" + name
                        + ", false, PreferredClassLoader)"
                        + "  returned " + (expected ? str2 : str1)
                        + "  as expected";
                logger.log(Level.FINE, msg);
            }
            if (!loader.loadClassIsInvoked) {
                message += "\nClass.forName("
                        + name + ", false, PreferredClassLoader) "
                        + "does not invoke PreferredClassLoader.loadClass";
                // Fast fail approach
                throw new TestException(message);
            }
            if (loader.findClassIsInvoked) {
                message += "\nClass.forName("
                        + name + ", false, PreferredClassLoader) "
                        + "invoke PreferredClassLoader.findClass"
                        + " for non-preferred class";
                // Fast fail approach
                throw new TestException(message);
            }
        }
    }
}
|
|
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static org.junit.contrib.truth.Truth.ASSERT;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableBiMap.Builder;
import com.google.common.collect.testing.MapInterfaceTest;
import com.google.common.collect.testing.features.CollectionFeature;
import com.google.common.collect.testing.features.CollectionSize;
import com.google.common.collect.testing.features.MapFeature;
import com.google.common.collect.testing.google.BiMapInverseTester;
import com.google.common.collect.testing.google.BiMapTestSuiteBuilder;
import com.google.common.collect.testing.google.TestStringBiMapGenerator;
import com.google.common.testing.SerializableTester;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
/**
* Tests for {@link ImmutableBiMap}.
*
* @author Jared Levy
*/
@GwtCompatible(emulated = true)
public class ImmutableBiMapTest extends TestCase {
// TODO: Reduce duplication of ImmutableMapTest code
  @GwtIncompatible("suite")
  public static Test suite() {
    TestSuite suite = new TestSuite();
    suite.addTestSuite(MapTests.class);
    suite.addTestSuite(InverseMapTests.class);
    suite.addTestSuite(CreationTests.class);
    suite.addTestSuite(BiMapSpecificTests.class);
    // Generated conformance suite from guava-testlib; the inverse-after-
    // serialization tests are suppressed here — presumably because this
    // implementation does not preserve that property; confirm against the
    // BiMapInverseTester docs.
    suite.addTest(BiMapTestSuiteBuilder.using(new ImmutableBiMapGenerator())
        .named("ImmutableBiMap")
        .withFeatures(CollectionSize.ANY,
            CollectionFeature.SERIALIZABLE,
            MapFeature.REJECTS_DUPLICATES_AT_CREATION)
        .suppressing(BiMapInverseTester.getInverseSameAfterSerializingMethods())
        .createTestSuite());
    return suite;
  }
  /**
   * Test-suite generator that materializes an {@link ImmutableBiMap} from the
   * entries supplied by the guava-testlib harness.
   */
  public static final class ImmutableBiMapGenerator extends TestStringBiMapGenerator {
    @Override
    protected BiMap<String, String> create(Entry<String, String>[] entries) {
      ImmutableBiMap.Builder<String, String> result = ImmutableBiMap.builder();
      for (Entry<String, String> entry : entries) {
        result.put(entry.getKey(), entry.getValue());
      }
      return result.build();
    }
  }
  /**
   * Shared MapInterfaceTest base for the forward and inverse bimap views.
   * The five {@code false} constructor flags disable modification-related
   * features (ImmutableBiMap is read-only) — see the MapInterfaceTest
   * constructor for the exact meaning of each flag; confirm.
   */
  public static abstract class AbstractMapTests<K, V>
      extends MapInterfaceTest<K, V> {
    public AbstractMapTests() {
      super(false, false, false, false, false);
    }
    @Override protected Map<K, V> makeEmptyMap() {
      throw new UnsupportedOperationException();
    }
    private static final Joiner joiner = Joiner.on(", ");
    /**
     * Extra bimap invariants checked after the inherited ones: entry
     * round-trips through inverse(), and the string forms of the map and its
     * views match the entry iteration order.
     */
    @Override protected void assertMoreInvariants(Map<K, V> map) {
      BiMap<K, V> bimap = (BiMap<K, V>) map;
      for (Entry<K, V> entry : map.entrySet()) {
        assertEquals(entry.getKey() + "=" + entry.getValue(),
            entry.toString());
        assertEquals(entry.getKey(), bimap.inverse().get(entry.getValue()));
      }
      assertEquals("{" + joiner.join(map.entrySet()) + "}",
          map.toString());
      assertEquals("[" + joiner.join(map.entrySet()) + "]",
          map.entrySet().toString());
      assertEquals("[" + joiner.join(map.keySet()) + "]",
          map.keySet().toString());
      assertEquals("[" + joiner.join(map.values()) + "]",
          map.values().toString());
      assertEquals(Sets.newHashSet(map.entrySet()), map.entrySet());
      assertEquals(Sets.newHashSet(map.keySet()), map.keySet());
    }
  }
  /** Map-interface conformance tests against the forward view of a bimap. */
  public static class MapTests extends AbstractMapTests<String, Integer> {
    @Override protected Map<String, Integer> makeEmptyMap() {
      return ImmutableBiMap.of();
    }
    @Override protected Map<String, Integer> makePopulatedMap() {
      return ImmutableBiMap.of("one", 1, "two", 2, "three", 3);
    }
    @Override protected String getKeyNotInPopulatedMap() {
      return "minus one";
    }
    @Override protected Integer getValueNotInPopulatedMap() {
      return -1;
    }
  }
  /**
   * Map-interface conformance tests against the {@code inverse()} view,
   * built by inverting an Integer-to-String bimap.
   */
  public static class InverseMapTests
      extends AbstractMapTests<String, Integer> {
    @Override protected Map<String, Integer> makeEmptyMap() {
      return ImmutableBiMap.of();
    }
    @Override protected Map<String, Integer> makePopulatedMap() {
      return ImmutableBiMap.of(1, "one", 2, "two", 3, "three").inverse();
    }
    @Override protected String getKeyNotInPopulatedMap() {
      return "minus one";
    }
    @Override protected Integer getValueNotInPopulatedMap() {
      return -1;
    }
  }
public static class CreationTests extends TestCase {
public void testEmptyBuilder() {
ImmutableBiMap<String, Integer> map
= new Builder<String, Integer>().build();
assertEquals(Collections.<String, Integer>emptyMap(), map);
assertEquals(Collections.<Integer, String>emptyMap(), map.inverse());
assertSame(ImmutableBiMap.of(), map);
}
public void testSingletonBuilder() {
ImmutableBiMap<String, Integer> map = new Builder<String, Integer>()
.put("one", 1)
.build();
assertMapEquals(map, "one", 1);
assertMapEquals(map.inverse(), 1, "one");
}
public void testBuilder() {
ImmutableBiMap<String, Integer> map
= ImmutableBiMap.<String, Integer>builder()
.put("one", 1)
.put("two", 2)
.put("three", 3)
.put("four", 4)
.put("five", 5)
.build();
assertMapEquals(map,
"one", 1, "two", 2, "three", 3, "four", 4, "five", 5);
assertMapEquals(map.inverse(),
1, "one", 2, "two", 3, "three", 4, "four", 5, "five");
}
public void testBuilderPutAllWithEmptyMap() {
ImmutableBiMap<String, Integer> map = new Builder<String, Integer>()
.putAll(Collections.<String, Integer>emptyMap())
.build();
assertEquals(Collections.<String, Integer>emptyMap(), map);
}
public void testBuilderPutAll() {
Map<String, Integer> toPut = new LinkedHashMap<String, Integer>();
toPut.put("one", 1);
toPut.put("two", 2);
toPut.put("three", 3);
Map<String, Integer> moreToPut = new LinkedHashMap<String, Integer>();
moreToPut.put("four", 4);
moreToPut.put("five", 5);
ImmutableBiMap<String, Integer> map = new Builder<String, Integer>()
.putAll(toPut)
.putAll(moreToPut)
.build();
assertMapEquals(map,
"one", 1, "two", 2, "three", 3, "four", 4, "five", 5);
assertMapEquals(map.inverse(),
1, "one", 2, "two", 3, "three", 4, "four", 5, "five");
}
public void testBuilderReuse() {
Builder<String, Integer> builder = new Builder<String, Integer>();
ImmutableBiMap<String, Integer> mapOne = builder
.put("one", 1)
.put("two", 2)
.build();
ImmutableBiMap<String, Integer> mapTwo = builder
.put("three", 3)
.put("four", 4)
.build();
assertMapEquals(mapOne, "one", 1, "two", 2);
assertMapEquals(mapOne.inverse(), 1, "one", 2, "two");
assertMapEquals(mapTwo, "one", 1, "two", 2, "three", 3, "four", 4);
assertMapEquals(mapTwo.inverse(),
1, "one", 2, "two", 3, "three", 4, "four");
}
public void testBuilderPutNullKey() {
Builder<String, Integer> builder = new Builder<String, Integer>();
try {
builder.put(null, 1);
fail();
} catch (NullPointerException expected) {
}
}
public void testBuilderPutNullValue() {
Builder<String, Integer> builder = new Builder<String, Integer>();
try {
builder.put("one", null);
fail();
} catch (NullPointerException expected) {
}
}
public void testBuilderPutNullKeyViaPutAll() {
Builder<String, Integer> builder = new Builder<String, Integer>();
try {
builder.putAll(Collections.<String, Integer>singletonMap(null, 1));
fail();
} catch (NullPointerException expected) {
}
}
public void testBuilderPutNullValueViaPutAll() {
Builder<String, Integer> builder = new Builder<String, Integer>();
try {
builder.putAll(Collections.<String, Integer>singletonMap("one", null));
fail();
} catch (NullPointerException expected) {
}
}
public void testPuttingTheSameKeyTwiceThrowsOnBuild() {
Builder<String, Integer> builder = new Builder<String, Integer>()
.put("one", 1)
.put("one", 1); // throwing on this line would be even better
try {
builder.build();
fail();
} catch (IllegalArgumentException expected) {
assertEquals("duplicate key: one", expected.getMessage());
}
}
public void testOf() {
assertMapEquals(
ImmutableBiMap.of("one", 1),
"one", 1);
assertMapEquals(
ImmutableBiMap.of("one", 1).inverse(),
1, "one");
assertMapEquals(
ImmutableBiMap.of("one", 1, "two", 2),
"one", 1, "two", 2);
assertMapEquals(
ImmutableBiMap.of("one", 1, "two", 2).inverse(),
1, "one", 2, "two");
assertMapEquals(
ImmutableBiMap.of("one", 1, "two", 2, "three", 3),
"one", 1, "two", 2, "three", 3);
assertMapEquals(
ImmutableBiMap.of("one", 1, "two", 2, "three", 3).inverse(),
1, "one", 2, "two", 3, "three");
assertMapEquals(
ImmutableBiMap.of("one", 1, "two", 2, "three", 3, "four", 4),
"one", 1, "two", 2, "three", 3, "four", 4);
assertMapEquals(
ImmutableBiMap.of(
"one", 1, "two", 2, "three", 3, "four", 4).inverse(),
1, "one", 2, "two", 3, "three", 4, "four");
assertMapEquals(
ImmutableBiMap.of(
"one", 1, "two", 2, "three", 3, "four", 4, "five", 5),
"one", 1, "two", 2, "three", 3, "four", 4, "five", 5);
assertMapEquals(
ImmutableBiMap.of(
"one", 1, "two", 2, "three", 3, "four", 4, "five", 5).inverse(),
1, "one", 2, "two", 3, "three", 4, "four", 5, "five");
}
public void testOfNullKey() {
try {
ImmutableBiMap.of(null, 1);
fail();
} catch (NullPointerException expected) {
}
try {
ImmutableBiMap.of("one", 1, null, 2);
fail();
} catch (NullPointerException expected) {
}
}
public void testOfNullValue() {
try {
ImmutableBiMap.of("one", null);
fail();
} catch (NullPointerException expected) {
}
try {
ImmutableBiMap.of("one", 1, "two", null);
fail();
} catch (NullPointerException expected) {
}
}
public void testOfWithDuplicateKey() {
try {
ImmutableBiMap.of("one", 1, "one", 1);
fail();
} catch (IllegalArgumentException expected) {
assertEquals("duplicate key: one", expected.getMessage());
}
}
public void testCopyOfEmptyMap() {
ImmutableBiMap<String, Integer> copy
= ImmutableBiMap.copyOf(Collections.<String, Integer>emptyMap());
assertEquals(Collections.<String, Integer>emptyMap(), copy);
assertSame(copy, ImmutableBiMap.copyOf(copy));
assertSame(ImmutableBiMap.of(), copy);
}
public void testCopyOfSingletonMap() {
ImmutableBiMap<String, Integer> copy
= ImmutableBiMap.copyOf(Collections.singletonMap("one", 1));
assertMapEquals(copy, "one", 1);
assertSame(copy, ImmutableBiMap.copyOf(copy));
}
public void testCopyOf() {
Map<String, Integer> original = new LinkedHashMap<String, Integer>();
original.put("one", 1);
original.put("two", 2);
original.put("three", 3);
ImmutableBiMap<String, Integer> copy = ImmutableBiMap.copyOf(original);
assertMapEquals(copy, "one", 1, "two", 2, "three", 3);
assertSame(copy, ImmutableBiMap.copyOf(copy));
}
public void testEmpty() {
ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.of();
assertEquals(Collections.<String, Integer>emptyMap(), bimap);
assertEquals(Collections.<String, Integer>emptyMap(), bimap.inverse());
}
public void testFromHashMap() {
Map<String, Integer> hashMap = Maps.newLinkedHashMap();
hashMap.put("one", 1);
hashMap.put("two", 2);
ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
ImmutableMap.of("one", 1, "two", 2));
assertMapEquals(bimap, "one", 1, "two", 2);
assertMapEquals(bimap.inverse(), 1, "one", 2, "two");
}
public void testFromImmutableMap() {
ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
new ImmutableMap.Builder<String, Integer>()
.put("one", 1)
.put("two", 2)
.put("three", 3)
.put("four", 4)
.put("five", 5)
.build());
assertMapEquals(bimap,
"one", 1, "two", 2, "three", 3, "four", 4, "five", 5);
assertMapEquals(bimap.inverse(),
1, "one", 2, "two", 3, "three", 4, "four", 5, "five");
}
public void testDuplicateValues() {
ImmutableMap<String, Integer> map
= new ImmutableMap.Builder<String, Integer>()
.put("one", 1)
.put("two", 2)
.put("uno", 1)
.put("dos", 2)
.build();
try {
ImmutableBiMap.copyOf(map);
fail();
} catch (IllegalArgumentException expected) {
assertEquals("duplicate key: 1", expected.getMessage());
}
}
}
  /** Tests for BiMap-specific behavior: forcePut, views, inverse, serialization. */
  public static class BiMapSpecificTests extends TestCase {
    public void testForcePut() {
      ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
          ImmutableMap.of("one", 1, "two", 2));
      // forcePut is a mutator, so the immutable implementation must reject it.
      try {
        bimap.forcePut("three", 3);
        fail();
      } catch (UnsupportedOperationException expected) {}
    }
    public void testKeySet() {
      ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
          ImmutableMap.of("one", 1, "two", 2, "three", 3, "four", 4));
      Set<String> keys = bimap.keySet();
      assertEquals(Sets.newHashSet("one", "two", "three", "four"), keys);
      // Insertion order must be preserved by the key view.
      ASSERT.that(keys).hasContentsInOrder("one", "two", "three", "four");
    }
    public void testValues() {
      ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
          ImmutableMap.of("one", 1, "two", 2, "three", 3, "four", 4));
      Set<Integer> values = bimap.values();
      assertEquals(Sets.newHashSet(1, 2, 3, 4), values);
      // Insertion order must be preserved by the value view.
      ASSERT.that(values).hasContentsInOrder(1, 2, 3, 4);
    }
    public void testDoubleInverse() {
      ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
          ImmutableMap.of("one", 1, "two", 2));
      // inverse() twice must return the identical original instance.
      assertSame(bimap, bimap.inverse().inverse());
    }
    @GwtIncompatible("SerializableTester")
    public void testEmptySerialization() {
      ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.of();
      assertSame(bimap, SerializableTester.reserializeAndAssert(bimap));
    }
    @GwtIncompatible("SerializableTester")
    public void testSerialization() {
      ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
          ImmutableMap.of("one", 1, "two", 2));
      ImmutableBiMap<String, Integer> copy =
          SerializableTester.reserializeAndAssert(bimap);
      assertEquals(Integer.valueOf(1), copy.get("one"));
      assertEquals("one", copy.inverse().get(1));
      assertSame(copy, copy.inverse().inverse());
    }
    @GwtIncompatible("SerializableTester")
    public void testInverseSerialization() {
      ImmutableBiMap<String, Integer> bimap = ImmutableBiMap.copyOf(
          ImmutableMap.of(1, "one", 2, "two")).inverse();
      ImmutableBiMap<String, Integer> copy =
          SerializableTester.reserializeAndAssert(bimap);
      assertEquals(Integer.valueOf(1), copy.get("one"));
      assertEquals("one", copy.inverse().get(1));
      assertSame(copy, copy.inverse().inverse());
    }
  }
private static <K, V> void assertMapEquals(Map<K, V> map,
Object... alternatingKeysAndValues) {
int i = 0;
for (Entry<K, V> entry : map.entrySet()) {
assertEquals(alternatingKeysAndValues[i++], entry.getKey());
assertEquals(alternatingKeysAndValues[i++], entry.getValue());
}
}
}
|
|
package com.codingbat.java;
/**
 * The class contains solutions for the String-1 section.
*/
public class String1 {
/**
* Given a string name, e.g. "Bob", return a greeting of the form "Hello Bob!".
*
* @param name the input name
* @return the above mentioned new string
*/
public String helloName(String name) {
return "Hello " + name + "!";
}
/**
* Given two strings, a and b, return the result of putting them together in the order abba, e.g.
* "Hi" and "Bye" returns "HiByeByeHi".
*
* @param aText the first input string
* @param bText the second input string
* @return the above mentioned new string
*/
public String makeAbba(String aText, String bText) {
return aText + bText + bText + aText;
}
/**
* The web is built with HTML strings like "<i>Yay</i>" which draws Yay as italic text. In this
* example, the "i" tag makes <i> and </i> which surround the word "Yay". Given tag and word
* strings, create the HTML string with tags around the word, e.g. "<i>Yay</i>".
*
* @param tag the input tag string
* @param word the input word string
* @return the above mentioned new string
*/
public String makeTags(String tag, String word) {
return "<" + tag + ">" + word + "</" + tag + ">";
}
/**
* Given an "out" string length 4, such as "<<>>", and a word, return a new string where the word
* is in the middle of the out string, e.g. "<<word>>". Note: use str.substring(i, j) to extract
* the String starting at index i and going up to but not including index j.
*
* @param out the input outer string
* @param word the input word string
* @return the above defined new string
*/
public String makeOutWord(String out, String word) {
String outFrontPart = out.substring(0, 2);
String outRightPart = out.substring(2, 4);
return outFrontPart + word + outRightPart;
}
/**
* Given a string, return a new string made of 3 copies of the last 2 chars of the original
* string. The string length will be at least 2.
*
* @param str the input string
* @return a new string made of 3 copies of the last 2 chars
*/
public String extraEnd(String str) {
String lastTwoChars = str.substring(str.length() - 2);
return lastTwoChars + lastTwoChars + lastTwoChars;
}
/**
* Given a string, return the string made of its first two chars, so the String "Hello" yields
* "He". If the string is shorter than length 2, return whatever there is, so "X" yields "X", and
* the empty string "" yields the empty string "". Note that str.length() returns the length of a
* string.
*
* @param str the input string
* @return the above defined substring of the input string
*/
public String firstTwo(String str) {
return str.length() < 2 ? str : str.substring(0, 2);
}
/**
* Given a string of even length, return the first half. So the string "WooHoo" yields "Woo".
*
* @param str the input string
* @return the first half of the input string
*/
public String firstHalf(String str) {
return str.substring(0, str.length() / 2);
}
/**
* Given a string, return a version without the first and last char, so "Hello" yields "ell". The
* string length will be at least 2.
*
* @param str the input string
* @return the input string without the first and last char
*/
public String withoutEnd(String str) {
return str.length() > 1 ? str.substring(1, str.length() - 1) : "";
}
/**
* Given 2 strings, a and b, return a string of the form short+long+short, with the shorter string
* on the outside and the longer string on the inside. The strings will not be the same length,
* but they may be empty (length 0).
*
* @param a the first input string
* @param b the second input string
* @return a string of the form short+long+short input strings
*/
public String comboString(String a, String b) {
return a.length() > b.length() ? b + a + b : a + b + a;
}
/**
* Given 2 strings, return their concatenation, except omit the first char of each. The strings
* will be at least length 1.
*
* @param a the first input string
* @param b the second input string
* @return the concatenation of the input strings without the first chars
*/
public String nonStart(String a, String b) {
String shortA = a.length() > 0 ? a.substring(1) : a;
String shortB = b.length() > 0 ? b.substring(1) : b;
return shortA + shortB;
}
/**
* Given a string, return a "rotated left 2" version where the first 2 chars are moved to the end.
* The string length will be at least 2.
*
* @param str the input string
* @return the rotated left 2 version of the input string
*/
public String left2(String str) {
return str.length() > 2 ? str.substring(2) + str.substring(0, 2) : str;
}
/**
* Given a string, return a "rotated right 2" version where the last 2 chars are moved to the
* start. The string length will be at least 2.
*
* @param str the input string
* @return the rotated right 2 version of the input string
*/
public String right2(String str) {
String lastTwoChars = str.substring(str.length() - 2);
String frontPart = str.substring(0, str.length() - 2);
return str.length() > 2 ? lastTwoChars + frontPart : str;
}
/**
 * Returns a length-1 string taken from the front of the input when
 * {@code front} is true, otherwise from the back. The input is non-empty.
 *
 * @param str the non-empty input string
 * @param front true to take the first char, false for the last char
 * @return a single-character string from the requested end
 */
public String theEnd(String str, boolean front) {
    if (front && str.length() > 0) {
        return str.substring(0, 1);
    }
    return str.substring(str.length() - 1);
}
/**
 * Strips the first and last character of the string. Inputs of length
 * 0, 1 or 2 have nothing left once both ends are removed, so they
 * yield the empty string.
 *
 * @param str the input string (any length, including 0)
 * @return the input without its first and last chars
 */
public String withouEnd2(String str) {
    if (str.length() <= 2) {
        return "";
    }
    return str.substring(1, str.length() - 1);
}
/**
 * Extracts the middle two characters of an even-length string, so
 * "string" yields "ri". The input length is at least 2.
 *
 * @param str the even-length input string
 * @return the two characters straddling the midpoint
 */
public String middleTwo(String str) {
    int mid = str.length() / 2;
    return str.length() > 1 ? str.substring(mid - 1, mid + 1) : str;
}
/**
 * Returns true if the string ends in "ly".
 *
 * @param str the input string
 * @return true, if the string ends with "ly"
 */
public boolean endsLy(String str) {
    // String#endsWith already returns false for strings shorter than the
    // suffix, so no explicit length check or substring is needed.
    return str.endsWith("ly");
}
/**
 * Builds a string from the first n and last n characters of the input.
 * The input length is at least n; shorter inputs are returned unchanged.
 *
 * @param str the input string
 * @param n the number of chars to take from each end
 * @return the first n chars followed by the last n chars
 */
public String nTwice(String str, int n) {
    if (str.length() > n - 1) {
        return str.substring(0, n) + str.substring(str.length() - n);
    }
    return str;
}
/**
 * Returns the length-2 substring starting at {@code index}. When the
 * index cannot anchor a full length-2 substring (too small or too
 * large), the first two characters are used instead. The input length
 * is at least 2.
 *
 * @param str the input string
 * @param index the desired start index
 * @return a length-2 substring at the index, or the first two chars
 */
public String twoChar(String str, int index) {
    if (index > 0 && index + 1 < str.length()) {
        return str.substring(index, index + 2);
    }
    return str.substring(0, 2);
}
/**
 * Extracts the middle three characters of an odd-length string, so
 * "Candy" yields "and". The input length is at least 3.
 *
 * @param str the odd-length input string
 * @return the three characters centered on the midpoint
 */
public String middleThree(String str) {
    if (str.length() <= 2) {
        return str;
    }
    int mid = str.length() / 2;
    return str.substring(mid - 1, mid + 2);
}
/**
 * Returns true if "bad" appears starting at index 0 or 1 in the string,
 * such as with "badxxx" or "xbadxx" but not "xxbadxx". The string may be
 * any length, including 0.
 *
 * @param str the input string
 * @return true, if "bad" appears starting at index 0 or 1 in the string
 */
public boolean hasBad(String str) {
    // Must use indexOf (first occurrence), not lastIndexOf: for an input
    // like "badxxbad" the LAST occurrence is at index 5, which would
    // wrongly report false even though "bad" starts at index 0.
    int index = str.indexOf("bad");
    return index == 0 || index == 1;
}
/**
 * Returns a length-2 string made of the first two characters of the
 * input, padding with '@' when the input is shorter than 2.
 *
 * @param str the input string (any length)
 * @return the first two chars, '@'-padded to length 2
 */
public String atFirst(String str) {
    // Padding unconditionally and slicing covers all lengths uniformly.
    return (str + "@@").substring(0, 2);
}
/**
 * Builds a length-2 string from the first char of {@code a} and the
 * last char of {@code b}, so "yo" and "java" yields "ya". An empty
 * input contributes '@' in place of its missing character.
 *
 * @param a the first input string
 * @param b the second input string
 * @return first char of a followed by last char of b, '@' for missing
 */
public String lastChars(String a, String b) {
    char first = a.isEmpty() ? '@' : a.charAt(0);
    char last = b.isEmpty() ? '@' : b.charAt(b.length() - 1);
    return "" + first + last;
}
/**
 * Concatenates two strings, dropping one character when the seam would
 * produce a doubled char, so "abc" + "cat" yields "abcat".
 *
 * @param a the first input string
 * @param b the second input string
 * @return the concatenation, with a duplicate seam char collapsed
 */
public String conCat(String a, String b) {
    boolean seamMatches = !a.isEmpty() && !b.isEmpty()
        && a.charAt(a.length() - 1) == b.charAt(0);
    return seamMatches ? a + b.substring(1) : a + b;
}
/**
 * Swaps the last two characters of the string, so "coding" yields
 * "codign". Strings shorter than 2 are returned unchanged.
 *
 * @param str the input string (any length)
 * @return the input with its final two chars exchanged
 */
public String lastTwo(String str) {
    int len = str.length();
    if (len < 2) {
        return str;
    }
    return str.substring(0, len - 2) + str.charAt(len - 1) + str.charAt(len - 2);
}
/**
 * Returns "red" or "blue" when the string begins with that color name,
 * otherwise the empty string.
 *
 * @param str the input string
 * @return "red", "blue", or ""
 */
public String seeColor(String str) {
    for (String color : new String[] {"red", "blue"}) {
        if (str.startsWith(color)) {
            return color;
        }
    }
    return "";
}
/**
 * Returns true if the first two characters of the string also appear at
 * its end, such as with "edited".
 *
 * @param str the input string
 * @return true if the leading 2-char prefix equals the trailing 2 chars
 */
public boolean frontAgain(String str) {
    return str.length() > 1
        && str.substring(0, 2).equals(str.substring(str.length() - 2));
}
/**
 * Concatenates the two strings after trimming the longer one from the
 * front so both contribute the same number of characters; "Hello" and
 * "Hi" yield "loHi". The inputs may be any length.
 *
 * @param a the first string
 * @param b the second string
 * @return the tail of each string, both cut to the shorter length
 */
public String minCat(String a, String b) {
    int keep = Math.min(a.length(), b.length());
    return a.substring(a.length() - keep) + b.substring(b.length() - keep);
}
/**
 * Repeats the first two characters of the string three times. Inputs
 * shorter than 2 repeat whatever is there (so "a" yields "aaa" and ""
 * yields "").
 *
 * @param str the input string (any length)
 * @return three copies of the (up to) first two chars
 */
public String extraFront(String str) {
    String front = str.substring(0, Math.min(2, str.length()));
    return front + front + front;
}
/**
 * When the leading length-2 substring also ends the string, returns the
 * string without that leading substring ("HelloHe" yields "lloHe"; "Hi"
 * yields "" since the prefix overlaps itself). Otherwise the input is
 * returned unchanged.
 *
 * @param str the input string
 * @return the input with a repeated 2-char prefix removed, or unchanged
 */
public String without2(String str) {
    int len = str.length();
    if (len == 2) {
        // The 2-char prefix trivially equals the 2-char suffix.
        return "";
    }
    if (len > 2 && str.startsWith(str.substring(len - 2))) {
        return str.substring(2);
    }
    return str;
}
/**
 * Removes the first two characters of the string, except the first char
 * is kept when it is 'a' and the second is kept when it is 'b'. The
 * input may be any length.
 *
 * @param str the input string
 * @return the input with its first two chars conditionally removed
 */
public String deFront(String str) {
    // Rebuild the result from three independent pieces: a surviving 'a',
    // a surviving 'b', and everything from index 2 onward.
    String kept = "";
    if (str.startsWith("a")) {
        kept += "a";
    }
    if (str.length() > 1 && str.charAt(1) == 'b') {
        kept += "b";
    }
    String rest = str.length() > 2 ? str.substring(2) : "";
    return kept + rest;
}
/**
 * Checks whether {@code word} matches the front of {@code str}, where
 * the first character of the word is a wildcard. On a match, returns
 * the matching front of the string ("hippo"/"xip" yields "hip");
 * otherwise returns the empty string. The word has length at least 1.
 *
 * @param str the input string
 * @param word the pattern word whose first char matches anything
 * @return the matched front of the string, or ""
 */
public String startWord(String str, String word) {
    if (!str.isEmpty() && !word.isEmpty()
            && str.substring(1).startsWith(word.substring(1))) {
        return str.charAt(0) + word.substring(1);
    }
    return "";
}
/**
 * Strips a leading 'x' and a trailing 'x' from the string, if present;
 * otherwise returns the string unchanged.
 *
 * @param str the input string
 * @return the input without an 'x' at either end
 */
public String withoutX(String str) {
    if (str.startsWith("x")) {
        str = str.substring(1);
    }
    if (str.endsWith("x")) {
        str = str.substring(0, str.length() - 1);
    }
    return str;
}
/**
 * Removes any 'x' appearing among the first two characters of the
 * string; strings without a leading 'x' pair are returned unchanged.
 *
 * @param str the input string (any length)
 * @return the input with 'x' chars dropped from its first two positions
 */
public String withoutX2(String str) {
    if (str.length() == 1) {
        return str.charAt(0) == 'x' ? "" : str;
    }
    if (str.length() < 2) {
        return str;
    }
    boolean firstIsX = str.charAt(0) == 'x';
    boolean secondIsX = str.charAt(1) == 'x';
    if (firstIsX && secondIsX) {
        return str.substring(2);
    }
    if (firstIsX) {
        return str.substring(1);
    }
    if (secondIsX) {
        return str.charAt(0) + str.substring(2);
    }
    return str;
}
}
|
|
package de.epiceric.shopchest.utils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import org.bukkit.Bukkit;
import org.bukkit.Chunk;
import org.bukkit.Location;
import org.bukkit.OfflinePlayer;
import org.bukkit.block.Chest;
import org.bukkit.block.DoubleChest;
import org.bukkit.entity.Player;
import org.bukkit.inventory.InventoryHolder;
import org.bukkit.permissions.PermissionAttachmentInfo;
import org.bukkit.util.Vector;
import de.epiceric.shopchest.ShopChest;
import de.epiceric.shopchest.config.Config;
import de.epiceric.shopchest.event.ShopsLoadedEvent;
import de.epiceric.shopchest.shop.Shop;
import de.epiceric.shopchest.shop.Shop.ShopType;
/**
 * Keeps track of all loaded shops and their locations, maintains per-player
 * shop counts, and updates shop holograms/items shown to individual players.
 * <p>
 * The location maps are concurrent because they are read and written from
 * async tasks as well as the main server thread.
 */
public class ShopUtils {

    /** Permission prefix whose suffix encodes a player's shop limit (e.g. {@code shopchest.limit.5}). */
    private static final String LIMIT_PERMISSION_PREFIX = "shopchest.limit.";

    private final Map<UUID, Counter> playerShopAmount = new HashMap<>();

    // concurrent since it is updated in async task
    private final Map<UUID, Location> playerLocation = new ConcurrentHashMap<>();
    private final Map<Location, Shop> shopLocation = new ConcurrentHashMap<>();
    private final Collection<Shop> shopLocationValues = Collections.unmodifiableCollection(shopLocation.values());

    private final ShopChest plugin;

    public ShopUtils(ShopChest plugin) {
        this.plugin = plugin;
    }

    /**
     * Get the shop at a given location
     *
     * @param location Location of the shop
     * @return Shop at the given location or <b>null</b> if no shop is found there
     */
    public Shop getShop(Location location) {
        // Normalize to block coordinates so any location within the block matches.
        Location newLocation = new Location(location.getWorld(), location.getBlockX(),
                location.getBlockY(), location.getBlockZ());

        return shopLocation.get(newLocation);
    }

    /**
     * Checks whether there is a shop at a given location
     * @param location Location to check
     * @return Whether there is a shop at the given location
     */
    public boolean isShop(Location location) {
        return getShop(location) != null;
    }

    /**
     * Get a collection of all loaded shops
     * <p>
     * This collection is safe to use for looping over and removing shops.
     *
     * @return Read-only collection of all shops, may contain duplicates for double chests
     */
    public Collection<Shop> getShops() {
        // Snapshot copy so callers can remove shops while iterating.
        return Collections.unmodifiableCollection(new ArrayList<>(shopLocationValues));
    }

    /**
     * Get all shops
     *
     * @see #getShops()
     * @return Copy of collection of all shops, may contain duplicates
     * @deprecated Use {@link #getShops()} instead
     */
    @Deprecated
    public Collection<Shop> getShopsCopy() {
        return new ArrayList<>(getShops());
    }

    /**
     * Add a shop
     * @param shop Shop to add
     * @param addToDatabase Whether the shop should also be added to the database
     * @param callback Callback that - if succeeded - returns the ID the shop had or was given (as {@code int})
     */
    public void addShop(Shop shop, boolean addToDatabase, Callback<Integer> callback) {
        InventoryHolder ih = shop.getInventoryHolder();
        plugin.debug("Adding shop... (#" + shop.getID() + ")");

        if (ih instanceof DoubleChest) {
            // Register the shop under both chest halves so lookups from either side work.
            DoubleChest dc = (DoubleChest) ih;
            Chest r = (Chest) dc.getRightSide();
            Chest l = (Chest) dc.getLeftSide();

            plugin.debug("Added shop as double chest. (#" + shop.getID() + ")");

            shopLocation.put(r.getLocation(), shop);
            shopLocation.put(l.getLocation(), shop);
        } else {
            plugin.debug("Added shop as single chest. (#" + shop.getID() + ")");

            shopLocation.put(shop.getLocation(), shop);
        }

        if (addToDatabase) {
            if (shop.getShopType() != ShopType.ADMIN) {
                playerShopAmount.compute(shop.getVendor().getUniqueId(), (uuid, amount) -> amount == null ? new Counter(1) : amount.increment());
            }
            plugin.getShopDatabase().addShop(shop, callback);
        } else {
            if (callback != null) callback.callSyncResult(shop.getID());
        }
    }

    /**
     * Add a shop
     * @param shop Shop to add
     * @param addToDatabase Whether the shop should also be added to the database
     */
    public void addShop(Shop shop, boolean addToDatabase) {
        addShop(shop, addToDatabase, null);
    }

    /**
     * Removes (i.e. unloads) all currently loaded shops
     */
    public void removeShops() {
        shopLocation.forEach((location, shop) -> {
            if (!shop.isCreated()) return;
            plugin.debug("Removing shop " + shop.getID());
            shop.removeItem();
            shop.removeHologram();
        });
        shopLocation.clear();
    }

    /** Remove a shop. May not work properly if double chest doesn't exist!
     * @param shop Shop to remove
     * @param removeFromDatabase Whether the shop should also be removed from the database
     * @param callback Callback that - if succeeded - returns null
     * @see ShopUtils#removeShopById(int, boolean, Callback)
     */
    public void removeShop(Shop shop, boolean removeFromDatabase, Callback<Void> callback) {
        plugin.debug("Removing shop (#" + shop.getID() + ")");

        if (shop.isCreated()) {
            InventoryHolder ih = shop.getInventoryHolder();

            if (ih instanceof DoubleChest) {
                DoubleChest dc = (DoubleChest) ih;
                Chest r = (Chest) dc.getRightSide();
                Chest l = (Chest) dc.getLeftSide();

                shopLocation.remove(r.getLocation());
                shopLocation.remove(l.getLocation());
            } else {
                shopLocation.remove(shop.getLocation());
            }

            shop.removeItem();
            shop.removeHologram();
        }

        if (removeFromDatabase) {
            if (shop.getShopType() != ShopType.ADMIN) {
                playerShopAmount.compute(shop.getVendor().getUniqueId(), (uuid, amount) -> amount == null ? new Counter() : amount.decrement());
            }
            plugin.getShopDatabase().removeShop(shop, callback);
        } else {
            if (callback != null) callback.callSyncResult(null);
        }
    }

    /**
     * Remove a shop. May not work properly if double chest doesn't exist!
     * @param shop Shop to remove
     * @param removeFromDatabase Whether the shop should also be removed from the database
     * @see ShopUtils#removeShopById(int, boolean)
     */
    public void removeShop(Shop shop, boolean removeFromDatabase) {
        removeShop(shop, removeFromDatabase, null);
    }

    /**
     * Remove a shop by its ID
     * @param shopId ID of the shop to remove
     * @param removeFromDatabase Whether the shop should also be removed from the database
     * @param callback Callback that - if succeeded - returns null
     */
    public void removeShopById(int shopId, boolean removeFromDatabase, Callback<Void> callback) {
        // A double-chest shop occupies two locations, so one ID may match multiple entries.
        Map<Location, Shop> toRemove = shopLocation.entrySet().stream()
                .filter(e -> e.getValue().getID() == shopId)
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        plugin.debug(String.format("Removing %d shop(s) with ID %d", toRemove.size(), shopId));

        if (toRemove.isEmpty()) {
            if (callback != null) callback.callSyncResult(null);
            return;
        }

        toRemove.forEach((loc, shop) -> {
            shopLocation.remove(loc);
            shop.removeItem();
            shop.removeHologram();
        });

        // All entries refer to the same shop, so inspect any one of them.
        Shop first = toRemove.values().iterator().next();
        boolean isAdmin = first.getShopType() == ShopType.ADMIN;
        UUID vendorUuid = first.getVendor().getUniqueId();

        // Database#removeShop removes shop by ID so this only needs to be called once
        if (removeFromDatabase) {
            if (!isAdmin) {
                playerShopAmount.compute(vendorUuid, (uuid, amount) -> amount == null ? new Counter() : amount.decrement());
            }
            plugin.getShopDatabase().removeShop(first, callback);
        } else {
            if (callback != null) callback.callSyncResult(null);
        }
    }

    /**
     * Remove a shop by its ID
     * @param shopId ID of the shop to remove
     * @param removeFromDatabase Whether the shop should also be removed from the database
     */
    public void removeShopById(int shopId, boolean removeFromDatabase) {
        removeShopById(shopId, removeFromDatabase, null);
    }

    /**
     * Get the shop limits of a player
     * @param p Player, whose shop limits should be returned
     * @return The shop limits of the given player (-1 means no limit)
     */
    public int getShopLimit(Player p) {
        int limit = 0;
        boolean useDefault = true;

        for (PermissionAttachmentInfo permInfo : p.getEffectivePermissions()) {
            String permission = permInfo.getPermission();
            if (permission.startsWith(LIMIT_PERMISSION_PREFIX) && p.hasPermission(permission)) {
                if (permission.equalsIgnoreCase(Permissions.NO_LIMIT)) {
                    limit = -1;
                    useDefault = false;
                    break;
                } else {
                    try {
                        // Use substring instead of String#split: the prefix contains
                        // '.' which split() would treat as a regex wildcard.
                        int newLimit = Integer.parseInt(permission.substring(LIMIT_PERMISSION_PREFIX.length()));

                        if (newLimit < 0) {
                            // NOTE(review): useDefault is intentionally left true here,
                            // matching the original behavior — a negative limit permission
                            // falls back to Config.defaultLimit. Confirm this is intended.
                            limit = -1;
                            break;
                        }

                        limit = Math.max(limit, newLimit);
                        useDefault = false;
                    } catch (NumberFormatException ignored) {
                        /* Ignore and continue */
                    }
                }
            }
        }

        if (limit < -1) limit = -1;
        return (useDefault ? Config.defaultLimit : limit);
    }

    /**
     * Get the amount of shops of a player
     * @param p Player, whose shops should be counted
     * @return The amount of a shops a player has (if {@link Config#excludeAdminShops} is true, admin shops won't be counted)
     */
    public int getShopAmount(OfflinePlayer p) {
        return playerShopAmount.getOrDefault(p.getUniqueId(), new Counter()).get();
    }

    /**
     * Get all shops of a player from the database without loading them
     * @param p Player, whose shops should be get
     * @param callback Callback that returns a collection of the given player's shops
     */
    public void getShops(OfflinePlayer p, Callback<Collection<Shop>> callback) {
        plugin.getShopDatabase().getShops(p.getUniqueId(), new Callback<Collection<Shop>>(plugin) {
            @Override
            public void onResult(Collection<Shop> result) {
                Set<Shop> shops = new HashSet<>();
                for (Shop playerShop : result) {
                    // Prefer the already-loaded instance over the freshly deserialized one.
                    Shop loadedShop = getShop(playerShop.getLocation());
                    if (loadedShop != null && loadedShop.equals(playerShop)) {
                        shops.add(loadedShop);
                    } else {
                        shops.add(playerShop);
                    }
                }
                if (callback != null) callback.onResult(shops);
            }

            @Override
            public void onError(Throwable throwable) {
                if (callback != null) callback.onError(throwable);
            }
        });
    }

    /**
     * Loads the amount of shops for each player
     * @param callback Callback that returns the amount of shops for each player
     */
    public void loadShopAmounts(final Callback<Map<UUID, Integer>> callback) {
        plugin.getShopDatabase().getShopAmounts(new Callback<Map<UUID,Integer>>(plugin) {
            @Override
            public void onResult(Map<UUID, Integer> result) {
                playerShopAmount.clear();
                result.forEach((uuid, amount) -> playerShopAmount.put(uuid, new Counter(amount)));
                if (callback != null) callback.onResult(result);
            }

            @Override
            public void onError(Throwable throwable) {
                if (callback != null) callback.onError(throwable);
            }
        });
    }

    /**
     * Gets all shops in the given chunk from the database and adds them to the server
     * @param chunk The chunk to load shops from
     * @param callback Callback that returns the amount of shops added if succeeded
     * @see ShopUtils#loadShops(Chunk[], Callback)
     */
    public void loadShops(final Chunk chunk, final Callback<Integer> callback) {
        loadShops(new Chunk[] {chunk}, callback);
    }

    /**
     * Gets all shops in the given chunks from the database and adds them to the server
     * @param chunks The chunks to load shops from
     * @param callback Callback that returns the amount of shops added if succeeded
     * @see ShopUtils#loadShops(Chunk, Callback)
     */
    public void loadShops(final Chunk[] chunks, final Callback<Integer> callback) {
        plugin.getShopDatabase().getShopsInChunks(chunks, new Callback<Collection<Shop>>(plugin) {
            @Override
            public void onResult(Collection<Shop> result) {
                Collection<Shop> loadedShops = new HashSet<>();
                for (Shop shop : result) {
                    Location loc = shop.getLocation();

                    // Don't add shop if shop is already loaded
                    if (shopLocation.containsKey(loc)) {
                        continue;
                    }

                    // Chunk coordinates are the floor of blockCoord / 16; use an
                    // arithmetic shift instead of '/ 16', which truncates toward
                    // zero and yields the wrong chunk for negative coordinates.
                    int x = loc.getBlockX() >> 4;
                    int z = loc.getBlockZ() >> 4;

                    // Don't add shop if chunk is no longer loaded
                    if (!loc.getWorld().isChunkLoaded(x, z)) {
                        continue;
                    }

                    if (shop.create(true)) {
                        addShop(shop, false);
                        loadedShops.add(shop);
                    }
                }

                if (callback != null) callback.onResult(loadedShops.size());

                Bukkit.getPluginManager().callEvent(new ShopsLoadedEvent(Collections.unmodifiableCollection(loadedShops)));
            }

            @Override
            public void onError(Throwable throwable) {
                if (callback != null) callback.onError(throwable);
            }
        });
    }

    /**
     * Update hologram and item of all shops for a player
     * @param player Player to show the updates
     */
    public void updateShops(Player player) {
        updateShops(player, false);
    }

    /**
     * Update hologram and item of all shops for a player
     * @param player Player to show the updates
     * @param force Whether update should be forced even if player has not moved
     */
    public void updateShops(Player player, boolean force) {
        if (!force && player.getLocation().equals(playerLocation.get(player.getUniqueId()))) {
            // Player has not moved, so don't calculate shops again.
            return;
        }

        if (Config.onlyShowShopsInSight) {
            updateVisibleShops(player);
        } else {
            updateNearestShops(player);
        }

        playerLocation.put(player.getUniqueId(), player.getLocation());
    }

    /**
     * Remove a saved location of a player to force a recalculation
     * of whether the hologram should be visible.
     * This should only be called when really needed
     * @param player Player whose saved location will be reset
     */
    public void resetPlayerLocation(Player player) {
        playerLocation.remove(player.getUniqueId());
    }

    /**
     * Shows the player only the hologram of the shop they are looking at
     * (ray-cast along the view direction) and shows/hides shop items by distance.
     */
    private void updateVisibleShops(Player player) {
        double itemDistSquared = Math.pow(Config.maximalItemDistance, 2);
        double maxDist = Config.maximalDistance;

        double nearestDistSquared = Double.MAX_VALUE;
        Shop nearestShop = null;

        Location pLoc = player.getEyeLocation();
        Vector pDir = pLoc.getDirection();

        // Display holograms based on sight
        for (double i = 0; i <= maxDist; i++) {
            Location loc = pLoc.clone();
            Vector dir = pDir.clone();
            double factor = Math.min(i, maxDist);

            loc.add(dir.multiply(factor));
            Location locBelow = loc.clone().subtract(0, 1, 0);

            // Check block below as player may look at hologram
            Shop shop = getShop(loc);
            if (shop == null) {
                shop = getShop(locBelow);
            }

            if (shop != null && shop.hasHologram()) {
                double distSquared = pLoc.distanceSquared(loc);
                if (distSquared < nearestDistSquared) {
                    nearestDistSquared = distSquared;
                    nearestShop = shop;
                }
            }
        }

        for (Shop shop : getShops()) {
            if (!shop.equals(nearestShop) && shop.hasHologram()) {
                shop.getHologram().hidePlayer(player);
            }

            // Display item based on distance
            Location shopLocation = shop.getLocation();
            if (shopLocation.getWorld().getName().equals(player.getWorld().getName())) {
                double distSquared = shop.getLocation().distanceSquared(player.getLocation());

                if (shop.hasItem()) {
                    if (distSquared <= itemDistSquared) {
                        shop.getItem().showPlayer(player);
                    } else {
                        shop.getItem().hidePlayer(player);
                    }
                }
            }
        }

        if (nearestShop != null) {
            nearestShop.getHologram().showPlayer(player);
        }
    }

    /**
     * Shows/hides every shop's hologram and item for the player based purely
     * on squared distance thresholds from the configuration.
     */
    private void updateNearestShops(Player p) {
        double holoDistSqr = Math.pow(Config.maximalDistance, 2);
        double itemDistSqr = Math.pow(Config.maximalItemDistance, 2);

        Location playerLocation = p.getLocation();

        for (Shop shop : getShops()) {
            if (playerLocation.getWorld().getName().equals(shop.getLocation().getWorld().getName())) {
                double distSqr = shop.getLocation().distanceSquared(playerLocation);

                if (shop.hasHologram()) {
                    if (distSqr <= holoDistSqr) {
                        shop.getHologram().showPlayer(p);
                    } else {
                        shop.getHologram().hidePlayer(p);
                    }
                }

                if (shop.hasItem()) {
                    if (distSqr <= itemDistSqr) {
                        shop.getItem().showPlayer(p);
                    } else {
                        shop.getItem().hidePlayer(p);
                    }
                }
            }
        }
    }
}
|
|
package de.djuelg.neuronizer.presentation.ui.fragments;
import android.content.Context;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.design.widget.FloatingActionButton;
import android.support.v4.app.Fragment;
import android.support.v7.preference.PreferenceManager;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.view.inputmethod.InputMethodManager;
import com.fernandocejas.arrow.optional.Optional;
import butterknife.BindView;
import butterknife.ButterKnife;
import butterknife.Unbinder;
import de.djuelg.neuronizer.R;
import de.djuelg.neuronizer.domain.executor.impl.ThreadExecutor;
import de.djuelg.neuronizer.domain.model.preview.Note;
import de.djuelg.neuronizer.presentation.presenters.DisplayNotePresenter;
import de.djuelg.neuronizer.presentation.presenters.impl.DisplayNotePresenterImpl;
import de.djuelg.neuronizer.presentation.ui.custom.FragmentInteractionListener;
import de.djuelg.neuronizer.presentation.ui.custom.ShareIntent;
import de.djuelg.neuronizer.presentation.ui.custom.view.RichEditorNavigation;
import de.djuelg.neuronizer.storage.RepositoryImpl;
import de.djuelg.neuronizer.threading.MainThreadImpl;
import jp.wasabeef.richeditor.RichEditor;
import static de.djuelg.neuronizer.presentation.ui.Constants.KEY_EDITOR_CONTENT;
import static de.djuelg.neuronizer.presentation.ui.Constants.KEY_PREF_ACTIVE_REPO;
import static de.djuelg.neuronizer.presentation.ui.Constants.KEY_TITLE;
import static de.djuelg.neuronizer.presentation.ui.Constants.KEY_UUID;
import static de.djuelg.neuronizer.presentation.ui.custom.Clipboard.copyToClipboard;
import static de.djuelg.neuronizer.presentation.ui.custom.MarkdownConverter.convertToMarkdown;
import static de.djuelg.neuronizer.presentation.ui.custom.view.AppbarCustomizer.changeAppbarTitle;
import static de.djuelg.neuronizer.presentation.ui.custom.view.AppbarCustomizer.configureAppbar;
import static de.djuelg.neuronizer.storage.RepositoryManager.FALLBACK_REALM;
/**
*
*/
@SuppressWarnings("ConstantConditions")
public class NoteFragment extends Fragment implements DisplayNotePresenter.View, View.OnClickListener {
@BindView(R.id.richEditor_item_details) RichEditor richEditor;
@BindView(R.id.button_save_item) FloatingActionButton saveButton;
private FragmentInteractionListener mListener;
private DisplayNotePresenter mPresenter;
private Unbinder mUnbinder;
private String uuid;
private String title;
public NoteFragment() {
}
/**
* Use this factory method to create a new instance of
* this fragment using the provided parameters.
*/
public static NoteFragment newInstance(String uuid, String title) {
NoteFragment fragment = new NoteFragment();
Bundle args = new Bundle();
args.putString(KEY_UUID, uuid);
args.putString(KEY_TITLE, title);
fragment.setArguments(args);
return fragment;
}
@Override
public void onAttach(Context context) {
super.onAttach(context);
if (context instanceof FragmentInteractionListener) {
mListener = (FragmentInteractionListener) context;
} else {
throw new RuntimeException(context.toString()
+ " must implement OnInteractionListener");
}
}
@Override
public void onDetach() {
super.onDetach();
mListener = null;
}
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setHasOptionsMenu(true);
SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(getActivity());
String repositoryName = sharedPreferences.getString(KEY_PREF_ACTIVE_REPO, FALLBACK_REALM);
mPresenter = new DisplayNotePresenterImpl(
ThreadExecutor.getInstance(),
MainThreadImpl.getInstance(),
this,
new RepositoryImpl(repositoryName)
);
Bundle bundle = getArguments();
if (bundle != null) {
uuid = bundle.getString(KEY_UUID);
title = bundle.getString(KEY_TITLE);
}
}
@Override
public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
final View view = inflater.inflate(R.layout.fragment_note, container, false);
mUnbinder = ButterKnife.bind(this, view);
saveButton.setOnClickListener(this);
final RichEditorNavigation richEditorNavigation = new RichEditorNavigation(view, richEditor);
richEditorNavigation.setupRichEditor();
richEditorNavigation.setupOnClickListeners();
configureAppbar(getActivity(), true);
changeAppbarTitle(getActivity(), title);
if (savedInstanceState == null) {
mPresenter.loadNote(uuid);
}
// Inflate the layout for this fragment
return view;
}
@Override
public void onActivityCreated(Bundle savedInstanceState) {
super.onActivityCreated(savedInstanceState);
if (savedInstanceState != null && savedInstanceState.getString(KEY_EDITOR_CONTENT) != null) {
richEditor.setHtml(savedInstanceState.getString(KEY_EDITOR_CONTENT));
}
}
@Override
public void onSaveInstanceState(@NonNull final Bundle outState) {
super.onSaveInstanceState(outState);
outState.putString(KEY_EDITOR_CONTENT, richEditor.getHtml());
}
@Override
public void onDestroyView() {
super.onDestroyView();
mUnbinder.unbind();
}
@Override
public void onPause() {
super.onPause();
InputMethodManager inputManager = (InputMethodManager) getActivity().getSystemService(Context.INPUT_METHOD_SERVICE);
inputManager.hideSoftInputFromWindow(getView().getWindowToken(), 0);
saveNoteToRepository();
}
@Override
public void onNoteLoaded(Optional<Note> note) {
if (!note.isPresent()) {
// The note with uuid doesn't exist -> return to previous fragment
getFragmentManager().popBackStack();
return;
}
richEditor.setHtml(note.get().getBody());
}
@Override
public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
super.onCreateOptionsMenu(menu, inflater);
inflater.inflate(R.menu.menu_note, menu);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case android.R.id.home:
getFragmentManager().popBackStack();
return true;
case R.id.action_clipboard:
copyHtmlAsMarkdownToClipboard();
return true;
case R.id.action_settings:
mListener.onSettingsSelected();
return true;
case R.id.action_share:
ShareIntent.withTitle(title).withHtml(richEditor.getHtml()).send(getContext());
return true;
}
return false;
}
private void saveNoteToRepository() {
mPresenter.editNote(uuid, richEditor.getHtml());
}
private void copyHtmlAsMarkdownToClipboard() {
String html = richEditor.getHtml();
copyToClipboard(getContext(), convertToMarkdown(html));
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.button_save_item:
getFragmentManager().popBackStack();
break;
}
}
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sis.console;
import java.util.Set;
import java.util.Map;
import java.util.HashMap;
import java.util.Collections;
import java.util.Locale;
import java.util.ResourceBundle;
import java.util.ServiceLoader;
import java.net.URLClassLoader;
import java.net.URL;
import java.io.Console;
import java.io.IOException;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.sql.Connection; // For javadoc.
import org.apache.sis.internal.system.DataDirectory;
import org.apache.sis.util.resources.Errors;
import org.apache.sis.internal.util.X364;
import org.apache.sis.internal.util.Fallback;
import org.apache.sis.setup.InstallationResources;
import static org.apache.sis.internal.util.Constants.EPSG;
// Branch-dependent imports
import java.nio.file.AccessDeniedException;
import java.nio.file.Path;
/**
 * A provider for data licensed under different terms of use than the Apache license.
 * This class is in charge of downloading the data if necessary and asking user's agreement
 * before installing them. Authorities managed by the current implementation are:
 *
 * <ul>
 *   <li>{@code "EPSG"} for the EPSG geodetic dataset.</li>
 * </ul>
 *
 * @author Martin Desruisseaux (Geomatys)
 * @since 0.7
 * @version 0.7
 * @module
 */
@Fallback
public class ResourcesDownloader extends InstallationResources {
    /**
     * Where to download the EPSG scripts after user has approved the terms of use.
     * Maven Central no longer serves plain HTTP requests, so this URL must use HTTPS.
     */
    private static final String DOWNLOAD_URL = "https://repo1.maven.org/maven2/org/apache/sis/non-free/sis-epsg/0.7/sis-epsg-0.7.jar";

    /**
     * Estimation of the EPSG database size after installation, in mega-bytes.
     * This is for information purpose only.
     */
    private static final int DATABASE_SIZE = 20;

    /**
     * The console to use for printing EPSG terms of use and asking for agreement, or {@code null} if none.
     */
    private final Console console;

    /**
     * The locale to use for text display.
     */
    private final Locale locale;

    /**
     * {@code true} if colors can be applied for ANSI X3.64 compliant terminal.
     */
    private final boolean colors;

    /**
     * The provider to use for fetching the actual licensed data after we got user's agreement.
     * Lazily created by {@link #provider(String, boolean)}.
     */
    private InstallationResources provider;

    /**
     * The target directory where to install the database.
     */
    private final Path directory;

    /**
     * The localized answers expected from the users. Keys are words like "Yes" or "No"
     * and boolean values are the meaning of the keys. Populated lazily from the
     * resource bundle on first use.
     */
    private final Map<String,Boolean> answers = new HashMap<>();

    /**
     * {@code true} if the user has accepted the EPSG terms of use, {@code false} if (s)he refused,
     * or {@code null} if (s)he did not yet answered that question.
     */
    private Boolean accepted;

    /**
     * Creates a new installation scripts provider.
     * Locale and color support are inherited from the running command when available.
     */
    public ResourcesDownloader() {
        final CommandRunner command = CommandRunner.instance;
        if (command != null) {
            locale = command.locale;
            colors = command.colors;
        } else {
            locale = Locale.getDefault();
            colors = false;
        }
        console   = System.console();
        directory = DataDirectory.DATABASES.getDirectory();
    }

    /**
     * Returns the name of the authority who provides data under non-Apache terms of use.
     * If this {@code ResourcesDownloader} can not ask user's agreement because there is
     * no {@link Console} attached to the current Java virtual machine, then this method
     * returns an empty set.
     *
     * @return {@code "EPSG"} or an empty set.
     */
    @Override
    public Set<String> getAuthorities() {
        return (console != null && directory != null) ? Collections.singleton(EPSG) : Collections.emptySet();
    }

    /**
     * Downloads the provider to use for fetching the actual licensed data after we got user's agreement.
     * The JAR at {@link #DOWNLOAD_URL} is expected to declare a non-fallback
     * {@link InstallationResources} service for the EPSG authority.
     *
     * @throws IOException if the JAR can not be reached or contains no suitable provider.
     */
    private static InstallationResources download() throws IOException {
        for (final InstallationResources c : ServiceLoader.load(InstallationResources.class,
                new URLClassLoader(new URL[] {new URL(DOWNLOAD_URL)})))
        {
            // Skip this class itself (annotated @Fallback) and any provider for other authorities.
            if (!c.getClass().isAnnotationPresent(Fallback.class) && c.getAuthorities().contains(EPSG)) {
                return c;
            }
        }
        throw new FileNotFoundException();      // Should not happen.
    }

    /**
     * Returns the provider to use for fetching the actual licensed data after we got user's agreement.
     * This method asks for user's agreement when first invoked.
     *
     * @param authority        shall always be {@code "EPSG"} in the current implementation.
     * @param requireAgreement {@code true} if license agreement is required.
     * @throws AccessDeniedException if the user does not accept to install the EPSG dataset.
     * @throws IOException if an error occurred while reading the {@link #DOWNLOAD_URL}.
     */
    private synchronized InstallationResources provider(final String authority, final boolean requireAgreement)
            throws IOException
    {
        if (!EPSG.equals(authority)) {
            throw new IllegalArgumentException(Errors.format(Errors.Keys.IllegalArgumentValue_2, "authority", authority));
        }
        final ResourceBundle resources = ResourceBundle.getBundle("org.apache.sis.console.Messages", locale);
        if (answers.isEmpty()) {
            for (final String r : resources.getString("yes").split("\\|")) answers.put(r, Boolean.TRUE);
            for (final String r : resources.getString("no" ).split("\\|")) answers.put(r, Boolean.FALSE);
        }
        final String textColor, linkColor, linkOff, actionColor, resetColor;
        if (colors) {
            textColor   = X364.FOREGROUND_YELLOW .sequence();
            linkColor   = X364.UNDERLINE         .sequence();
            linkOff     = X364.NO_UNDERLINE      .sequence();
            actionColor = X364.FOREGROUND_GREEN  .sequence();
            resetColor  = X364.FOREGROUND_DEFAULT.sequence();
        } else {
            textColor = linkColor = linkOff = actionColor = resetColor = "";
        }
        /*
         * Start the download if the user accepts. We need to begin the download in order to get the
         * license text bundled in the JAR file. We will not necessarily ask for user agreement here.
         */
        if (provider == null) {
            if (console == null) {
                throw new IllegalStateException();
            }
            console.format(resources.getString("install"), textColor, DATABASE_SIZE, linkColor, directory, linkOff, resetColor);
            if (!accept(resources.getString("download"), textColor, resetColor)) {
                console.format("%n");
                throw new AccessDeniedException(null);
            }
            console.format(resources.getString("downloading"), actionColor, resetColor);
            provider = download();
        }
        /*
         * If there is a need to ask for user agreement and we didn't asked yet, ask now.
         */
        if (requireAgreement && accepted == null) {
            final String license = getLicense(authority, locale, "text/plain");
            if (license == null) {
                accepted = Boolean.TRUE;
            } else {
                console.format("%n").writer().write(license);
                console.format("%n");
                accepted = accept(resources.getString("accept"), textColor, resetColor);
                if (accepted) {
                    console.format(resources.getString("installing"), actionColor, resetColor);
                }
            }
        }
        if (accepted != null && !accepted) {
            throw new AccessDeniedException(null);
        }
        return provider;
    }

    /**
     * Asks the user to answer by "Yes" or "No". Callers is responsible for ensuring
     * that the {@link #answers} map is non-empty before to invoke this method.
     * Unrecognized answers cause the prompt to be repeated. Reaching the end of
     * the input stream (e.g. exhausted piped input) is interpreted as a refusal
     * instead of throwing a {@link NullPointerException}.
     *
     * @param prompt message to show to the user.
     * @return the user's answer, or {@code false} if the input stream ended.
     */
    private boolean accept(final String prompt, final Object... arguments) {
        Boolean answer;
        do {
            final String line = console.readLine(prompt, arguments);
            if (line == null) {
                return false;                   // End of stream: treat as refusal.
            }
            answer = answers.get(line.toLowerCase(locale));
        } while (answer == null);
        return answer;
    }

    /**
     * Returns the terms of use of the dataset provided by the given authority, or {@code null} if none.
     * The terms of use can be returned in either plain text or HTML.
     *
     * @param  authority One of the values returned by {@link #getAuthorities()}.
     * @param  mimeType  Either {@code "text/plain"} or {@code "text/html"}.
     * @return The terms of use in plain text or HTML, or {@code null} if none.
     * @throws IllegalArgumentException if the given {@code authority} argument is not one of the expected values.
     * @throws IOException if an error occurred while reading the license file.
     */
    @Override
    public String getLicense(String authority, Locale locale, String mimeType) throws IOException {
        return provider(authority, false).getLicense(authority, locale, mimeType);
    }

    /**
     * Returns the names of installation scripts provided by the given authority.
     * This method is invoked by {@link org.apache.sis.referencing.factory.sql.EPSGFactory#install(Connection)}
     * for listing the SQL scripts to execute during EPSG dataset installation.
     *
     * <p>If that question has not already been asked, this method asks to the user if (s)he accepts
     * EPSG terms of use. If (s)he refuses, an {@link AccessDeniedException} will be thrown.</p>
     *
     * @param  authority One of the values returned by {@link #getAuthorities()}.
     * @return The names of all SQL scripts to execute.
     * @throws IllegalArgumentException if the given {@code authority} argument is not one of the expected values.
     * @throws IOException if an error occurred while fetching the script names.
     */
    @Override
    public String[] getResourceNames(final String authority) throws IOException {
        return provider(authority, true).getResourceNames(authority);
    }

    /**
     * Returns a reader for the installation script at the given index.
     * This method is invoked by {@link org.apache.sis.referencing.factory.sql.EPSGFactory#install(Connection)}
     * for getting the SQL scripts to execute during EPSG dataset installation.
     *
     * <p>If that question has not already been asked, this method asks to the user if (s)he accepts
     * EPSG terms of use. If (s)he refuses, an {@link AccessDeniedException} will be thrown.</p>
     *
     * @param  authority One of the values returned by {@link #getAuthorities()}.
     * @param  resource  Index of the script to open, from 0 inclusive to
     *                   <code>{@linkplain #getResourceNames(String) getResourceNames}(authority).length</code> exclusive.
     * @return A reader for the installation script content.
     * @throws IllegalArgumentException if the given {@code authority} argument is not one of the expected values.
     * @throws IndexOutOfBoundsException if the given {@code resource} argument is out of bounds.
     * @throws IOException if an error occurred while creating the reader.
     */
    @Override
    public BufferedReader openScript(final String authority, final int resource) throws IOException {
        return provider(authority, true).openScript(authority, resource);
    }
}
|
|
package riotapi.staticdata.item;
/**
 * Plain data-transfer object holding the stat modifiers of a static-data item.
 *
 * <p>Field names deliberately mirror the upstream JSON keys (non-standard Java
 * casing, {@code r}-prefixed runic stats) so they can be mapped by name;
 * do not rename them. All values default to {@code 0.0}.</p>
 */
public class BasicDataStatsDto {

    private double FlatArmorMod;
    private double FlatAttackSpeedMod;
    private double FlatBlockMod;
    private double FlatCritChanceMod;
    private double FlatCritDamageMod;
    private double FlatEXPBonus;
    private double FlatEnergyPoolMod;
    private double FlatEnergyRegenMod;
    private double FlatHPPoolMod;
    private double FlatHPRegenMod;
    private double FlatMPPoolMod;
    private double FlatMPRegenMod;
    private double FlatMagicDamageMod;
    private double FlatMovementSpeedMod;
    private double FlatPhysicalDamageMod;
    private double FlatSpellBlockMod;
    private double PercentArmorMod;
    private double PercentAttackSpeedMod;
    private double PercentBlockMod;
    private double PercentCritChanceMod;
    private double PercentCritDamageMod;
    private double PercentDodgeMod;
    private double PercentEXPBonus;
    private double PercentHPPoolMod;
    private double PercentHPRegenMod;
    private double PercentLifeStealMod;
    private double PercentMPPoolMod;
    private double PercentMPRegenMod;
    private double PercentMagicDamageMod;
    private double PercentMovementSpeedMod;
    private double PercentPhysicalDamageMod;
    private double PercentSpellBlockMod;
    private double PercentSpellVampMod;
    private double rFlatArmorModPerLevel;
    private double rFlatArmorPenetrationMod;
    private double rFlatArmorPenetrationModPerLevel;
    private double rFlatCritChanceModPerLevel;
    private double rFlatCritDamageModPerLevel;
    private double rFlatDodgeMod;
    private double rFlatDodgeModPerLevel;
    private double rFlatEnergyModPerLevel;
    private double rFlatEnergyRegenModPerLevel;
    private double rFlatGoldPer10Mod;
    private double rFlatHPModPerLevel;
    private double rFlatHPRegenModPerLevel;
    private double rFlatMPModPerLevel;
    private double rFlatMPRegenModPerLevel;
    private double rFlatMagicDamageModPerLevel;
    private double rFlatMagicPenetrationMod;
    private double rFlatMagicPenetrationModPerLevel;
    private double rFlatMovementSpeedModPerLevel;
    private double rFlatPhysicalDamageModPerLevel;
    private double rFlatSpellBlockModPerLevel;
    private double rFlatTimeDeadMod;
    private double rFlatTimeDeadModPerLevel;
    private double rPercentArmorPenetrationMod;
    private double rPercentArmorPenetrationModPerLevel;
    private double rPercentAttackSpeedModPerLevel;
    private double rPercentCooldownMod;
    private double rPercentCooldownModPerLevel;
    private double rPercentMagicPenetrationMod;
    private double rPercentMagicPenetrationModPerLevel;
    private double rPercentMovementSpeedModPerLevel;
    private double rPercentTimeDeadMod;
    private double rPercentTimeDeadModPerLevel;

    // --- Accessors (trivial; kept one-per-line for readability) -------------

    public double get_FlatArmorMod() { return FlatArmorMod; }
    public void set_FlatArmorMod(double FlatArmorMod) { this.FlatArmorMod = FlatArmorMod; }

    public double get_FlatAttackSpeedMod() { return FlatAttackSpeedMod; }
    public void set_FlatAttackSpeedMod(double FlatAttackSpeedMod) { this.FlatAttackSpeedMod = FlatAttackSpeedMod; }

    public double get_FlatBlockMod() { return FlatBlockMod; }
    public void set_FlatBlockMod(double FlatBlockMod) { this.FlatBlockMod = FlatBlockMod; }

    public double get_FlatCritChanceMod() { return FlatCritChanceMod; }
    public void set_FlatCritChanceMod(double FlatCritChanceMod) { this.FlatCritChanceMod = FlatCritChanceMod; }

    public double get_FlatCritDamageMod() { return FlatCritDamageMod; }
    public void set_FlatCritDamageMod(double FlatCritDamageMod) { this.FlatCritDamageMod = FlatCritDamageMod; }

    public double get_FlatEXPBonus() { return FlatEXPBonus; }
    public void set_FlatEXPBonus(double FlatEXPBonus) { this.FlatEXPBonus = FlatEXPBonus; }

    public double get_FlatEnergyPoolMod() { return FlatEnergyPoolMod; }
    public void set_FlatEnergyPoolMod(double FlatEnergyPoolMod) { this.FlatEnergyPoolMod = FlatEnergyPoolMod; }

    public double get_FlatEnergyRegenMod() { return FlatEnergyRegenMod; }
    public void set_FlatEnergyRegenMod(double FlatEnergyRegenMod) { this.FlatEnergyRegenMod = FlatEnergyRegenMod; }

    public double get_FlatHPPoolMod() { return FlatHPPoolMod; }
    public void set_FlatHPPoolMod(double FlatHPPoolMod) { this.FlatHPPoolMod = FlatHPPoolMod; }

    public double get_FlatHPRegenMod() { return FlatHPRegenMod; }
    public void set_FlatHPRegenMod(double FlatHPRegenMod) { this.FlatHPRegenMod = FlatHPRegenMod; }

    public double get_FlatMPPoolMod() { return FlatMPPoolMod; }
    public void set_FlatMPPoolMod(double FlatMPPoolMod) { this.FlatMPPoolMod = FlatMPPoolMod; }

    public double get_FlatMPRegenMod() { return FlatMPRegenMod; }
    public void set_FlatMPRegenMod(double FlatMPRegenMod) { this.FlatMPRegenMod = FlatMPRegenMod; }

    public double get_FlatMagicDamageMod() { return FlatMagicDamageMod; }
    public void set_FlatMagicDamageMod(double FlatMagicDamageMod) { this.FlatMagicDamageMod = FlatMagicDamageMod; }

    public double get_FlatMovementSpeedMod() { return FlatMovementSpeedMod; }
    public void set_FlatMovementSpeedMod(double FlatMovementSpeedMod) { this.FlatMovementSpeedMod = FlatMovementSpeedMod; }

    public double get_FlatPhysicalDamageMod() { return FlatPhysicalDamageMod; }
    public void set_FlatPhysicalDamageMod(double FlatPhysicalDamageMod) { this.FlatPhysicalDamageMod = FlatPhysicalDamageMod; }

    public double get_FlatSpellBlockMod() { return FlatSpellBlockMod; }
    public void set_FlatSpellBlockMod(double FlatSpellBlockMod) { this.FlatSpellBlockMod = FlatSpellBlockMod; }

    public double get_PercentArmorMod() { return PercentArmorMod; }
    public void set_PercentArmorMod(double PercentArmorMod) { this.PercentArmorMod = PercentArmorMod; }

    public double get_PercentAttackSpeedMod() { return PercentAttackSpeedMod; }
    public void set_PercentAttackSpeedMod(double PercentAttackSpeedMod) { this.PercentAttackSpeedMod = PercentAttackSpeedMod; }

    public double get_PercentBlockMod() { return PercentBlockMod; }
    public void set_PercentBlockMod(double PercentBlockMod) { this.PercentBlockMod = PercentBlockMod; }

    public double get_PercentCritChanceMod() { return PercentCritChanceMod; }
    public void set_PercentCritChanceMod(double PercentCritChanceMod) { this.PercentCritChanceMod = PercentCritChanceMod; }

    public double get_PercentCritDamageMod() { return PercentCritDamageMod; }
    public void set_PercentCritDamageMod(double PercentCritDamageMod) { this.PercentCritDamageMod = PercentCritDamageMod; }

    public double get_PercentDodgeMod() { return PercentDodgeMod; }
    public void set_PercentDodgeMod(double PercentDodgeMod) { this.PercentDodgeMod = PercentDodgeMod; }

    public double get_PercentEXPBonus() { return PercentEXPBonus; }
    public void set_PercentEXPBonus(double PercentEXPBonus) { this.PercentEXPBonus = PercentEXPBonus; }

    public double get_PercentHPPoolMod() { return PercentHPPoolMod; }
    public void set_PercentHPPoolMod(double PercentHPPoolMod) { this.PercentHPPoolMod = PercentHPPoolMod; }

    public double get_PercentHPRegenMod() { return PercentHPRegenMod; }
    public void set_PercentHPRegenMod(double PercentHPRegenMod) { this.PercentHPRegenMod = PercentHPRegenMod; }

    public double get_PercentLifeStealMod() { return PercentLifeStealMod; }
    public void set_PercentLifeStealMod(double PercentLifeStealMod) { this.PercentLifeStealMod = PercentLifeStealMod; }

    public double get_PercentMPPoolMod() { return PercentMPPoolMod; }
    public void set_PercentMPPoolMod(double PercentMPPoolMod) { this.PercentMPPoolMod = PercentMPPoolMod; }

    public double get_PercentMPRegenMod() { return PercentMPRegenMod; }
    public void set_PercentMPRegenMod(double PercentMPRegenMod) { this.PercentMPRegenMod = PercentMPRegenMod; }

    public double get_PercentMagicDamageMod() { return PercentMagicDamageMod; }
    public void set_PercentMagicDamageMod(double PercentMagicDamageMod) { this.PercentMagicDamageMod = PercentMagicDamageMod; }

    public double get_PercentMovementSpeedMod() { return PercentMovementSpeedMod; }
    public void set_PercentMovementSpeedMod(double PercentMovementSpeedMod) { this.PercentMovementSpeedMod = PercentMovementSpeedMod; }

    public double get_PercentPhysicalDamageMod() { return PercentPhysicalDamageMod; }
    public void set_PercentPhysicalDamageMod(double PercentPhysicalDamageMod) { this.PercentPhysicalDamageMod = PercentPhysicalDamageMod; }

    public double get_PercentSpellBlockMod() { return PercentSpellBlockMod; }
    public void set_PercentSpellBlockMod(double PercentSpellBlockMod) { this.PercentSpellBlockMod = PercentSpellBlockMod; }

    public double get_PercentSpellVampMod() { return PercentSpellVampMod; }
    public void set_PercentSpellVampMod(double PercentSpellVampMod) { this.PercentSpellVampMod = PercentSpellVampMod; }

    public double get_rFlatArmorModPerLevel() { return rFlatArmorModPerLevel; }
    public void set_rFlatArmorModPerLevel(double rFlatArmorModPerLevel) { this.rFlatArmorModPerLevel = rFlatArmorModPerLevel; }

    public double get_rFlatArmorPenetrationMod() { return rFlatArmorPenetrationMod; }
    public void set_rFlatArmorPenetrationMod(double rFlatArmorPenetrationMod) { this.rFlatArmorPenetrationMod = rFlatArmorPenetrationMod; }

    public double get_rFlatArmorPenetrationModPerLevel() { return rFlatArmorPenetrationModPerLevel; }
    public void set_rFlatArmorPenetrationModPerLevel(double rFlatArmorPenetrationModPerLevel) { this.rFlatArmorPenetrationModPerLevel = rFlatArmorPenetrationModPerLevel; }

    public double get_rFlatCritChanceModPerLevel() { return rFlatCritChanceModPerLevel; }
    public void set_rFlatCritChanceModPerLevel(double rFlatCritChanceModPerLevel) { this.rFlatCritChanceModPerLevel = rFlatCritChanceModPerLevel; }

    public double get_rFlatCritDamageModPerLevel() { return rFlatCritDamageModPerLevel; }
    public void set_rFlatCritDamageModPerLevel(double rFlatCritDamageModPerLevel) { this.rFlatCritDamageModPerLevel = rFlatCritDamageModPerLevel; }

    public double get_rFlatDodgeMod() { return rFlatDodgeMod; }
    public void set_rFlatDodgeMod(double rFlatDodgeMod) { this.rFlatDodgeMod = rFlatDodgeMod; }

    public double get_rFlatDodgeModPerLevel() { return rFlatDodgeModPerLevel; }
    public void set_rFlatDodgeModPerLevel(double rFlatDodgeModPerLevel) { this.rFlatDodgeModPerLevel = rFlatDodgeModPerLevel; }

    public double get_rFlatEnergyModPerLevel() { return rFlatEnergyModPerLevel; }
    public void set_rFlatEnergyModPerLevel(double rFlatEnergyModPerLevel) { this.rFlatEnergyModPerLevel = rFlatEnergyModPerLevel; }

    public double get_rFlatEnergyRegenModPerLevel() { return rFlatEnergyRegenModPerLevel; }
    public void set_rFlatEnergyRegenModPerLevel(double rFlatEnergyRegenModPerLevel) { this.rFlatEnergyRegenModPerLevel = rFlatEnergyRegenModPerLevel; }

    public double get_rFlatGoldPer10Mod() { return rFlatGoldPer10Mod; }
    public void set_rFlatGoldPer10Mod(double rFlatGoldPer10Mod) { this.rFlatGoldPer10Mod = rFlatGoldPer10Mod; }

    public double get_rFlatHPModPerLevel() { return rFlatHPModPerLevel; }
    public void set_rFlatHPModPerLevel(double rFlatHPModPerLevel) { this.rFlatHPModPerLevel = rFlatHPModPerLevel; }

    public double get_rFlatHPRegenModPerLevel() { return rFlatHPRegenModPerLevel; }
    public void set_rFlatHPRegenModPerLevel(double rFlatHPRegenModPerLevel) { this.rFlatHPRegenModPerLevel = rFlatHPRegenModPerLevel; }

    public double get_rFlatMPModPerLevel() { return rFlatMPModPerLevel; }
    public void set_rFlatMPModPerLevel(double rFlatMPModPerLevel) { this.rFlatMPModPerLevel = rFlatMPModPerLevel; }

    public double get_rFlatMPRegenModPerLevel() { return rFlatMPRegenModPerLevel; }
    public void set_rFlatMPRegenModPerLevel(double rFlatMPRegenModPerLevel) { this.rFlatMPRegenModPerLevel = rFlatMPRegenModPerLevel; }

    public double get_rFlatMagicDamageModPerLevel() { return rFlatMagicDamageModPerLevel; }
    public void set_rFlatMagicDamageModPerLevel(double rFlatMagicDamageModPerLevel) { this.rFlatMagicDamageModPerLevel = rFlatMagicDamageModPerLevel; }

    public double get_rFlatMagicPenetrationMod() { return rFlatMagicPenetrationMod; }
    public void set_rFlatMagicPenetrationMod(double rFlatMagicPenetrationMod) { this.rFlatMagicPenetrationMod = rFlatMagicPenetrationMod; }

    public double get_rFlatMagicPenetrationModPerLevel() { return rFlatMagicPenetrationModPerLevel; }
    public void set_rFlatMagicPenetrationModPerLevel(double rFlatMagicPenetrationModPerLevel) { this.rFlatMagicPenetrationModPerLevel = rFlatMagicPenetrationModPerLevel; }

    public double get_rFlatMovementSpeedModPerLevel() { return rFlatMovementSpeedModPerLevel; }
    public void set_rFlatMovementSpeedModPerLevel(double rFlatMovementSpeedModPerLevel) { this.rFlatMovementSpeedModPerLevel = rFlatMovementSpeedModPerLevel; }

    public double get_rFlatPhysicalDamageModPerLevel() { return rFlatPhysicalDamageModPerLevel; }
    public void set_rFlatPhysicalDamageModPerLevel(double rFlatPhysicalDamageModPerLevel) { this.rFlatPhysicalDamageModPerLevel = rFlatPhysicalDamageModPerLevel; }

    public double get_rFlatSpellBlockModPerLevel() { return rFlatSpellBlockModPerLevel; }
    public void set_rFlatSpellBlockModPerLevel(double rFlatSpellBlockModPerLevel) { this.rFlatSpellBlockModPerLevel = rFlatSpellBlockModPerLevel; }

    public double get_rFlatTimeDeadMod() { return rFlatTimeDeadMod; }
    public void set_rFlatTimeDeadMod(double rFlatTimeDeadMod) { this.rFlatTimeDeadMod = rFlatTimeDeadMod; }

    public double get_rFlatTimeDeadModPerLevel() { return rFlatTimeDeadModPerLevel; }
    public void set_rFlatTimeDeadModPerLevel(double rFlatTimeDeadModPerLevel) { this.rFlatTimeDeadModPerLevel = rFlatTimeDeadModPerLevel; }

    public double get_rPercentArmorPenetrationMod() { return rPercentArmorPenetrationMod; }
    public void set_rPercentArmorPenetrationMod(double rPercentArmorPenetrationMod) { this.rPercentArmorPenetrationMod = rPercentArmorPenetrationMod; }

    public double get_rPercentArmorPenetrationModPerLevel() { return rPercentArmorPenetrationModPerLevel; }
    public void set_rPercentArmorPenetrationModPerLevel(double rPercentArmorPenetrationModPerLevel) { this.rPercentArmorPenetrationModPerLevel = rPercentArmorPenetrationModPerLevel; }

    public double get_rPercentAttackSpeedModPerLevel() { return rPercentAttackSpeedModPerLevel; }
    public void set_rPercentAttackSpeedModPerLevel(double rPercentAttackSpeedModPerLevel) { this.rPercentAttackSpeedModPerLevel = rPercentAttackSpeedModPerLevel; }

    public double get_rPercentCooldownMod() { return rPercentCooldownMod; }
    public void set_rPercentCooldownMod(double rPercentCooldownMod) { this.rPercentCooldownMod = rPercentCooldownMod; }

    public double get_rPercentCooldownModPerLevel() { return rPercentCooldownModPerLevel; }
    public void set_rPercentCooldownModPerLevel(double rPercentCooldownModPerLevel) { this.rPercentCooldownModPerLevel = rPercentCooldownModPerLevel; }

    public double get_rPercentMagicPenetrationMod() { return rPercentMagicPenetrationMod; }
    public void set_rPercentMagicPenetrationMod(double rPercentMagicPenetrationMod) { this.rPercentMagicPenetrationMod = rPercentMagicPenetrationMod; }

    public double get_rPercentMagicPenetrationModPerLevel() { return rPercentMagicPenetrationModPerLevel; }
    public void set_rPercentMagicPenetrationModPerLevel(double rPercentMagicPenetrationModPerLevel) { this.rPercentMagicPenetrationModPerLevel = rPercentMagicPenetrationModPerLevel; }

    public double get_rPercentMovementSpeedModPerLevel() { return rPercentMovementSpeedModPerLevel; }
    public void set_rPercentMovementSpeedModPerLevel(double rPercentMovementSpeedModPerLevel) { this.rPercentMovementSpeedModPerLevel = rPercentMovementSpeedModPerLevel; }

    public double get_rPercentTimeDeadMod() { return rPercentTimeDeadMod; }
    public void set_rPercentTimeDeadMod(double rPercentTimeDeadMod) { this.rPercentTimeDeadMod = rPercentTimeDeadMod; }

    public double get_rPercentTimeDeadModPerLevel() { return rPercentTimeDeadModPerLevel; }
    public void set_rPercentTimeDeadModPerLevel(double rPercentTimeDeadModPerLevel) { this.rPercentTimeDeadModPerLevel = rPercentTimeDeadModPerLevel; }
}
|
|
/*******************************************************************************
* Copyright FUJITSU LIMITED 2017
*******************************************************************************/
package org.oscm.test;
import java.lang.annotation.Annotation;
import java.lang.reflect.Array;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;
import javax.persistence.CascadeType;
import javax.persistence.ManyToMany;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import javax.persistence.OneToOne;
import org.oscm.domobjects.DomainDataContainer;
import org.oscm.domobjects.DomainObject;
// ----------------------------------------------------------------------------
/**
* This non-instantiable non-extendible class provides a static clone() method
* suitable for cloning an instance of a class satisfying the following
* constraints:
* <UL>
* <LI>no-arg constructor is available (not necessarily public)
* <LI>neither the class nor any of its superclasses have any final fields
* <LI>neither the class nor any of its superclasses have any inner classes
* </UL>
*
* This class requires sufficient security privileges to work. This
* implementation is not industrial strength and is provided for demo purposes.
* <P>
*
* MT-safety: this class is safe for use from mutliple concurrent threads.
*
* @author (C) <a href="mailto:[email protected]">Vlad Roubtsov</a>,
* 2002
*/
public abstract class ReflectiveClone {
    /**
     * Makes a reflection-based deep clone of 'obj'. A fresh identity map and
     * metadata cache are created per top-level call, so each invocation is
     * independent of previous ones.
     *
     * @param obj
     *            input object to clone [null will cause a NullPointerException]
     * @return obj's deep clone [never null; can be == to 'obj']
     *
     * @throws RuntimeException
     *             on any failure
     */
    public static Object clone(final Object obj) {
        return clone(obj, new IdentityHashMap<Object, Object>(),
                new HashMap<Class<?>, ClassMetadata>());
    }
    /** Not instantiable: only the static {@link #clone(Object)} entry point is exposed. */
    private ReflectiveClone() {
    } // prevent subclassing
    /*
     * Internal class used to cache class metadata information, so expensive
     * reflective lookups and setAccessible() calls happen once per class.
     */
    private static final class ClassMetadata {
        Constructor<?> m_noargConstructor; // cached no-arg constructor
        Field[] m_declaredFields; // cached declared fields
        boolean m_noargConstructorAccessible; // true once setAccessible(true) succeeded on the constructor
        boolean m_fieldsAccessible; // true once all declared fields of this class were made accessible
    } // end of nested class
    /**
     * The workhorse behind clone(Object). This method is mutually recursive
     * with {@link #setFields(Object, Object, Field[], boolean, Map, Map)}.
     * Handles three cases: arrays (copy-constructed element by element),
     * known immutable classes (returned as-is), and everything else
     * (instantiated via the no-arg constructor and populated reflectively).
     *
     * @param obj
     *            current source object being cloned
     * @param objMap
     *            maps a source object to its clone in the current traversal;
     *            registering clones here before recursing is what terminates
     *            reference cycles
     * @param metadataMap
     *            maps a Class object to its ClassMetadata.
     * @return the clone of 'obj' [can be == to 'obj' for immutables]
     */
    private static Object clone(final Object obj,
            final Map<Object, Object> objMap,
            final Map<Class<?>, ClassMetadata> metadataMap) {
        if (DEBUG)
            System.out.println("traversing src obj [" + obj + "]");
        // return 'obj' clone if it has been instantiated already:
        if (objMap.containsKey(obj))
            return objMap.get(obj);
        final Class<?> objClass = obj.getClass();
        final Object result;
        if (objClass.isArray()) {
            final int arrayLength = Array.getLength(obj);
            if (arrayLength == 0) // empty arrays are immutable
            {
                objMap.put(obj, obj);
                return obj;
            } else {
                final Class<?> componentType = objClass.getComponentType();
                // even though arrays implicitly have a public clone(), it
                // cannot be invoked reflectively, so need to do copy
                // construction:
                result = Array.newInstance(componentType, arrayLength);
                // register the clone BEFORE recursing so cycles resolve to it:
                objMap.put(obj, result);
                if (componentType.isPrimitive()
                        || FINAL_IMMUTABLE_CLASSES.contains(componentType)) {
                    // elements cannot hold mutable state: a flat copy suffices
                    System.arraycopy(obj, 0, result, 0, arrayLength);
                } else {
                    for (int i = 0; i < arrayLength; ++i) {
                        // recursively clone each array slot:
                        final Object slot = Array.get(obj, i);
                        if (slot != null) {
                            final Object slotClone = clone(slot, objMap,
                                    metadataMap);
                            Array.set(result, i, slotClone);
                        }
                    }
                }
                return result;
            }
        } else if (FINAL_IMMUTABLE_CLASSES.contains(objClass)) {
            // immutable instances are shared, not copied
            objMap.put(obj, obj);
            return obj;
        }
        // fall through to reflectively populating an instance created
        // with a noarg constructor:
        ClassMetadata metadata = metadataMap.get(objClass);
        if (metadata == null) {
            metadata = new ClassMetadata();
            metadataMap.put(objClass, metadata);
        }
        { // clone = objClass.newInstance () can't handle private constructors
            Constructor<?> noarg = metadata.m_noargConstructor;
            if (noarg == null) {
                try {
                    noarg = objClass.getDeclaredConstructor(EMPTY_CLASS_ARRAY);
                    metadata.m_noargConstructor = noarg;
                } catch (Exception e) {
                    throw new RuntimeException("class [" + objClass.getName()
                            + "] has no noarg constructor: " + e.toString());
                }
            }
            // widen access once per class; result is cached in the metadata
            if (!metadata.m_noargConstructorAccessible
                    && (Modifier.PUBLIC & noarg.getModifiers()) == 0) {
                try {
                    noarg.setAccessible(true);
                } catch (SecurityException e) {
                    throw new RuntimeException(
                            "cannot access noarg constructor [" + noarg
                                    + "] of class [" + objClass.getName()
                                    + "]: " + e.toString());
                }
                metadata.m_noargConstructorAccessible = true;
            }
            try // to create a clone via the no-arg constructor
            {
                result = noarg.newInstance(EMPTY_OBJECT_ARRAY);
                // register before field population so cycles resolve to it:
                objMap.put(obj, result);
            } catch (Exception e) {
                throw new RuntimeException("cannot instantiate class ["
                        + objClass.getName() + "] using noarg constructor: "
                        + e.toString());
            }
        }
        // walk up the inheritance chain, cloning declared fields at each level:
        for (Class<?> c = objClass; c != Object.class; c = c.getSuperclass()) {
            metadata = metadataMap.get(c);
            if (metadata == null) {
                metadata = new ClassMetadata();
                metadataMap.put(c, metadata);
            }
            Field[] declaredFields = metadata.m_declaredFields;
            if (declaredFields == null) {
                declaredFields = c.getDeclaredFields();
                metadata.m_declaredFields = declaredFields;
            }
            setFields(obj, result, declaredFields, metadata.m_fieldsAccessible,
                    objMap, metadataMap);
            metadata.m_fieldsAccessible = true;
        }
        return result;
    }
    /**
     * Clones and sets all declared 'fields' from 'src' to 'dest' and
     * updates the object and metadata maps accordingly. Static fields are
     * skipped; final fields cause a RuntimeException. Mutable reference
     * values are deep-cloned only for arrays, DomainDataContainer types,
     * and JPA associations whose cascade options require it (see
     * needsToCascade); everything else is copied by reference.
     *
     * @param src
     *            source object
     * @param dest
     *            src's clone [not fully populated yet]
     * @param fields
     *            fields to be populated
     * @param accessible
     *            'true' if all 'fields' have been made accessible during this
     *            traversal
     * @param objMap
     *            maps a source object to its clone in the current traversal
     * @param metadataMap
     *            maps a Class object to its ClassMetadata.
     */
    private static void setFields(final Object src, final Object dest,
            final Field[] fields, final boolean accessible,
            final Map<Object, Object> objMap,
            final Map<Class<?>, ClassMetadata> metadataMap) {
        for (int f = 0, fieldsLength = fields.length; f < fieldsLength; ++f) {
            final Field field = fields[f];
            final int modifiers = field.getModifiers();
            if (DEBUG)
                System.out.println("dest object [" + dest + "]: field #" + f
                        + ", [" + field + "]");
            if ((Modifier.STATIC & modifiers) != 0)
                continue;
            // can also skip transient fields here if you want reflective
            // cloning
            // to be more like serialization
            if ((Modifier.FINAL & modifiers) != 0)
                throw new RuntimeException("cannot set final field ["
                        + field.getName() + "] of class ["
                        + src.getClass().getName() + "]");
            if (!accessible && ((Modifier.PUBLIC & modifiers) == 0)) {
                try {
                    field.setAccessible(true);
                } catch (SecurityException e) {
                    throw new RuntimeException("cannot access field ["
                            + field.getName() + "] of class ["
                            + src.getClass().getName() + "]: " + e.toString());
                }
            }
            try // to clone and set the field value:
            {
                Object value = field.get(src);
                if (value == null) {
                    field.set(dest, null); // can't assume that the constructor
                    // left this as null
                    if (DEBUG)
                        System.out.println("set field #" + f + ", [" + field
                                + "] of object [" + dest + "]: NULL");
                } else {
                    final Class<?> valueType = value.getClass();
                    // handle enum
                    if (valueType.getSuperclass() == Enum.class) {
                        // enum constants are singletons: share, never clone
                        field.set(dest, value);
                    } else {
                        if (!valueType.isPrimitive()
                                && !FINAL_IMMUTABLE_CLASSES.contains(valueType)) {
                            // value is an object reference and it could be
                            // either
                            // an array
                            // or of some mutable type: try to clone it deeply
                            // to be
                            // on the safe side
                            // Cloning of associations (i.e. members of
                            // non-primitive classes)
                            // shall
                            // follow the following rules:
                            // 1) Follow if type is DomainDataContainer
                            // 2) Follow if member is JPA-annotated with
                            // cascade-option ALL,
                            // PERSIST, DELETE or MERGE
                            if (DomainDataContainer.class
                                    .isAssignableFrom(field.getType())
                                    || needsToCascade(field)
                                    || value.getClass().isArray()) {
                                value = clone(value, objMap, metadataMap);
                            }
                        }
                        field.set(dest, value);
                    }
                    if (DEBUG)
                        System.out.println("set field #" + f + ", [" + field
                                + "] of object [" + dest + "]: " + value);
                }
            } catch (Exception e) {
                if (DEBUG)
                    e.printStackTrace(System.out);
                throw new RuntimeException("cannot set field ["
                        + field.getName() + "] of class ["
                        + src.getClass().getName() + "]: " + e.toString());
            }
        }
    }
/**
 * Decides whether a domain-object association should be followed during
 * deep cloning, based on the JPA cascade options declared on the field.
 * Returns true only for DomainObject-typed fields whose relationship
 * annotation carries cascade ALL, MERGE, PERSIST or REMOVE.
 */
private static boolean needsToCascade(Field field) {
    // Only associations to domain objects can cascade at all.
    if (!DomainObject.class.isAssignableFrom(field.getType())) {
        return false;
    }
    // Probe the four relationship annotations in turn; at most one applies.
    CascadeType[] cascades = null;
    final OneToOne oneToOne = field.getAnnotation(OneToOne.class);
    if (oneToOne != null) {
        cascades = oneToOne.cascade();
    } else {
        final OneToMany oneToMany = field.getAnnotation(OneToMany.class);
        if (oneToMany != null) {
            cascades = oneToMany.cascade();
        } else {
            final ManyToOne manyToOne = field.getAnnotation(ManyToOne.class);
            if (manyToOne != null) {
                cascades = manyToOne.cascade();
            } else {
                final ManyToMany manyToMany = field.getAnnotation(ManyToMany.class);
                if (manyToMany != null) {
                    cascades = manyToMany.cascade();
                }
            }
        }
    }
    if (cascades == null) {
        return false;
    }
    // Any of these four options means the association must be cloned too.
    for (final CascadeType cascade : cascades) {
        switch (cascade) {
            case ALL:
            case MERGE:
            case PERSIST:
            case REMOVE:
                return true;
            default:
                break;
        }
    }
    return false;
}
// Enables verbose tracing of every field copy to System.out.
private static final boolean DEBUG = false;
// Types that are safe to copy by reference (final and immutable); set in
// <clinit> below.
private static final Set<Class<?>> FINAL_IMMUTABLE_CLASSES; // set in
// <clinit>
// Shared zero-length arrays, reused to avoid per-call allocations when
// invoking no-arg constructors reflectively.
private static final Object[] EMPTY_OBJECT_ARRAY = new Object[0];
private static final Class<?>[] EMPTY_CLASS_ARRAY = new Class[0];
static {
FINAL_IMMUTABLE_CLASSES = new HashSet<Class<?>>(17);
// add some common final/immutable classes:
FINAL_IMMUTABLE_CLASSES.add(String.class);
FINAL_IMMUTABLE_CLASSES.add(Byte.class);
FINAL_IMMUTABLE_CLASSES.add(Short.class);
FINAL_IMMUTABLE_CLASSES.add(Integer.class);
FINAL_IMMUTABLE_CLASSES.add(Long.class);
FINAL_IMMUTABLE_CLASSES.add(Float.class);
FINAL_IMMUTABLE_CLASSES.add(Double.class);
FINAL_IMMUTABLE_CLASSES.add(Character.class);
FINAL_IMMUTABLE_CLASSES.add(Boolean.class);
}
} // end of class
// ----------------------------------------------------------------------------
|
|
/*
* Copyright 2015-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rsocket.core;
import static io.rsocket.frame.FrameType.METADATA_PUSH;
import static io.rsocket.frame.FrameType.NEXT;
import static io.rsocket.frame.FrameType.REQUEST_CHANNEL;
import static io.rsocket.frame.FrameType.REQUEST_FNF;
import static io.rsocket.frame.FrameType.REQUEST_RESPONSE;
import static io.rsocket.frame.FrameType.REQUEST_STREAM;
import io.netty.buffer.ByteBuf;
import io.rsocket.FrameAssert;
import io.rsocket.Payload;
import io.rsocket.PayloadAssert;
import io.rsocket.RSocket;
import io.rsocket.buffer.LeaksTrackingByteBufAllocator;
import io.rsocket.frame.FrameType;
import io.rsocket.internal.subscriber.AssertSubscriber;
import io.rsocket.plugins.RequestInterceptor;
import io.rsocket.plugins.TestRequestInterceptor;
import io.rsocket.test.util.TestDuplexConnection;
import java.util.ArrayList;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Stream;
import org.assertj.core.api.Assumptions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.reactivestreams.Publisher;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Operators;
import reactor.test.publisher.TestPublisher;
/**
 * Parameterized tests for behavior shared by the responder-side operators
 * (request-response, request-stream, request-channel): handling a plain
 * request, a fragmented request, and fragmentation interrupted by a cancel.
 * Each scenario wires a concrete responder subscriber and asserts the frames
 * written to the test connection plus buffer-leak freedom.
 */
public class ResponderOperatorsCommonTest {
// Abstracts over the concrete responder subscriber so every test method can
// run once per interaction type.
interface Scenario {
FrameType requestType();
// Maximum number of payloads the responder may emit for this request type.
int maxElements();
// Builds a responder from an already-reassembled first payload.
ResponderFrameHandler responseOperator(
long initialRequestN,
Payload firstPayload,
TestRequesterResponderSupport streamManager,
RSocket handler);
// Builds a responder from the first fragment of a fragmented request.
ResponderFrameHandler responseOperator(
long initialRequestN,
ByteBuf firstFragment,
TestRequesterResponderSupport streamManager,
RSocket handler);
}
// One Scenario per interaction type; each registers the subscriber in the
// active-streams map and notifies the request interceptor, mirroring what
// the real responder does on frame receipt.
static Stream<Scenario> scenarios() {
return Stream.of(
new Scenario() {
@Override
public FrameType requestType() {
return FrameType.REQUEST_RESPONSE;
}
@Override
public int maxElements() {
return 1;
}
@Override
public ResponderFrameHandler responseOperator(
long initialRequestN,
ByteBuf firstFragment,
TestRequesterResponderSupport streamManager,
RSocket handler) {
int streamId = streamManager.getNextStreamId();
RequestResponseResponderSubscriber subscriber =
new RequestResponseResponderSubscriber(
streamId, firstFragment, streamManager, handler);
streamManager.activeStreams.put(streamId, subscriber);
final RequestInterceptor requestInterceptor = streamManager.getRequestInterceptor();
if (requestInterceptor != null) {
requestInterceptor.onStart(streamId, REQUEST_RESPONSE, null);
}
return subscriber;
}
@Override
public ResponderFrameHandler responseOperator(
long initialRequestN,
Payload firstPayload,
TestRequesterResponderSupport streamManager,
RSocket handler) {
int streamId = streamManager.getNextStreamId();
RequestResponseResponderSubscriber subscriber =
new RequestResponseResponderSubscriber(streamId, streamManager);
streamManager.activeStreams.put(streamId, subscriber);
final RequestInterceptor requestInterceptor = streamManager.getRequestInterceptor();
if (requestInterceptor != null) {
requestInterceptor.onStart(streamId, REQUEST_RESPONSE, null);
}
return handler.requestResponse(firstPayload).subscribeWith(subscriber);
}
@Override
public String toString() {
// NOTE(review): names the requester-side class while this scenario
// exercises RequestResponseResponderSubscriber -- looks like a
// copy-paste slip in the display name; confirm intent.
return RequestResponseRequesterMono.class.getSimpleName();
}
},
new Scenario() {
@Override
public FrameType requestType() {
return FrameType.REQUEST_STREAM;
}
@Override
public int maxElements() {
return Integer.MAX_VALUE;
}
@Override
public ResponderFrameHandler responseOperator(
long initialRequestN,
ByteBuf firstFragment,
TestRequesterResponderSupport streamManager,
RSocket handler) {
int streamId = streamManager.getNextStreamId();
RequestStreamResponderSubscriber subscriber =
new RequestStreamResponderSubscriber(
streamId, initialRequestN, firstFragment, streamManager, handler);
final RequestInterceptor requestInterceptor = streamManager.getRequestInterceptor();
if (requestInterceptor != null) {
requestInterceptor.onStart(streamId, REQUEST_STREAM, null);
}
streamManager.activeStreams.put(streamId, subscriber);
return subscriber;
}
@Override
public ResponderFrameHandler responseOperator(
long initialRequestN,
Payload firstPayload,
TestRequesterResponderSupport streamManager,
RSocket handler) {
int streamId = streamManager.getNextStreamId();
RequestStreamResponderSubscriber subscriber =
new RequestStreamResponderSubscriber(streamId, initialRequestN, streamManager);
streamManager.activeStreams.put(streamId, subscriber);
final RequestInterceptor requestInterceptor = streamManager.getRequestInterceptor();
if (requestInterceptor != null) {
requestInterceptor.onStart(streamId, REQUEST_STREAM, null);
}
return handler.requestStream(firstPayload).subscribeWith(subscriber);
}
@Override
public String toString() {
return RequestStreamResponderSubscriber.class.getSimpleName();
}
},
new Scenario() {
@Override
public FrameType requestType() {
return FrameType.REQUEST_CHANNEL;
}
@Override
public int maxElements() {
return Integer.MAX_VALUE;
}
@Override
public ResponderFrameHandler responseOperator(
long initialRequestN,
ByteBuf firstFragment,
TestRequesterResponderSupport streamManager,
RSocket handler) {
int streamId = streamManager.getNextStreamId();
RequestChannelResponderSubscriber subscriber =
new RequestChannelResponderSubscriber(
streamId, initialRequestN, firstFragment, streamManager, handler);
streamManager.activeStreams.put(streamId, subscriber);
final RequestInterceptor requestInterceptor = streamManager.getRequestInterceptor();
if (requestInterceptor != null) {
requestInterceptor.onStart(streamId, REQUEST_CHANNEL, null);
}
return subscriber;
}
@Override
public ResponderFrameHandler responseOperator(
long initialRequestN,
Payload firstPayload,
TestRequesterResponderSupport streamManager,
RSocket handler) {
int streamId = streamManager.getNextStreamId();
RequestChannelResponderSubscriber responderSubscriber =
new RequestChannelResponderSubscriber(
streamId, initialRequestN, firstPayload, streamManager);
streamManager.activeStreams.put(streamId, responderSubscriber);
final RequestInterceptor requestInterceptor = streamManager.getRequestInterceptor();
if (requestInterceptor != null) {
requestInterceptor.onStart(streamId, REQUEST_CHANNEL, null);
}
// For channels the subscriber is both the inbound Publisher handed to the
// handler and the Subscriber of the handler's outbound Flux.
return handler.requestChannel(responderSubscriber).subscribeWith(responderSubscriber);
}
@Override
public String toString() {
return RequestChannelResponderSubscriber.class.getSimpleName();
}
});
}
// Minimal RSocket handler: records the request payload(s) in 'consumer'
// and replies with whatever the test pushes through 'producer'.
static class TestHandler implements RSocket {
final TestPublisher<Payload> producer;
final AssertSubscriber<Payload> consumer;
TestHandler(TestPublisher<Payload> producer, AssertSubscriber<Payload> consumer) {
this.producer = producer;
this.consumer = consumer;
}
@Override
public Mono<Void> fireAndForget(Payload payload) {
consumer.onSubscribe(Operators.emptySubscription());
consumer.onNext(payload);
consumer.onComplete();
return producer.mono().then();
}
@Override
public Mono<Payload> requestResponse(Payload payload) {
consumer.onSubscribe(Operators.emptySubscription());
consumer.onNext(payload);
consumer.onComplete();
return producer.mono();
}
@Override
public Flux<Payload> requestStream(Payload payload) {
consumer.onSubscribe(Operators.emptySubscription());
consumer.onNext(payload);
consumer.onComplete();
return producer.flux();
}
@Override
public Flux<Payload> requestChannel(Publisher<Payload> payloads) {
payloads.subscribe(consumer);
return producer.flux();
}
}
// Happy path with an unfragmented request: the handler's single response
// payload must be written as NEXT_COMPLETE (request-response) or
// NEXT + COMPLETE, with no buffer leaks and matching interceptor events.
@ParameterizedTest
@MethodSource("scenarios")
void shouldHandleRequest(Scenario scenario) {
Assumptions.assumeThat(scenario.requestType()).isNotIn(REQUEST_FNF, METADATA_PUSH);
TestRequestInterceptor testRequestInterceptor = new TestRequestInterceptor();
TestRequesterResponderSupport testRequesterResponderSupport =
TestRequesterResponderSupport.client(testRequestInterceptor);
final LeaksTrackingByteBufAllocator allocator = testRequesterResponderSupport.getAllocator();
final TestDuplexConnection sender = testRequesterResponderSupport.getDuplexConnection();
TestPublisher<Payload> testPublisher = TestPublisher.create();
TestHandler testHandler = new TestHandler(testPublisher, new AssertSubscriber<>(0));
ResponderFrameHandler responderFrameHandler =
scenario.responseOperator(
Long.MAX_VALUE,
TestRequesterResponderSupport.genericPayload(allocator),
testRequesterResponderSupport,
testHandler);
Payload randomPayload = TestRequesterResponderSupport.randomPayload(allocator);
testPublisher.assertWasSubscribed();
testPublisher.next(randomPayload.retain());
testPublisher.complete();
FrameAssert.assertThat(sender.awaitFrame())
.isNotNull()
.hasStreamId(1)
.typeOf(scenario.requestType() == REQUEST_RESPONSE ? FrameType.NEXT_COMPLETE : NEXT)
.hasPayloadSize(
randomPayload.data().readableBytes() + randomPayload.sliceMetadata().readableBytes())
.hasData(randomPayload.data())
.hasNoLeaks();
PayloadAssert.assertThat(randomPayload).hasNoLeaks();
if (scenario.requestType() != REQUEST_RESPONSE) {
FrameAssert.assertThat(sender.awaitFrame())
.typeOf(FrameType.COMPLETE)
.hasStreamId(1)
.hasNoLeaks();
if (scenario.requestType() == REQUEST_CHANNEL) {
testHandler.consumer.request(2);
FrameAssert.assertThat(sender.awaitFrame())
.typeOf(FrameType.REQUEST_N)
.hasStreamId(1)
.hasRequestN(1)
.hasNoLeaks();
responderFrameHandler.handleComplete();
testHandler.consumer.assertComplete();
}
}
testHandler
.consumer
.assertValueCount(1)
.assertValuesWith(p -> PayloadAssert.assertThat(p).hasNoLeaks());
testRequestInterceptor
.expectOnStart(1, scenario.requestType())
.expectOnComplete(1)
.expectNothing();
allocator.assertHasNoLeaks();
}
// Same as shouldHandleRequest but the request arrives split into random-MTU
// fragments; the handler must only be subscribed once reassembly finishes.
@ParameterizedTest
@MethodSource("scenarios")
void shouldHandleFragmentedRequest(Scenario scenario) {
Assumptions.assumeThat(scenario.requestType()).isNotIn(REQUEST_FNF, METADATA_PUSH);
TestRequestInterceptor testRequestInterceptor = new TestRequestInterceptor();
TestRequesterResponderSupport testRequesterResponderSupport =
TestRequesterResponderSupport.client(testRequestInterceptor);
final LeaksTrackingByteBufAllocator allocator = testRequesterResponderSupport.getAllocator();
final TestDuplexConnection sender = testRequesterResponderSupport.getDuplexConnection();
TestPublisher<Payload> testPublisher = TestPublisher.create();
TestHandler testHandler = new TestHandler(testPublisher, new AssertSubscriber<>(0));
int mtu = ThreadLocalRandom.current().nextInt(64, 256);
Payload firstPayload = TestRequesterResponderSupport.randomPayload(allocator);
ArrayList<ByteBuf> fragments =
TestRequesterResponderSupport.prepareFragments(allocator, mtu, firstPayload);
ByteBuf firstFragment = fragments.remove(0);
ResponderFrameHandler responderFrameHandler =
scenario.responseOperator(
Long.MAX_VALUE, firstFragment, testRequesterResponderSupport, testHandler);
firstFragment.release();
testPublisher.assertWasNotSubscribed();
testRequesterResponderSupport.assertHasStream(1, responderFrameHandler);
// Feed the remaining fragments; only the last one clears the FOLLOWS flag.
for (int i = 0; i < fragments.size(); i++) {
ByteBuf fragment = fragments.get(i);
boolean hasFollows = i != fragments.size() - 1;
responderFrameHandler.handleNext(fragment, hasFollows, !hasFollows);
fragment.release();
}
Payload randomPayload = TestRequesterResponderSupport.randomPayload(allocator);
testPublisher.assertWasSubscribed();
testPublisher.next(randomPayload.retain());
testPublisher.complete();
FrameAssert.assertThat(sender.awaitFrame())
.isNotNull()
.hasStreamId(1)
.typeOf(scenario.requestType() == REQUEST_RESPONSE ? FrameType.NEXT_COMPLETE : NEXT)
.hasPayloadSize(
randomPayload.data().readableBytes() + randomPayload.sliceMetadata().readableBytes())
.hasData(randomPayload.data())
.hasNoLeaks();
PayloadAssert.assertThat(randomPayload).hasNoLeaks();
if (scenario.requestType() != REQUEST_RESPONSE) {
FrameAssert.assertThat(sender.awaitFrame())
.typeOf(FrameType.COMPLETE)
.hasStreamId(1)
.hasNoLeaks();
if (scenario.requestType() == REQUEST_CHANNEL) {
testHandler.consumer.request(2);
FrameAssert.assertThat(sender.pollFrame()).isNull();
}
}
testHandler
.consumer
.assertValueCount(1)
.assertValuesWith(
p -> PayloadAssert.assertThat(p).hasData(firstPayload.sliceData()).hasNoLeaks())
.assertComplete();
testRequesterResponderSupport.assertNoActiveStreams();
firstPayload.release();
testRequestInterceptor
.expectOnStart(1, scenario.requestType())
.expectOnComplete(1)
.expectNothing();
allocator.assertHasNoLeaks();
}
// A CANCEL arriving mid-reassembly must drop the stream without ever
// subscribing the handler and without leaking the buffered fragments.
@ParameterizedTest
@MethodSource("scenarios")
void shouldHandleInterruptedFragmentation(Scenario scenario) {
Assumptions.assumeThat(scenario.requestType()).isNotIn(REQUEST_FNF, METADATA_PUSH);
final TestRequestInterceptor testRequestInterceptor = new TestRequestInterceptor();
TestRequesterResponderSupport testRequesterResponderSupport =
TestRequesterResponderSupport.client(testRequestInterceptor);
final LeaksTrackingByteBufAllocator allocator = testRequesterResponderSupport.getAllocator();
TestPublisher<Payload> testPublisher = TestPublisher.create();
TestHandler testHandler = new TestHandler(testPublisher, new AssertSubscriber<>(0));
int mtu = ThreadLocalRandom.current().nextInt(64, 256);
Payload firstPayload = TestRequesterResponderSupport.randomPayload(allocator);
ArrayList<ByteBuf> fragments =
TestRequesterResponderSupport.prepareFragments(allocator, mtu, firstPayload);
firstPayload.release();
ByteBuf firstFragment = fragments.remove(0);
ResponderFrameHandler responderFrameHandler =
scenario.responseOperator(
Long.MAX_VALUE, firstFragment, testRequesterResponderSupport, testHandler);
firstFragment.release();
testPublisher.assertWasNotSubscribed();
testRequesterResponderSupport.assertHasStream(1, responderFrameHandler);
// Replace the final fragment with a cancel to interrupt reassembly.
for (int i = 0; i < fragments.size(); i++) {
ByteBuf fragment = fragments.get(i);
boolean hasFollows = i != fragments.size() - 1;
if (hasFollows) {
responderFrameHandler.handleNext(fragment, true, false);
} else {
responderFrameHandler.handleCancel();
}
fragment.release();
}
testPublisher.assertWasNotSubscribed();
testRequesterResponderSupport.assertNoActiveStreams();
testRequestInterceptor
.expectOnStart(1, scenario.requestType())
.expectOnCancel(1)
.expectNothing();
allocator.assertHasNoLeaks();
}
}
|
|
package net.polybugger.apollot;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.res.Resources;
import android.os.Bundle;
import android.os.Handler;
import android.support.annotation.Nullable;
import android.support.design.widget.Snackbar;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.TextView;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.Map;
import net.polybugger.apollot.db.AcademicTermContract;
import net.polybugger.apollot.db.ApolloDbAdapter;
import net.polybugger.apollot.db.ClassContract;
import net.polybugger.apollot.db.ClassItemTypeContract;
import net.polybugger.apollot.db.ClassScheduleContract;
import net.polybugger.apollot.db.PastCurrentEnum;
import org.apache.commons.lang3.StringUtils;
public class ClassesFragment extends Fragment {
public static final String TAG_PAST = "net.polybugger.apollot.past_classes_fragment";
public static final String TAG_CURRENT = "net.polybugger.apollot.current_classes_fragment";
public static final String PAST_CURRENT_ARG = "net.polybugger.apollot.past_current_arg";
// Cross-screen refresh flags: other activities set these before this
// fragment resumes; onResume() consumes and resets them.
// NOTE(review): static mutable state shared across instances -- fragile if
// two ClassesFragment instances resume concurrently; confirm lifecycle.
public static boolean REQUERY = false;
public static boolean REQUERY_CLASS = false;
public static boolean DELETE_CLASS = false;
// The class entry affected by a pending REQUERY_CLASS/DELETE_CLASS.
public static ClassContract.ClassEntry CLASS = null;
// Which tab (past vs. current classes) this instance shows; from arguments.
private PastCurrentEnum mPastCurrent;
private RecyclerView mRecyclerView;
private Adapter mAdapter;
/**
 * Standard fragment factory: the past/current selection travels in the
 * arguments bundle so it survives fragment recreation.
 */
public static ClassesFragment newInstance(PastCurrentEnum pastCurrentEnum) {
    Bundle arguments = new Bundle();
    arguments.putSerializable(PAST_CURRENT_ARG, pastCurrentEnum);
    ClassesFragment fragment = new ClassesFragment();
    fragment.setArguments(arguments);
    return fragment;
}
@Override
public void onAttach(Context context) {
    super.onAttach(context);
    // No listener wiring is needed here: this fragment communicates with
    // MainActivityFragment through the FragmentManager rather than a
    // host-activity callback interface, so the attach hook only delegates
    // to the superclass. (Removed a no-op instanceof check that contained
    // only commented-out listener scaffolding.)
}
// Counterpart of onAttach; nothing to release since no host listener is held.
@Override
public void onDetach() {
// mListener = null;
super.onDetach();
}
/**
 * Inflates the class list, sets the activity title for the selected tab
 * (past vs. current) and kicks off the initial summary query via
 * MainActivityFragment.
 */
@Nullable
@Override
public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
    setHasOptionsMenu(true);
    Bundle args = getArguments();
    mPastCurrent = (PastCurrentEnum) args.getSerializable(PAST_CURRENT_ARG);
    // Title reflects which tab this instance represents.
    if(mPastCurrent == PastCurrentEnum.PAST)
        getActivity().setTitle(R.string.past_classes);
    else if(mPastCurrent == PastCurrentEnum.CURRENT)
        getActivity().setTitle(R.string.current_classes);
    View rootView = inflater.inflate(R.layout.fragment_classes, container, false);
    mRecyclerView = (RecyclerView) rootView.findViewById(R.id.recycler_view);
    mRecyclerView.setHasFixedSize(true);
    LinearLayoutManager layoutManager = new LinearLayoutManager(getActivity());
    layoutManager.setOrientation(LinearLayoutManager.VERTICAL);
    mRecyclerView.setLayoutManager(layoutManager);
    mAdapter = new Adapter(this);
    mRecyclerView.setAdapter(mAdapter);
    // Ask the retained worker fragment for this tab's summaries.
    MainActivityFragment summaryFragment = (MainActivityFragment) getFragmentManager().findFragmentByTag(MainActivityFragment.TAG);
    if(summaryFragment != null)
        summaryFragment.getClassesSummary(mPastCurrent);
    return rootView;
}
/**
 * Applies any mutation another screen flagged via the static
 * REQUERY / DELETE_CLASS / REQUERY_CLASS flags; each branch resets all
 * flags so the work happens at most once.
 */
@Override
public void onResume() {
    super.onResume();
    if(REQUERY) {
        // Full refresh requested: re-query everything for this tab.
        MainActivityFragment f = (MainActivityFragment) getFragmentManager().findFragmentByTag(MainActivityFragment.TAG);
        if(f != null)
            f.getClassesSummary(mPastCurrent);
        REQUERY = false;
        DELETE_CLASS = false;
        REQUERY_CLASS = false;
    }
    // TODO other types of REQUERY
    else if(DELETE_CLASS) {
        // NOTE(review): assumes CLASS is non-null whenever DELETE_CLASS is
        // set -- confirm against the screens that raise the flag.
        if(mPastCurrent == CLASS.getPastCurrent())
            mAdapter.removeByClass(CLASS);
        DELETE_CLASS = false;
        REQUERY_CLASS = false;
        REQUERY = false;
        CLASS = null;
        // Delay the Snackbar so the returning activity has settled first.
        new Handler().postDelayed(new Runnable() {
            @Override
            public void run() {
                Snackbar.make(getActivity().findViewById(R.id.coordinator_layout), getString(R.string.class_deleted), Snackbar.LENGTH_SHORT).show();
            }
        }, MainActivity.SNACKBAR_POST_DELAYED_MSEC);
    }
    else if(REQUERY_CLASS) {
        // If the class switched tabs (past <-> current) drop it from this
        // list; otherwise ask the worker fragment for a fresh summary.
        if(mPastCurrent != CLASS.getPastCurrent())
            mAdapter.removeByClass(CLASS);
        else {
            MainActivityFragment f = (MainActivityFragment) getFragmentManager().findFragmentByTag(MainActivityFragment.TAG);
            if(f != null)
                f.requeryClassSummary(CLASS, getTag());
        }
        REQUERY_CLASS = false;
        DELETE_CLASS = false;
        REQUERY = false;
        CLASS = null;
    }
    // (removed an empty trailing else block -- dead code)
}
/**
 * Routes the four sort menu actions to the adapter; every sort action
 * simply forwards its own menu id to Adapter.sortBy().
 */
@Override
public boolean onOptionsItemSelected(MenuItem item) {
    final int id = item.getItemId();
    if(id == R.id.action_sort_class_code
            || id == R.id.action_sort_class_description
            || id == R.id.action_sort_academic_term
            || id == R.id.action_sort_year) {
        mAdapter.sortBy(id);
        return true;
    }
    return super.onOptionsItemSelected(item);
}
// Contributes the sort actions (fragment_classes menu) to the action bar.
@Override
public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
super.onCreateOptionsMenu(menu, inflater);
inflater.inflate(R.menu.fragment_classes, menu);
}
// Callback from MainActivityFragment delivering freshly queried summaries.
// NOTE(review): the pastCurrent argument is unused -- presumably callers
// only deliver summaries for this fragment's own tab; confirm.
public void onGetClassesSummary(ArrayList<ClassSummary> arrayList, PastCurrentEnum pastCurrent) {
mAdapter.setArrayList(arrayList);
}
// Appends a newly created class, scrolls the list to it, and shows a
// confirmation Snackbar after a short delay (so the activity transition
// has settled -- see MainActivity.SNACKBAR_POST_DELAYED_MSEC).
public void insertClass(ClassContract.ClassEntry entry) {
ClassSummary classSummary = new ClassSummary(entry);
mAdapter.add(classSummary);
mRecyclerView.smoothScrollToPosition(mAdapter.getItemCount() - 1);
new Handler().postDelayed(new Runnable() {
@Override
public void run() {
Snackbar.make(getActivity().findViewById(R.id.coordinator_layout), getString(R.string.class_added), Snackbar.LENGTH_SHORT).show();
}
}, MainActivity.SNACKBAR_POST_DELAYED_MSEC);
}
// Callback delivering one re-queried summary; applied only when the class
// still belongs to this tab (past vs. current).
// NOTE(review): fragmentTag is unused here -- presumably used by the caller
// to route the callback; confirm.
public void onRequeryClassSummary(ClassSummary classSummary, String fragmentTag) {
if(mPastCurrent == classSummary.mClass.getPastCurrent()) {
mAdapter.update(classSummary);
}
}
public static class Adapter extends RecyclerView.Adapter<Adapter.ViewHolder> {
private Fragment mFragment;
private ArrayList<ClassSummary> mArrayList;
private int mSortId;
private Comparator<ClassSummary> mComparator;
/**
 * Creates the adapter with an empty list and a comparator that honors the
 * current sort selection (mSortId); a negated id means descending order
 * (see sortBy()).
 *
 * Fixed to obey the Comparator contract: ties -- equal values or two null
 * sides -- now compare as 0. The previous version returned +/-1 for ties,
 * which violates antisymmetry and can make Collections.sort (TimSort)
 * throw "Comparison method violates its general contract!".
 */
public Adapter(Fragment fragment) {
    mFragment = fragment;
    mArrayList = new ArrayList<>();
    mComparator = new Comparator<ClassSummary>() {
        @Override
        public int compare(ClassSummary lhs, ClassSummary rhs) {
            if(mSortId == R.id.action_sort_class_code) {
                return lhs.mClass.getCode().compareToIgnoreCase(rhs.mClass.getCode());
            }
            else if(-mSortId == R.id.action_sort_class_code) {
                return -lhs.mClass.getCode().compareToIgnoreCase(rhs.mClass.getCode());
            }
            else if(mSortId == R.id.action_sort_class_description) {
                return lhs.mClass.getDescription().compareToIgnoreCase(rhs.mClass.getDescription());
            }
            else if(-mSortId == R.id.action_sort_class_description) {
                return -lhs.mClass.getDescription().compareToIgnoreCase(rhs.mClass.getDescription());
            }
            else if(mSortId == R.id.action_sort_academic_term) {
                AcademicTermContract.AcademicTermEntry lat = lhs.mClass.getAcademicTerm();
                AcademicTermContract.AcademicTermEntry rat = rhs.mClass.getAcademicTerm();
                if(lat == null)
                    return (rat == null) ? 0 : 1; // nulls sort last ascending
                if(rat == null)
                    return -1;
                return lat.getDescription().compareToIgnoreCase(rat.getDescription());
            }
            else if(-mSortId == R.id.action_sort_academic_term) {
                AcademicTermContract.AcademicTermEntry lat = lhs.mClass.getAcademicTerm();
                AcademicTermContract.AcademicTermEntry rat = rhs.mClass.getAcademicTerm();
                if(lat == null)
                    return (rat == null) ? 0 : -1; // nulls sort first descending
                if(rat == null)
                    return 1;
                return -lat.getDescription().compareToIgnoreCase(rat.getDescription());
            }
            else if(mSortId == R.id.action_sort_year) {
                Long lYear = lhs.mClass.getYear();
                Long rYear = rhs.mClass.getYear();
                if(lYear == null)
                    return (rYear == null) ? 0 : 1;
                if(rYear == null)
                    return -1;
                return lYear.compareTo(rYear);
            }
            else if(-mSortId == R.id.action_sort_year) {
                Long lYear = lhs.mClass.getYear();
                Long rYear = rhs.mClass.getYear();
                if(lYear == null)
                    return (rYear == null) ? 0 : -1;
                if(rYear == null)
                    return 1;
                return rYear.compareTo(lYear);
            }
            return 0; // no sort selected yet: keep existing order
        }
    };
}
// Replaces the backing list wholesale (adopts the caller's list without
// copying) and refreshes the whole view.
public void setArrayList(ArrayList<ClassSummary> arrayList) {
mArrayList = arrayList;
notifyDataSetChanged();
}
// Appends one entry and refreshes the whole list.
// NOTE(review): notifyItemInserted(...) would be cheaper, but the blanket
// refresh matches the rest of this adapter.
public void add(ClassSummary entry) {
mArrayList.add(entry);
notifyDataSetChanged();
}
/**
 * Returns true when an entry for the given class is present.
 * Linear scan; the list holds one user's classes, so sizes stay small.
 */
public boolean containsByClass(ClassContract.ClassEntry _class) {
    final int count = mArrayList.size();
    for(int i = 0; i < count; ++i) {
        if(mArrayList.get(i).mClass.equals(_class))
            return true;
    }
    return false;
}
/**
 * Removes the entry for the given class and returns it, or null when no
 * entry matches.
 *
 * Fix: the previous version returned the last scanned element even when
 * nothing matched, making callers believe a removal happened.
 */
public ClassSummary removeByClass(ClassContract.ClassEntry _class) {
    ClassSummary removed = null;
    int size = mArrayList.size();
    for(int i = 0; i < size; ++i) {
        ClassSummary candidate = mArrayList.get(i);
        if(candidate.mClass.equals(_class)) {
            removed = candidate;
            mArrayList.remove(i);
            break;
        }
    }
    // Unconditional refresh, matching the adapter's existing style.
    notifyDataSetChanged();
    return removed;
}
/**
 * Replaces the entry for the same class in place, preserving its position;
 * when the class is not present the summary is appended at the end.
 */
public void update(ClassSummary classSummary) {
    final int size = mArrayList.size();
    int index = size; // defaults to "append" when no match is found
    for(int i = 0; i < size; ++i) {
        if(mArrayList.get(i).mClass.equals(classSummary.mClass)) {
            index = i;
            break;
        }
    }
    if(index < size)
        mArrayList.remove(index);
    mArrayList.add(index, classSummary);
    notifyDataSetChanged();
}
// Sorts by the given menu action id. Selecting the same action twice in a
// row toggles direction: the negated id marks descending order (the
// comparator checks both mSortId and -mSortId).
public void sortBy(int sortId) {
mSortId = (mSortId == sortId) ? -sortId : sortId;
Collections.sort(mArrayList, mComparator);
notifyDataSetChanged();
}
// Inflates the per-class summary card; data binding happens in
// onBindViewHolder.
@Override
public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
return new ViewHolder(LayoutInflater.from(parent.getContext()).inflate(R.layout.view_holder_class_summary, parent, false));
}
// TODO adjust formatting for empty fields
@Override
public void onBindViewHolder(ViewHolder holder, int position) {
ClassSummary entry = mArrayList.get(position);
AcademicTermContract.AcademicTermEntry academicTerm = entry.mClass.getAcademicTerm();
if(academicTerm != null)
holder.mBackgroundLayout.setBackgroundResource(BackgroundRect.getBackgroundResource(academicTerm.getColor(), mFragment.getContext()));
else
holder.mBackgroundLayout.setBackgroundResource(BackgroundRect.getBackgroundResource(null, mFragment.getContext()));
Resources res = mFragment.getResources();
int topMargin = res.getDimensionPixelSize(R.dimen.recycler_view_item_margin_top);
int rightMargin = res.getDimensionPixelSize(R.dimen.recycler_view_item_margin_right);
int bottomMargin = res.getDimensionPixelSize(R.dimen.recycler_view_item_margin_bottom);
int leftMargin = res.getDimensionPixelSize(R.dimen.recycler_view_item_margin_left);
if(position == 0) {
LinearLayout.LayoutParams layoutParams = (LinearLayout.LayoutParams) holder.mBackgroundLayout.getLayoutParams();
layoutParams.setMargins(leftMargin, topMargin * 2, rightMargin, bottomMargin);
holder.mBackgroundLayout.setLayoutParams(layoutParams);
}
else if(position == (mArrayList.size() - 1)) {
LinearLayout.LayoutParams layoutParams = (LinearLayout.LayoutParams) holder.mBackgroundLayout.getLayoutParams();
layoutParams.setMargins(leftMargin, topMargin, rightMargin, bottomMargin * 2);
holder.mBackgroundLayout.setLayoutParams(layoutParams);
}
else {
LinearLayout.LayoutParams layoutParams = (LinearLayout.LayoutParams) holder.mBackgroundLayout.getLayoutParams();
layoutParams.setMargins(leftMargin, topMargin, rightMargin, bottomMargin);
holder.mBackgroundLayout.setLayoutParams(layoutParams);
}
holder.mClickableLayout.setTag(entry);
holder.mClickableLayout.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
ClassSummary classSummary = (ClassSummary) v.getTag();
if(classSummary.mClass.isLocked()) {
FragmentManager fm = mFragment.getFragmentManager();
UnlockPasswordDialogFragment df = (UnlockPasswordDialogFragment) fm.findFragmentByTag(UnlockPasswordDialogFragment.TAG);
if(df == null) {
df = UnlockPasswordDialogFragment.newInstance(classSummary.mClass, UnlockPasswordDialogFragment.Option.UNLOCK_CLASS);
df.show(fm, UnlockPasswordDialogFragment.TAG);
}
}
else {
Intent intent = new Intent(mFragment.getContext(), ClassActivity.class);
Bundle args = new Bundle();
args.putSerializable(ClassActivity.CLASS_ARG, classSummary.mClass);
intent.putExtras(args);
mFragment.startActivity(intent);
}
}
});
if(entry.mClass.isLocked()) {
holder.mTitleTextView.setText(String.format("%s %s", entry.mClass.getCode(), mFragment.getString(R.string.ellipsis)));
holder.mLockedImageView.setVisibility(View.VISIBLE);
holder.mAcademicTermTextView.setVisibility(View.GONE);
}
else {
holder.mTitleTextView.setText(entry.mClass.getTitle());
holder.mLockedImageView.setVisibility(View.GONE);
String academicTermYear = entry.mClass.getAcademicTermYear();
if(StringUtils.isBlank(academicTermYear)) {
holder.mAcademicTermTextView.setVisibility(View.GONE);
}
else {
holder.mAcademicTermTextView.setText(academicTermYear);
holder.mAcademicTermTextView.setVisibility(View.VISIBLE);
}
}
if(holder.mAcademicTermTextView.getVisibility() == View.GONE) {
int paddingTop = holder.mTitleTextView.getPaddingTop();
int paddingRight = holder.mTitleTextView.getPaddingRight();
int paddingLeft = holder.mTitleTextView.getPaddingLeft();
holder.mTitleTextView.setPadding(paddingLeft, paddingTop, paddingRight, paddingTop);
}
else {
int paddingTop = holder.mTitleTextView.getPaddingTop();
int paddingRight = holder.mTitleTextView.getPaddingRight();
int paddingLeft = holder.mTitleTextView.getPaddingLeft();
holder.mTitleTextView.setPadding(paddingLeft, paddingTop, paddingRight, 0);
}
if(entry.mClassSchedules.size() == 0) {
holder.mClassScheduleTimeLocationTextView.setVisibility(View.GONE);
}
else {
ClassScheduleContract.ClassScheduleEntry classSchedule = entry.mClassSchedules.get(0);
String time = classSchedule.getTime(ApolloDbAdapter.getAppContext());
if(entry.mClassSchedules.size() > 1)
time = time + " ...";
String location = classSchedule.getLocation();
if(!StringUtils.isBlank(location)) {
time = time + "\n" + location;
}
holder.mClassScheduleTimeLocationTextView.setText(time);
holder.mClassScheduleTimeLocationTextView.setVisibility(View.VISIBLE);
}
if(entry.mStudentCount == 0) {
holder.mStudentCountTextView.setVisibility(View.GONE);
}
else {
holder.mStudentCountTextView.setText(String.format("%s %d", mFragment.getString(R.string.students_label), entry.mStudentCount));
holder.mStudentCountTextView.setVisibility(View.VISIBLE);
}
if(entry.mItemSummaryCount.size() == 0) {
holder.mItemCountTextView.setVisibility(View.GONE);
holder.mItemSummaryCountLinearLayout.setVisibility(View.GONE);
if(holder.mStudentCountTextView.getVisibility() == View.VISIBLE) {
int paddingTop = holder.mStudentCountTextView.getPaddingTop();
int paddingRight = holder.mStudentCountTextView.getPaddingRight();
int paddingLeft = holder.mStudentCountTextView.getPaddingLeft();
holder.mStudentCountTextView.setPadding(paddingLeft, paddingTop, paddingRight, paddingTop);
}
else {
int paddingTop = holder.mStudentCountTextView.getPaddingTop();
int paddingRight = holder.mStudentCountTextView.getPaddingRight();
int paddingLeft = holder.mStudentCountTextView.getPaddingLeft();
holder.mStudentCountTextView.setPadding(paddingLeft, paddingTop, paddingRight, 0);
}
}
else {
LayoutInflater inflater = mFragment.getLayoutInflater(null);
int itemSummaryTotal = 0, count;
View view; TextView textView;
holder.mItemSummaryCountLinearLayout.removeAllViews();
for(Map.Entry<ClassItemTypeContract.ClassItemTypeEntry, Integer> itemCount : entry.mItemSummaryCount.entrySet()) {
ClassItemTypeContract.ClassItemTypeEntry itemType = itemCount.getKey();
count = itemCount.getValue();
view = inflater.inflate(R.layout.row_class_summary_item_count, null);
textView = (TextView) view.findViewById(R.id.text_view);
textView.setBackgroundResource(BackgroundRect.getBackgroundResource(itemType.getColor(), mFragment.getContext()));
textView.setText(String.format("%s%s %d", itemType.getDescription(), mFragment.getString(R.string.colon), count));
holder.mItemSummaryCountLinearLayout.addView(view);
itemSummaryTotal = itemSummaryTotal + count;
}
holder.mItemCountTextView.setText(String.format("%s %d", mFragment.getString(R.string.class_activities_label), itemSummaryTotal));
holder.mItemCountTextView.setVisibility(View.VISIBLE);
holder.mItemSummaryCountLinearLayout.setVisibility(View.VISIBLE);
if(holder.mStudentCountTextView.getVisibility() == View.GONE) {
int paddingTop = holder.mTitleTextView.getPaddingTop();
int paddingRight = holder.mItemCountTextView.getPaddingRight();
int paddingBottom = holder.mItemCountTextView.getPaddingBottom();
int paddingLeft = holder.mItemCountTextView.getPaddingLeft();
holder.mItemCountTextView.setPadding(paddingLeft, paddingTop, paddingRight, paddingBottom);
}
else {
int paddingRight = holder.mItemCountTextView.getPaddingRight();
int paddingBottom = holder.mItemCountTextView.getPaddingBottom();
int paddingLeft = holder.mItemCountTextView.getPaddingLeft();
holder.mItemCountTextView.setPadding(paddingLeft, 0, paddingRight, paddingBottom);
}
}
if(holder.mClassScheduleTimeLocationTextView.getVisibility() == View.GONE)
holder.mClassScheduleDivider.setVisibility(View.GONE);
else
holder.mClassScheduleDivider.setVisibility(View.VISIBLE);
if(holder.mStudentCountTextView.getVisibility() == View.GONE && holder.mItemCountTextView.getVisibility() == View.GONE)
holder.mStudentCountDivider.setVisibility(View.GONE);
else
holder.mStudentCountDivider.setVisibility(View.VISIBLE);
}
@Override
public int getItemCount() {
// Number of ClassSummary rows currently backing this adapter; RecyclerView
// uses this to decide how many items to bind.
return mArrayList.size();
}
/**
 * Caches the child views of one class-summary row so onBindViewHolder does
 * not have to call findViewById repeatedly while the list scrolls.
 */
public static class ViewHolder extends RecyclerView.ViewHolder {
// Root layout of the row; its LayoutParams are adjusted during binding.
protected LinearLayout mBackgroundLayout;
// Click target carrying the bound ClassSummary as its tag.
protected LinearLayout mClickableLayout;
// Class code/title; also shows an ellipsis when the class is locked.
protected TextView mTitleTextView;
// Padlock icon, visible only for locked classes.
protected ImageView mLockedImageView;
// Academic term/year line; hidden when blank or when the class is locked.
protected TextView mAcademicTermTextView;
// Divider shown only when the schedule line below it is visible.
protected View mClassScheduleDivider;
// First schedule's time (plus "..." if more) and optional location.
protected TextView mClassScheduleTimeLocationTextView;
// Divider shown only when student or item counts are visible.
protected View mStudentCountDivider;
// "Students: N" label; hidden when the class has no students.
protected TextView mStudentCountTextView;
// Total activity count label; hidden when there are no item summaries.
protected TextView mItemCountTextView;
// Container for one per-item-type count chip per entry.
protected LinearLayout mItemSummaryCountLinearLayout;
public ViewHolder(View itemView) {
super(itemView);
mBackgroundLayout = (LinearLayout) itemView.findViewById(R.id.background_layout);
mClickableLayout = (LinearLayout) itemView.findViewById(R.id.clickable_layout);
mTitleTextView = (TextView) itemView.findViewById(R.id.title_text_view);
mLockedImageView = (ImageView) itemView.findViewById(R.id.locked_image_view);
mAcademicTermTextView = (TextView) itemView.findViewById(R.id.academic_term_text_view);
mClassScheduleDivider = itemView.findViewById(R.id.class_schedule_divider);
mClassScheduleTimeLocationTextView = (TextView) itemView.findViewById(R.id.class_schedule_time_location_text_view);
mStudentCountDivider = itemView.findViewById(R.id.student_count_divider);
mStudentCountTextView = (TextView) itemView.findViewById(R.id.student_count_text_view);
mItemCountTextView = (TextView) itemView.findViewById(R.id.item_count_text_view);
mItemSummaryCountLinearLayout = (LinearLayout) itemView.findViewById(R.id.item_summary_count_linear_layout);
}
}
}
/**
 * Aggregated display data for one class row: the class itself, its schedule
 * entries, the enrolled-student count, and a per-item-type activity count.
 * Fields are public because the adapter reads them directly while binding.
 */
public static class ClassSummary {
// The class this summary describes.
public ClassContract.ClassEntry mClass;
// Schedule entries for the class; only the first is rendered in the row.
public ArrayList<ClassScheduleContract.ClassScheduleEntry> mClassSchedules;
// Number of students enrolled; 0 hides the student-count label.
public long mStudentCount;
// Insertion-ordered map of item type -> count, rendered as count chips.
public LinkedHashMap<ClassItemTypeContract.ClassItemTypeEntry, Integer> mItemSummaryCount;
public ClassSummary(ClassContract.ClassEntry _class) {
mClass = _class;
mClassSchedules = new ArrayList<>();
mStudentCount = 0;
mItemSummaryCount = new LinkedHashMap<>();
}
}
}
|
|
package org.jentrata.ebms.as4.internal.routes;
import com.google.common.collect.ImmutableList;
import org.apache.camel.Exchange;
import org.apache.camel.Header;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.impl.DefaultExchange;
import org.apache.camel.impl.JndiRegistry;
import org.apache.camel.test.junit4.CamelTestSupport;
import org.jentrata.ebms.EbmsConstants;
import org.jentrata.ebms.EbmsError;
import org.jentrata.ebms.MessageType;
import org.jentrata.ebms.cpa.*;
import org.jentrata.ebms.cpa.pmode.BusinessInfo;
import org.jentrata.ebms.cpa.pmode.Protocol;
import org.jentrata.ebms.cpa.pmode.Service;
import org.jentrata.ebms.messaging.Message;
import org.jentrata.ebms.messaging.MessageStore;
import org.jentrata.ebms.utils.EbmsUtils;
import org.junit.Test;
import org.w3c.dom.Document;
import javax.xml.xpath.XPathExpressionException;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.*;
import static org.mockito.Mockito.*;
/**
* Unit test for org.jentrata.ebms.as4.internal.routes.ValidatePartnerAgreementRouteBuilder
*
* @author aaronwalker
*/
public class ValidatePartnerAgreementRouteBuilderTest extends CamelTestSupport {
// CPA repository bound into the Camel JNDI registry; implemented by
// MockCpaRepository below so lookups are fully deterministic.
private CPARepository cpaRepository;
/**
 * A message whose service/action pair ("service1"/"action1") is known to the
 * mock repository passes validation on the direct:validatePartner route.
 */
@Test
public void testHasValidPartnerAgreement() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_SERVICE, "service1");
request.getIn().setHeader(EbmsConstants.MESSAGE_ACTION,"action1");
Exchange response = context().createProducerTemplate().send("direct:validatePartner",request);
assertThat(response.getIn().getHeader(EbmsConstants.VALID_PARTNER_AGREEMENT,Boolean.class),is(true));
}
/**
 * An unknown service/action pair fails the exchange with
 * InvalidPartnerAgreementException and marks the agreement invalid.
 */
@Test
public void testHasInvalidPartnerAgreement() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_SERVICE, "testServiceInvalid");
request.getIn().setHeader(EbmsConstants.MESSAGE_ACTION, "testAction");
Exchange response = context().createProducerTemplate().send("direct:validatePartner",request);
// send(...) returns the same exchange, so request reflects the failure too.
assertThat(request.isFailed(),equalTo(true));
assertThat(request.getException(),instanceOf(InvalidPartnerAgreementException.class));
assertThat(response.getIn().getHeader(EbmsConstants.VALID_PARTNER_AGREEMENT, Boolean.class),is(false));
}
/**
 * Missing service and action headers are treated the same as an unknown
 * agreement: the exchange fails with InvalidPartnerAgreementException.
 */
@Test
public void testNullServiceAndAction() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
Exchange response = context().createProducerTemplate().send("direct:validatePartner",request);
assertThat(request.isFailed(), equalTo(true));
assertThat(request.getException(),instanceOf(InvalidPartnerAgreementException.class));
assertThat(response.getIn().getHeader(EbmsConstants.VALID_PARTNER_AGREEMENT,Boolean.class),is(false));
}
/**
 * For a user message the direct:lookupCpaId route resolves the CPA from the
 * message content (see MockCpaRepository.findByMessage) and sets both the
 * CPA and CPA_ID headers.
 */
@Test
public void testLookupCPAId() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_SERVICE, "testService");
request.getIn().setHeader(EbmsConstants.MESSAGE_ACTION,"testAction");
request.getIn().setHeader(EbmsConstants.MESSAGE_TYPE, MessageType.USER_MESSAGE);
Exchange response = context().createProducerTemplate().send("direct:lookupCpaId",request);
assertThat(response.getIn().getHeader(EbmsConstants.CPA, PartnerAgreement.class),is(notNullValue()));
assertThat(response.getIn().getHeader(EbmsConstants.CPA_ID, String.class),equalTo("testCPAId"));
}
/**
 * For a signal message the CPA id comes from the referenced stored message
 * (see mockMessageStore), then findByCPAId resolves the agreement.
 */
@Test
public void testLookupCPAIdForSignalMessage() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_TYPE, MessageType.SIGNAL_MESSAGE);
Exchange response = context().createProducerTemplate().send("direct:lookupCpaId",request);
assertThat(response.getIn().getHeader(EbmsConstants.CPA, PartnerAgreement.class),is(notNullValue()));
assertThat(response.getIn().getHeader(EbmsConstants.CPA_ID, String.class),equalTo("testCPAId"));
}
/**
 * Same as above but for a signal message bundled with a user message
 * (an AS4 receipt payload).
 */
@Test
public void testLookupCPAIdForSignalMessageWithUserMessage() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage("simple-as4-receipt.xml"));
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_TYPE, MessageType.SIGNAL_MESSAGE_WITH_USER_MESSAGE);
Exchange response = context().createProducerTemplate().send("direct:lookupCpaId",request);
assertThat(response.getIn().getHeader(EbmsConstants.CPA, PartnerAgreement.class),is(notNullValue()));
assertThat(response.getIn().getHeader(EbmsConstants.CPA_ID, String.class),equalTo("testCPAId"));
}
/**
 * When no agreement matches and no default is configured, the CPA header is
 * absent and CPA_ID falls back to CPA_ID_UNKNOWN.
 */
@Test
public void testInvalidLookupCPAId() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_SERVICE, "testService");
request.getIn().setHeader(EbmsConstants.MESSAGE_ACTION,"testAction2");
Exchange response = context().createProducerTemplate().send("direct:lookupCpaId",request);
assertThat(response.getIn().getHeader(EbmsConstants.CPA, PartnerAgreement.class),is(nullValue()));
assertThat(response.getIn().getHeader(EbmsConstants.CPA_ID, String.class),equalTo(EbmsConstants.CPA_ID_UNKNOWN));
}
/**
 * When no agreement matches but a DEFAULT_CPA_ID header is supplied, the
 * route resolves the agreement via findByCPAId using that default.
 */
@Test
public void testDefaultLookupCPAId() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_SERVICE, "testService");
request.getIn().setHeader(EbmsConstants.MESSAGE_ACTION,"testAction2");
request.getIn().setHeader(EbmsConstants.DEFAULT_CPA_ID,"testCPAId");
Exchange response = context().createProducerTemplate().send("direct:lookupCpaId",request);
assertThat(response.getIn().getHeader(EbmsConstants.CPA, PartnerAgreement.class),is(notNullValue()));
assertThat(response.getIn().getHeader(EbmsConstants.CPA_ID, String.class),equalTo("testCPAId"));
}
/**
 * A service whose ValidationPredicate rejects the message produces a single
 * EBMS_0003 validation error wrapped in InvalidPartnerAgreementException.
 */
@Test
public void testValidationErrors() throws Exception {
Exchange request = new DefaultExchange(context());
request.getIn().setBody(loadEbmsMessage());
request.getIn().setHeader(EbmsConstants.MESSAGE_ID, "testMsgID");
request.getIn().setHeader(EbmsConstants.REF_TO_MESSAGE_ID, "testRefMsgID");
request.getIn().setHeader(EbmsConstants.MESSAGE_SERVICE, "testServiceValidation");
request.getIn().setHeader(EbmsConstants.MESSAGE_ACTION,"testActionValidation");
request.getIn().setHeader(EbmsConstants.MESSAGE_TYPE, MessageType.USER_MESSAGE);
request.getIn().setHeader(EbmsConstants.CPA,createPartnerAgreement());
Exchange response = context().createProducerTemplate().send("direct:validatePartner",request);
assertThat(response.isFailed(),is(true));
assertThat(response.getException(),instanceOf(InvalidPartnerAgreementException.class));
InvalidPartnerAgreementException exception = response.getException(InvalidPartnerAgreementException.class);
assertThat(exception.getValidationErrors(),hasSize(1));
assertThat(exception.getValidationErrors().get(0).getError(),equalTo(EbmsError.EBMS_0003));
}
// Builds an agreement whose single service carries a predicate that always
// fails validation, used by testValidationErrors and the mock repository.
private PartnerAgreement createPartnerAgreement() {
PartnerAgreement agreement = new PartnerAgreement();
agreement.setCpaId("validationErrors");
BusinessInfo businessInfo = new BusinessInfo();
agreement.setBusinessInfo(businessInfo);
Service service = new Service("testServiceValidation","testActionValidation");
ValidationPredicate predicate = new ValidationPredicate() {
@Override
public boolean matches(Exchange exchange) {
exchange.getIn().setHeader(EbmsConstants.VALIDATION_ERROR_DESC,"message invalid....just because I don't like you");
return false;
}
};
service.setValidations(Arrays.asList(predicate));
agreement.getBusinessInfo().setServices(Arrays.asList(service));
return agreement;
}
// Default test fixture: a sample ebMS user message from the classpath.
private InputStream loadEbmsMessage() throws IOException {
return loadEbmsMessage("sample-ebms-user-message.xml");
}
private InputStream loadEbmsMessage(String filename) throws IOException {
return new ByteArrayInputStream(EbmsUtils.toStringFromClasspath(filename).getBytes());
}
@Override
protected JndiRegistry createRegistry() throws Exception {
JndiRegistry registry = super.createRegistry();
cpaRepository = new MockCpaRepository();
registry.bind("cpaRepository", cpaRepository);
registry.bind("messageStore", mockMessageStore());
return registry;
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
ValidatePartnerAgreementRouteBuilder routeBuilder = new ValidatePartnerAgreementRouteBuilder();
return routeBuilder;
}
/**
 * Deterministic in-memory CPARepository: only "service1|action1" and
 * "testServiceValidation|testActionValidation" resolve to agreements;
 * findByCPAId always returns the "testCPAId" agreement.
 */
private class MockCpaRepository implements CPARepository {
@Override
public List<PartnerAgreement> getPartnerAgreements() {
// Unused by the routes under test.
return null;
}
@Override
public List<PartnerAgreement> getActivePartnerAgreements() {
// Unused by the routes under test.
return null;
}
@Override
public PartnerAgreement findByCPAId(@Header(EbmsConstants.CPA_ID) String cpaId) {
PartnerAgreement partnerAgreement = new PartnerAgreement();
partnerAgreement.setCpaId("testCPAId");
Protocol protocol = new Protocol();
protocol.setAddress("http://example.jentrata.com");
partnerAgreement.setProtocol(protocol);
BusinessInfo businessInfo = new BusinessInfo();
businessInfo.setServices(new ImmutableList.Builder<Service>()
.add(new Service("service", "action"))
.build()
);
partnerAgreement.setBusinessInfo(businessInfo);
return partnerAgreement;
}
@Override
public PartnerAgreement findByServiceAndAction(String service, String action) {
switch (service + "|" + action) {
case "service1|action1":
PartnerAgreement partnerAgreement = new PartnerAgreement();
partnerAgreement.setCpaId("testCPAId");
Protocol protocol = new Protocol();
protocol.setAddress("http://example.jentrata.com");
partnerAgreement.setProtocol(protocol);
BusinessInfo businessInfo = new BusinessInfo();
partnerAgreement.setBusinessInfo(businessInfo);
businessInfo.setServices(new ImmutableList.Builder<Service>()
.add(new Service(service, action))
.build()
);
return partnerAgreement;
case "testServiceValidation|testActionValidation":
return createPartnerAgreement();
default:
return null;
}
}
@Override
public PartnerAgreement findByMessage(Document message, String ebmsVersion) {
try {
// Pull Service/Action out of the ebMS CollaborationInfo block and
// delegate to the service/action lookup above.
String serviceValue = EbmsUtils.ebmsXpathValue(message.getDocumentElement(), "//eb3:CollaborationInfo/eb3:Service/text()");
String actionValue = EbmsUtils.ebmsXpathValue(message.getDocumentElement(),"//eb3:CollaborationInfo/eb3:Action/text()");
return findByServiceAndAction(serviceValue,actionValue);
} catch (XPathExpressionException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean isValidPartnerAgreement(Map<String, Object> fields) {
String service = (String) fields.get(EbmsConstants.MESSAGE_SERVICE);
String action = (String) fields.get(EbmsConstants.MESSAGE_ACTION);
PartnerAgreement agreement = findByServiceAndAction(service,action);
return agreement != null;
}
}
// Message store stub: every message id resolves to a message whose CPA id
// is "testCPAId", supporting the signal-message lookup tests.
private MessageStore mockMessageStore() {
MessageStore mock = mock(MessageStore.class);
Message message = mock(Message.class);
doReturn("testCPAId").when(message).getCpaId();
doReturn(message).when(mock).findByMessageId(anyString(),anyString());
return mock;
}
}
|
|
package poweraqua.core.utils;
import java.io.PrintStream;
import java.util.Vector;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Splits ontology/entity labels into human-readable words.
 *
 * <p>A label is split either on explicit separator characters
 * ({@code _ - . space +}, each replaced with {@code /}) or, for camel-cased
 * labels, on upper-case boundaries (separated by spaces). For example
 * {@code "romanCityWalls"} becomes {@code "roman City Walls"} and
 * {@code "foo_barBaz"} becomes {@code "foo/bar Baz/"}.
 */
public class LabelSplitter
{
  // Separator characters recognised by containsSeparator(), in no
  // significant order (the smallest index wins regardless).
  private static final char[] SEPARATORS = { '_', '-', '.', ' ', '+' };

  // Compiled once: recompiling on every toSplit() call is wasteful.
  // At least one lower-case letter anywhere in the label.
  private static final Pattern ONE_LOWER_CASE = Pattern.compile("[a-z]+");
  // An upper-case letter preceded by at least one other character.
  private static final Pattern UPPER_CASE = Pattern.compile(".+[A-Z]+");

  /**
   * Splits {@code label} on separators if it contains any, otherwise on
   * camel-case boundaries if it mixes cases, otherwise returns it unchanged.
   */
  public String splitLabel(String label)
  {
    if (containsSeparator(label) > -1) {
      return splitOnSeparators(label);
    }
    if (toSplit(label)) {
      return splitOnCaps(label);
    }
    return label;
  }

  /**
   * Inserts spaces at camel-case boundaries, e.g. {@code "romanCityWalls"}
   * becomes {@code "roman City Walls"}. Runs of capitals stay together until
   * the run ends (e.g. {@code "parseHTMLDoc"} keeps {@code "HTML"} intact).
   * Labels shorter than three characters or already containing a space are
   * returned unchanged.
   *
   * @param label the label to split; must not be null
   * @return the space-separated form of {@code label}
   */
  public static String splitOnCaps(String label)
  {
    if (label.length() < 3 || label.contains(" ")) {
      return label;
    }
    StringBuilder result = new StringBuilder();
    StringBuilder element = new StringBuilder();
    // Walk up to the second-to-last character; the last one is appended to
    // the current element unconditionally after the loop.
    for (int i = 0; i < label.length() - 1; i++) {
      char curr = label.charAt(i);
      char next = label.charAt(i + 1);
      if (Character.isUpperCase(curr)) {
        if (i == 0) {
          // Leading capital starts the first element; nothing to flush yet.
          element.append(curr);
        } else if (Character.isUpperCase(label.charAt(i - 1))) {
          if (Character.isUpperCase(next)) {
            // Middle of an all-caps run: keep accumulating.
            element.append(curr);
          } else {
            // End of an all-caps run: flush it, start a new element.
            result.append(element).append(' ');
            element.setLength(0);
            element.append(curr);
          }
        } else {
          // lower->UPPER boundary: flush the finished word.
          result.append(element).append(' ');
          element.setLength(0);
          element.append(curr);
        }
      } else {
        element.append(curr);
      }
    }
    element.append(label.charAt(label.length() - 1));
    result.append(element).append(' ');
    return result.toString().trim();
  }

  /**
   * Splits {@code label} on every separator character, camel-case-splitting
   * each piece where applicable, and joins the pieces with {@code /}.
   * The result always ends with a trailing {@code /}.
   */
  public String splitOnSeparators(String label)
  {
    StringBuilder result = new StringBuilder();
    int firstSeparator = containsSeparator(label);
    while (firstSeparator > -1) {
      String element = label.substring(0, firstSeparator);
      if (toSplit(element)) {
        element = splitOnCaps(element);
      }
      result.append(element).append('/');
      label = label.substring(firstSeparator + 1);
      firstSeparator = containsSeparator(label);
    }
    // Final piece after the last separator (or the whole label if none).
    if (toSplit(label)) {
      label = splitOnCaps(label);
    }
    result.append(label).append('/');
    return result.toString();
  }

  /**
   * Returns the index of the first separator character
   * ({@code _ - . space +}) in {@code label}, or -1 if none occurs.
   */
  public int containsSeparator(String label)
  {
    int result = -1;
    for (char separator : SEPARATORS) {
      int index = label.indexOf(separator);
      if (index > -1 && (result == -1 || index < result)) {
        result = index;
      }
    }
    return result;
  }

  /**
   * True when {@code label} is camel-cased: it contains at least one
   * lower-case letter and an upper-case letter that is not the first
   * character.
   */
  public boolean toSplit(String label)
  {
    return ONE_LOWER_CASE.matcher(label).find() && UPPER_CASE.matcher(label).find();
  }

  /**
   * Case-insensitively checks whether {@code term} equals any {@code /}-
   * delimited element of {@code label} (including the part after the last
   * slash, if any).
   */
  public boolean isPartOfLabel(String term, String label)
  {
    while (label.indexOf('/') > -1) {
      int slash = label.indexOf('/');
      String element = label.substring(0, slash);
      label = label.substring(slash + 1);
      if (term.equalsIgnoreCase(element)) {
        return true;
      }
    }
    return term.equalsIgnoreCase(label);
  }

  /**
   * Collects the {@code /}-terminated elements of {@code a} into a Vector.
   * NOTE(review): any trailing text after the last {@code /} is dropped —
   * inputs are expected to end with {@code /} (as splitOnSeparators
   * produces); kept as-is for backward compatibility.
   */
  public Vector toVector(String a)
  {
    Vector result = new Vector();
    while (a.indexOf("/") > -1) {
      result.add(a.substring(0, a.indexOf("/")));
      a = a.substring(a.indexOf("/") + 1);
    }
    return result;
  }

  /** Tiny manual smoke test. */
  public static void main(String[] args)
  {
    System.out.println(splitOnCaps("romanCityWalls"));
  }
}
|
|
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squareup.okhttp.internal.spdy;
import com.squareup.okhttp.internal.Util;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import okio.Buffer;
import okio.BufferedSink;
import okio.BufferedSource;
import okio.Okio;
import okio.Source;
import org.junit.After;
import org.junit.Test;
import static com.squareup.okhttp.TestUtil.headerEntries;
import static com.squareup.okhttp.internal.spdy.ErrorCode.CANCEL;
import static com.squareup.okhttp.internal.spdy.ErrorCode.PROTOCOL_ERROR;
import static com.squareup.okhttp.internal.spdy.Settings.DEFAULT_INITIAL_WINDOW_SIZE;
import static com.squareup.okhttp.internal.spdy.Settings.PERSIST_VALUE;
import static com.squareup.okhttp.internal.spdy.Spdy3.TYPE_DATA;
import static com.squareup.okhttp.internal.spdy.Spdy3.TYPE_HEADERS;
import static com.squareup.okhttp.internal.spdy.Spdy3.TYPE_PING;
import static com.squareup.okhttp.internal.spdy.Spdy3.TYPE_RST_STREAM;
import static com.squareup.okhttp.internal.spdy.Spdy3.TYPE_SETTINGS;
import static com.squareup.okhttp.internal.spdy.Spdy3.TYPE_WINDOW_UPDATE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public final class Http2ConnectionTest {
private static final Variant HTTP_2 = new Http2();
private final MockSpdyPeer peer = new MockSpdyPeer();
// Release the mock peer's socket/resources after every test.
@After public void tearDown() throws Exception {
peer.close();
}
// A server-initiated PING (ack=false) must be answered with a PING ack
// carrying the same two payload words, on stream 0.
@Test public void serverPingsClientHttp2() throws Exception {
peer.setVariantAndClient(HTTP_2, false);
// write the mocking script
peer.sendFrame().ping(false, 2, 3);
peer.acceptFrame(); // PING
peer.play();
// play it back
connection(peer, HTTP_2);
// verify the peer received what was expected
MockSpdyPeer.InFrame ping = peer.takeFrame();
assertEquals(TYPE_PING, ping.type);
assertEquals(0, ping.streamId);
assertEquals(2, ping.payload1);
assertEquals(3, ping.payload2);
assertTrue(ping.ack);
}
// A client-initiated ping() should produce a non-ack PING frame and report a
// plausible (positive, sub-second) round-trip time once the peer acks it.
@Test public void clientPingsServerHttp2() throws Exception {
peer.setVariantAndClient(HTTP_2, false);
// write the mocking script
peer.acceptFrame(); // PING
peer.sendFrame().ping(true, 1, 5);
peer.play();
// play it back
SpdyConnection connection = connection(peer, HTTP_2);
Ping ping = connection.ping();
assertTrue(ping.roundTripTime() > 0);
assertTrue(ping.roundTripTime() < TimeUnit.SECONDS.toNanos(1));
// verify the peer received what was expected
MockSpdyPeer.InFrame pingFrame = peer.takeFrame();
assertEquals(0, pingFrame.streamId);
assertEquals(1, pingFrame.payload1);
assertEquals(0x4f4b6f6b, pingFrame.payload2); // connection.ping() sets this.
assertFalse(pingFrame.ack);
}
// Changing INITIAL_WINDOW_SIZE via SETTINGS applies to streams created after
// the change, while the connection-level write window keeps its earlier
// adjustment; each SETTINGS frame must be acked.
@Test public void peerHttp2ServerLowersInitialWindowSize() throws Exception {
peer.setVariantAndClient(HTTP_2, false);
Settings initial = new Settings();
initial.set(Settings.INITIAL_WINDOW_SIZE, PERSIST_VALUE, 1684);
Settings shouldntImpactConnection = new Settings();
shouldntImpactConnection.set(Settings.INITIAL_WINDOW_SIZE, PERSIST_VALUE, 3368);
peer.sendFrame().settings(initial);
peer.acceptFrame(); // ACK
peer.sendFrame().settings(shouldntImpactConnection);
peer.acceptFrame(); // ACK 2
peer.acceptFrame(); // HEADERS
peer.play();
SpdyConnection connection = connection(peer, HTTP_2);
// Default is 64KiB - 1.
assertEquals(65535, connection.peerSettings.getInitialWindowSize(-1));
// Verify the peer received the ACK.
MockSpdyPeer.InFrame ackFrame = peer.takeFrame();
assertEquals(TYPE_SETTINGS, ackFrame.type);
assertEquals(0, ackFrame.streamId);
assertTrue(ackFrame.ack);
ackFrame = peer.takeFrame();
assertEquals(TYPE_SETTINGS, ackFrame.type);
assertEquals(0, ackFrame.streamId);
assertTrue(ackFrame.ack);
// This stream was created *after* the connection settings were adjusted.
SpdyStream stream = connection.newStream(headerEntries("a", "android"), false, true);
assertEquals(3368, connection.peerSettings.getInitialWindowSize(DEFAULT_INITIAL_WINDOW_SIZE));
assertEquals(1684, connection.bytesLeftInWriteWindow); // initial wasn't affected.
// New Stream is has the most recent initial window size.
assertEquals(3368, stream.bytesLeftInWriteWindow);
}
// A peer SETTINGS frame setting HEADER_TABLE_SIZE to 0 must propagate both to
// peerSettings and to the HPACK reader's dynamic table limit.
@Test public void peerHttp2ServerZerosCompressionTable() throws Exception {
boolean client = false; // Peer is server, so we are client.
Settings settings = new Settings();
settings.set(Settings.HEADER_TABLE_SIZE, PERSIST_VALUE, 0);
SpdyConnection connection = sendHttp2SettingsAndCheckForAck(client, settings);
// verify the peer's settings were read and applied.
assertEquals(0, connection.peerSettings.getHeaderTableSize());
Http2.Reader frameReader = (Http2.Reader) connection.readerRunnable.frameReader;
assertEquals(0, frameReader.hpackReader.maxDynamicTableByteCount());
// TODO: when supported, check the frameWriter's compression table is unaffected.
}
// A peer client disabling ENABLE_PUSH via SETTINGS must be reflected in
// peerSettings (getEnablePush returns false even with a true default).
@Test public void peerHttp2ClientDisablesPush() throws Exception {
boolean client = false; // Peer is client, so we are server.
Settings settings = new Settings();
settings.set(Settings.ENABLE_PUSH, 0, 0); // The peer client disables push.
SpdyConnection connection = sendHttp2SettingsAndCheckForAck(client, settings);
// verify the peer's settings were read and applied.
assertFalse(connection.peerSettings.getEnablePush(true));
}
// Raising MAX_FRAME_SIZE via SETTINGS must update both peerSettings and the
// frame writer's outbound DATA length limit.
@Test public void peerIncreasesMaxFrameSize() throws Exception {
int newMaxFrameSize = 0x4001;
Settings settings = new Settings();
settings.set(Settings.MAX_FRAME_SIZE, 0, newMaxFrameSize);
SpdyConnection connection = sendHttp2SettingsAndCheckForAck(true, settings);
// verify the peer's settings were read and applied.
assertEquals(newMaxFrameSize, connection.peerSettings.getMaxFrameSize(-1));
assertEquals(newMaxFrameSize, connection.frameWriter.maxDataLength());
}
// After a GOAWAY with lastGoodStreamId=3: stream 3 keeps working, stream 5 is
// refused (writes fail with REFUSED_STREAM), and no new streams may be opened.
@Test public void receiveGoAwayHttp2() throws Exception {
peer.setVariantAndClient(HTTP_2, false);
// write the mocking script
peer.acceptFrame(); // SYN_STREAM 3
peer.acceptFrame(); // SYN_STREAM 5
peer.sendFrame().goAway(3, PROTOCOL_ERROR, Util.EMPTY_BYTE_ARRAY);
peer.acceptFrame(); // PING
peer.sendFrame().ping(true, 1, 0);
peer.acceptFrame(); // DATA STREAM 3
peer.play();
// play it back
SpdyConnection connection = connection(peer, HTTP_2);
SpdyStream stream1 = connection.newStream(headerEntries("a", "android"), true, true);
SpdyStream stream2 = connection.newStream(headerEntries("b", "banana"), true, true);
connection.ping().roundTripTime(); // Ensure the GO_AWAY that resets stream2 has been received.
BufferedSink sink1 = Okio.buffer(stream1.getSink());
BufferedSink sink2 = Okio.buffer(stream2.getSink());
sink1.writeUtf8("abc");
try {
sink2.writeUtf8("abc");
sink2.flush();
fail();
} catch (IOException expected) {
assertEquals("stream was reset: REFUSED_STREAM", expected.getMessage());
}
sink1.writeUtf8("def");
sink1.close();
try {
connection.newStream(headerEntries("c", "cola"), true, true);
fail();
} catch (IOException expected) {
assertEquals("shutdown", expected.getMessage());
}
assertTrue(stream1.isOpen());
assertFalse(stream2.isOpen());
assertEquals(1, connection.openStreamCount());
// verify the peer received what was expected
MockSpdyPeer.InFrame synStream1 = peer.takeFrame();
assertEquals(TYPE_HEADERS, synStream1.type);
MockSpdyPeer.InFrame synStream2 = peer.takeFrame();
assertEquals(TYPE_HEADERS, synStream2.type);
MockSpdyPeer.InFrame ping = peer.takeFrame();
assertEquals(TYPE_PING, ping.type);
MockSpdyPeer.InFrame data1 = peer.takeFrame();
assertEquals(TYPE_DATA, data1.type);
assertEquals(3, data1.streamId);
assertTrue(Arrays.equals("abcdef".getBytes("UTF-8"), data1.data));
}
// Reading data must emit WINDOW_UPDATE frames once the unacknowledged byte
// count reaches the threshold — one update for the connection (stream 0) and
// one for the stream, per threshold crossing.
@Test public void readSendsWindowUpdateHttp2() throws Exception {
peer.setVariantAndClient(HTTP_2, false);
int windowSize = 100;
int windowUpdateThreshold = 50;
// Write the mocking script.
peer.acceptFrame(); // SYN_STREAM
peer.sendFrame().synReply(false, 3, headerEntries("a", "android"));
for (int i = 0; i < 3; i++) {
// Send frames of summing to size 50, which is windowUpdateThreshold.
peer.sendFrame().data(false, 3, data(24), 24);
peer.sendFrame().data(false, 3, data(25), 25);
peer.sendFrame().data(false, 3, data(1), 1);
peer.acceptFrame(); // connection WINDOW UPDATE
peer.acceptFrame(); // stream WINDOW UPDATE
}
peer.sendFrame().data(true, 3, data(0), 0);
peer.play();
// Play it back.
SpdyConnection connection = connection(peer, HTTP_2);
connection.okHttpSettings.set(Settings.INITIAL_WINDOW_SIZE, 0, windowSize);
SpdyStream stream = connection.newStream(headerEntries("b", "banana"), false, true);
assertEquals(0, stream.unacknowledgedBytesRead);
assertEquals(headerEntries("a", "android"), stream.getResponseHeaders());
Source in = stream.getSource();
Buffer buffer = new Buffer();
buffer.writeAll(in);
assertEquals(-1, in.read(buffer, 1));
assertEquals(150, buffer.size());
MockSpdyPeer.InFrame synStream = peer.takeFrame();
assertEquals(TYPE_HEADERS, synStream.type);
for (int i = 0; i < 3; i++) {
// The two updates per crossing may arrive in either order, so collect the
// stream ids and check membership rather than ordering.
List<Integer> windowUpdateStreamIds = new ArrayList<>(2);
for (int j = 0; j < 2; j++) {
MockSpdyPeer.InFrame windowUpdate = peer.takeFrame();
assertEquals(TYPE_WINDOW_UPDATE, windowUpdate.type);
windowUpdateStreamIds.add(windowUpdate.streamId);
assertEquals(windowUpdateThreshold, windowUpdate.windowSizeIncrement);
}
assertTrue(windowUpdateStreamIds.contains(0)); // connection
assertTrue(windowUpdateStreamIds.contains(3)); // stream
}
}
// Convenience: a Buffer holding byteCount zero bytes, used as DATA payload.
private Buffer data(int byteCount) {
return new Buffer().write(new byte[byteCount]);
}
// An empty end-of-stream DATA frame from the server must not trigger a
// WINDOW_UPDATE: the peer sees only its own 2 frames plus our HEADERS (3 total).
@Test public void serverSendsEmptyDataClientDoesntSendWindowUpdateHttp2() throws Exception {
peer.setVariantAndClient(HTTP_2, false);
// Write the mocking script.
peer.acceptFrame(); // SYN_STREAM
peer.sendFrame().synReply(false, 3, headerEntries("a", "android"));
peer.sendFrame().data(true, 3, data(0), 0);
peer.play();
// Play it back.
SpdyConnection connection = connection(peer, HTTP_2);
SpdyStream client = connection.newStream(headerEntries("b", "banana"), false, true);
assertEquals(-1, client.getSource().read(new Buffer(), 1));
// Verify the peer received what was expected.
MockSpdyPeer.InFrame synStream = peer.takeFrame();
assertEquals(TYPE_HEADERS, synStream.type);
assertEquals(3, peer.frameCount());
}
@Test public void clientSendsEmptyDataServerDoesntSendWindowUpdateHttp2() throws Exception {
  peer.setVariantAndClient(HTTP_2, false);

  // Write the mocking script.
  peer.acceptFrame(); // SYN_STREAM
  peer.acceptFrame(); // DATA
  peer.sendFrame().synReply(false, 3, headerEntries("a", "android"));
  peer.play();

  // Play it back: write a zero-length body and close the sink.
  SpdyConnection connection = connection(peer, HTTP_2);
  SpdyStream client = connection.newStream(headerEntries("b", "banana"), true, true);
  BufferedSink out = Okio.buffer(client.getSink());
  out.write(Util.EMPTY_BYTE_ARRAY);
  out.flush();
  out.close();

  // Verify the peer received HEADERS plus the empty DATA frame, and nothing
  // else: no window was consumed, so no WINDOW_UPDATE should be sent.
  assertEquals(TYPE_HEADERS, peer.takeFrame().type);
  assertEquals(TYPE_DATA, peer.takeFrame().type);
  assertEquals(3, peer.frameCount());
}
@Test public void maxFrameSizeHonored() throws Exception {
  peer.setVariantAndClient(HTTP_2, false);
  // One byte more than the largest DATA frame the peer accepts, forcing a split.
  byte[] buff = new byte[peer.maxOutboundDataLength() + 1];
  Arrays.fill(buff, (byte) '*');

  // write the mocking script
  peer.acceptFrame(); // SYN_STREAM
  peer.sendFrame().synReply(false, 3, headerEntries("a", "android"));
  peer.acceptFrame(); // DATA
  peer.acceptFrame(); // DATA
  peer.play();

  // play it back
  SpdyConnection connection = connection(peer, HTTP_2);
  SpdyStream stream = connection.newStream(headerEntries("b", "banana"), true, true);
  BufferedSink out = Okio.buffer(stream.getSink());
  out.write(buff);
  out.flush();
  out.close();

  MockSpdyPeer.InFrame synStream = peer.takeFrame();
  assertEquals(TYPE_HEADERS, synStream.type);
  // The oversized write must be split: a maximally-sized frame plus a 1-byte remainder.
  MockSpdyPeer.InFrame data = peer.takeFrame();
  assertEquals(peer.maxOutboundDataLength(), data.data.length);
  data = peer.takeFrame();
  assertEquals(1, data.data.length);
}
@Test public void pushPromiseStream() throws Exception {
  peer.setVariantAndClient(HTTP_2, false);

  // write the mocking script: reply on stream 3, then push stream 2 with its
  // request headers, its response headers, and finally end stream 3.
  peer.acceptFrame(); // SYN_STREAM
  peer.sendFrame().synReply(false, 3, headerEntries("a", "android"));
  final List<Header> expectedRequestHeaders = Arrays.asList(
      new Header(Header.TARGET_METHOD, "GET"),
      new Header(Header.TARGET_SCHEME, "https"),
      new Header(Header.TARGET_AUTHORITY, "squareup.com"),
      new Header(Header.TARGET_PATH, "/cached")
  );
  peer.sendFrame().pushPromise(3, 2, expectedRequestHeaders);
  final List<Header> expectedResponseHeaders = Arrays.asList(
      new Header(Header.RESPONSE_STATUS, "200")
  );
  peer.sendFrame().synReply(true, 2, expectedResponseHeaders);
  peer.sendFrame().data(true, 3, data(0), 0);
  peer.play();

  RecordingPushObserver observer = new RecordingPushObserver();

  // play it back
  SpdyConnection connection = connectionBuilder(peer, HTTP_2)
      .pushObserver(observer).build();
  SpdyStream client = connection.newStream(headerEntries("b", "banana"), false, true);
  assertEquals(-1, client.getSource().read(new Buffer(), 1));

  // verify the push observer saw both header events for the promised stream
  assertEquals(TYPE_HEADERS, peer.takeFrame().type);
  assertEquals(expectedRequestHeaders, observer.takeEvent());
  assertEquals(expectedResponseHeaders, observer.takeEvent());
}
@Test public void doublePushPromise() throws Exception {
  peer.setVariantAndClient(HTTP_2, false);

  // write the mocking script: promise stream 2 twice, which is a protocol violation.
  peer.sendFrame().pushPromise(3, 2, headerEntries("a", "android"));
  peer.acceptFrame(); // SYN_REPLY
  peer.sendFrame().pushPromise(3, 2, headerEntries("b", "banana"));
  peer.acceptFrame(); // RST_STREAM
  peer.play();

  // play it back
  SpdyConnection connection = connectionBuilder(peer, HTTP_2).build();
  connection.newStream(headerEntries("b", "banana"), false, true);

  // verify the duplicate promise is rejected with a PROTOCOL_ERROR reset
  assertEquals(TYPE_HEADERS, peer.takeFrame().type);
  assertEquals(PROTOCOL_ERROR, peer.takeFrame().errorCode);
}
@Test public void pushPromiseStreamsAutomaticallyCancel() throws Exception {
  peer.setVariantAndClient(HTTP_2, false);

  // write the mocking script: a pushed stream with request and response headers.
  peer.sendFrame().pushPromise(3, 2, Arrays.asList(
      new Header(Header.TARGET_METHOD, "GET"),
      new Header(Header.TARGET_SCHEME, "https"),
      new Header(Header.TARGET_AUTHORITY, "squareup.com"),
      new Header(Header.TARGET_PATH, "/cached")
  ));
  peer.sendFrame().synReply(true, 2, Arrays.asList(
      new Header(Header.RESPONSE_STATUS, "200")
  ));
  peer.acceptFrame(); // RST_STREAM
  peer.play();

  // play it back with an observer that declines every push
  connectionBuilder(peer, HTTP_2)
      .pushObserver(PushObserver.CANCEL).build();

  // verify the declined push is cancelled with RST_STREAM(CANCEL) on stream 2
  MockSpdyPeer.InFrame rstStream = peer.takeFrame();
  assertEquals(TYPE_RST_STREAM, rstStream.type);
  assertEquals(2, rstStream.streamId);
  assertEquals(CANCEL, rstStream.errorCode);
}
/**
 * Sends {@code settings} from the peer, waits for the connection to ACK them,
 * and pings to guarantee the settings were applied before returning.
 *
 * @param client whether the mock peer plays the client role
 * @return the connection with the settings in effect
 */
private SpdyConnection sendHttp2SettingsAndCheckForAck(boolean client, Settings settings)
    throws IOException, InterruptedException {
  peer.setVariantAndClient(HTTP_2, client);
  peer.sendFrame().settings(settings);
  peer.acceptFrame(); // ACK
  peer.acceptFrame(); // PING
  peer.sendFrame().ping(true, 1, 0);
  peer.play();

  // play it back
  SpdyConnection connection = connection(peer, HTTP_2);

  // verify the peer received the ACK: a SETTINGS frame on stream 0 with the ack flag
  MockSpdyPeer.InFrame ackFrame = peer.takeFrame();
  assertEquals(TYPE_SETTINGS, ackFrame.type);
  assertEquals(0, ackFrame.streamId);
  assertTrue(ackFrame.ack);

  connection.ping().roundTripTime(); // Ensure that settings have been applied before returning.
  return connection;
}
/** Returns a connection to {@code peer} built with the default test settings. */
private SpdyConnection connection(MockSpdyPeer peer, Variant variant) throws IOException {
  SpdyConnection.Builder builder = connectionBuilder(peer, variant);
  return builder.build();
}
/**
 * Returns a client-side connection builder over the mock peer's socket.
 * Pushed streams are ignored unless the caller installs another observer.
 */
private SpdyConnection.Builder connectionBuilder(MockSpdyPeer peer, Variant variant)
    throws IOException {
  SpdyConnection.Builder builder = new SpdyConnection.Builder(true, peer.openSocket());
  builder.pushObserver(IGNORE);
  builder.protocol(variant.getProtocol());
  return builder;
}
/** A PushObserver that declines every pushed stream and discards its data. */
static final PushObserver IGNORE = new PushObserver() {

  @Override public boolean onRequest(int streamId, List<Header> requestHeaders) {
    return false;
  }

  @Override public boolean onHeaders(int streamId, List<Header> responseHeaders, boolean last) {
    return false;
  }

  @Override public boolean onData(int streamId, BufferedSource source, int byteCount,
      boolean last) throws IOException {
    source.skip(byteCount); // Consume the pushed bytes so the connection isn't stalled.
    return false;
  }

  @Override public void onReset(int streamId, ErrorCode errorCode) {
  }
};
/**
 * A PushObserver that records header events for pushed stream 2 and lets a
 * test thread block until each event arrives via {@link #takeEvent}.
 * Synchronization uses the observer's own monitor with wait/notifyAll.
 */
private static class RecordingPushObserver implements PushObserver {
  final List<Object> events = new ArrayList<>();

  /** Blocks until an event is available, then removes and returns the oldest one. */
  public synchronized Object takeEvent() throws InterruptedException {
    while (events.isEmpty()) {
      wait();
    }
    return events.remove(0);
  }

  @Override public synchronized boolean onRequest(int streamId, List<Header> requestHeaders) {
    assertEquals(2, streamId);
    events.add(requestHeaders);
    notifyAll();
    return false;
  }

  @Override public synchronized boolean onHeaders(
      int streamId, List<Header> responseHeaders, boolean last) {
    assertEquals(2, streamId);
    assertTrue(last);
    events.add(responseHeaders);
    notifyAll();
    return false;
  }

  @Override public synchronized boolean onData(
      int streamId, BufferedSource source, int byteCount, boolean last) {
    // Data events are unexpected in these tests; record an error for takeEvent().
    events.add(new AssertionError("onData"));
    notifyAll();
    return false;
  }

  @Override public synchronized void onReset(int streamId, ErrorCode errorCode) {
    events.add(new AssertionError("onReset"));
    notifyAll();
  }
}
}
|
|
package com.cruzj6.mha.dataManagement;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.database.CursorIndexOutOfBoundsException;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;
import android.provider.ContactsContract;
import android.util.Log;
import com.cruzj6.mha.helpers.NotificationItemsManager;
import com.cruzj6.mha.models.AppointmentItem;
import com.cruzj6.mha.models.Days;
import com.cruzj6.mha.models.MissedPillContainer;
import com.cruzj6.mha.models.PillItem;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
/**
* Created by Joey on 5/23/16.
* Class for managing the database of appointments and medication using the DatabaseContract
*/
public class DatabaseManager extends SQLiteOpenHelper{
    private final static String TAG = "DatabaseManager";
    public final static String DATABASE_NAME = "MedApp.db";
    public final static int DATABASE_VER = 4;
    private Context context;

    public DatabaseManager(Context context) {
        super(context, DATABASE_NAME, null, DATABASE_VER);
        this.context = context;
    }

    /**
     * Removes an appointment from the database.
     * @param apptId row id of the appointment to remove
     */
    public void deleteAppointment(long apptId)
    {
        SQLiteDatabase db = getWritableDatabase();
        try {
            db.delete(DatabaseContract.AppointmentEntry.TABLE_NAME,
                    DatabaseContract.AppointmentEntry._ID + "=" + apptId, null);
        } finally {
            db.close();
        }
    }

    /**
     * Removes a medication from the database and cancels its pending notifications.
     * @param pillId row id of the pill to remove
     */
    public void deletePill(long pillId)
    {
        PillItem pi = loadPillItemById(pillId);
        // Get rid of the notifications. loadPillItemById() returns null when the
        // row is missing or unreadable; skip notification cleanup in that case
        // (previously this would NPE inside removeOldPillNotifications).
        if (pi != null) {
            NotificationItemsManager.removeOldPillNotifications(pi, context);
        }
        SQLiteDatabase db = getWritableDatabase();
        try {
            db.delete(DatabaseContract.PillEntry.TABLE_NAME,
                    DatabaseContract.PillEntry._ID + "=" + pillId, null);
        } finally {
            db.close();
        }
    }

    /**
     * Loads every missed-medication row from the database.
     * @return list of all MissedPillContainers in the DB
     */
    public List<MissedPillContainer> loadAllMissedPills()
    {
        List<MissedPillContainer> loadedItems = new ArrayList<>();
        //Get all of the rows in the table for missed pills/meds
        SQLiteDatabase database = getReadableDatabase();
        Cursor c = database.query(DatabaseContract.MissedPillEntry.TABLE_NAME, null, null, null, null, null, null);
        try {
            //Go through each row
            c.moveToFirst();
            while(!c.isAfterLast())
            {
                try {
                    MissedPillContainer newItem = missedPillContainerFromCursor(c);
                    loadedItems.add(newItem);
                }
                catch(IllegalArgumentException e)
                {
                    // A malformed row is skipped rather than aborting the whole load.
                    Log.e(TAG, "Could not load missed pill from DB: loadAllMissedPills(): \n" + e.getMessage());
                    e.printStackTrace();
                }
                c.moveToNext();
            }
        } finally {
            c.close(); // fix: cursor was previously never closed
            database.close();
        }
        return loadedItems;
    }

    /**
     * Builds a MissedPillContainer from the cursor's current row.
     * @throws IllegalArgumentException if an expected column is missing
     */
    private MissedPillContainer missedPillContainerFromCursor(Cursor c)
    {
        int colIndex = c.getColumnIndexOrThrow(DatabaseContract.MissedPillEntry._ID);
        long pk = c.getLong(colIndex);

        colIndex = c.getColumnIndexOrThrow(DatabaseContract.MissedPillEntry.COLUMN_NAME_TIME_MISSED);
        long missedDate = c.getLong(colIndex);

        colIndex = c.getColumnIndexOrThrow(DatabaseContract.MissedPillEntry.COLUMN_NAME_PILL_NAME);
        String pillName = c.getString(colIndex);

        return new MissedPillContainer(pillName, pk, missedDate);
    }

    /**
     * Adds a missed medication to the database (missed time is when this is called).
     * @param missedPill the medication that was missed
     * @return DB table id of the missed pill instance (separate from pillId!)
     */
    public long addMissedPill(PillItem missedPill)
    {
        SQLiteDatabase db = getWritableDatabase();
        try {
            Date missedTime = new Date();
            ContentValues newData = new ContentValues();
            newData.put(DatabaseContract.MissedPillEntry.COLUMN_NAME_PILL_NAME, missedPill.getTitle());
            // Stored as Unix time in seconds, matching the rest of the schema.
            newData.put(DatabaseContract.MissedPillEntry.COLUMN_NAME_TIME_MISSED, missedTime.getTime()/1000);
            return db.insert(DatabaseContract.MissedPillEntry.TABLE_NAME, null, newData);
        } finally {
            db.close();
        }
    }

    /** Removes a missed-medication row by its table id. */
    public void removeMissedPill(long missedPillId)
    {
        SQLiteDatabase db = getWritableDatabase();
        try {
            db.delete(DatabaseContract.MissedPillEntry.TABLE_NAME,
                    DatabaseContract.MissedPillEntry._ID + "=" + missedPillId, null);
        } finally {
            db.close();
        }
    }

    /**
     * Adds (if no id in AppointmentItem) or modifies an appointment in the database.
     * @param apptItem appointment to persist; an id of -1 means "not yet stored"
     * @return the appointment's id
     */
    public long saveAppointment(AppointmentItem apptItem)
    {
        SQLiteDatabase db = getWritableDatabase();
        try {
            //Build the entry
            ContentValues newData = new ContentValues();
            newData.put(DatabaseContract.AppointmentEntry.COLUMN_NAME_DR_NAME, apptItem.getAppointmentTitle());
            newData.put(DatabaseContract.AppointmentEntry.COLUMN_NAME_APPT_DATE, apptItem.getApptDate());
            newData.put(DatabaseContract.AppointmentEntry.COLUMN_NAME_REQ_LABWORK, apptItem.getRequiresLabWork());
            newData.put(DatabaseContract.AppointmentEntry.COLUMN_NAME_LABWORK_DAYS_BEFORE, apptItem.getLabworkDaysBefore());
            newData.put(DatabaseContract.AppointmentEntry.COLUMN_NAME_REMIND_DAYS_BEFORE, apptItem.getRemindDaysBefore());
            newData.put(DatabaseContract.AppointmentEntry.COLUMN_NAME_NOTES, apptItem.getNotes());

            if(apptItem.getApptId() == -1) {
                //Insert the entry if none exists with that id
                return db.insert(DatabaseContract.AppointmentEntry.TABLE_NAME, null, newData);
            } else {
                //If already exists update it
                db.update(DatabaseContract.AppointmentEntry.TABLE_NAME, newData,
                        DatabaseContract.AppointmentEntry._ID + "=" + apptItem.getApptId(), null);
                return apptItem.getApptId();
            }
        } finally {
            db.close(); // fix: the insert branch previously returned without closing the db
        }
    }

    /**
     * Adds (if no id in pillItem) or modifies a medication in the database.
     * @param pillItem medication to persist; an id of -1 means "not yet stored"
     * @return id of the medication
     */
    public long savePill(PillItem pillItem)
    {
        SQLiteDatabase db = getWritableDatabase();
        try {
            //Build the entry
            ContentValues newData = new ContentValues();
            newData.put(DatabaseContract.PillEntry.COLUMN_NAME_PILL_NAME, pillItem.getTitle());
            newData.put(DatabaseContract.PillEntry.COLUMN_NAME_INSTR, pillItem.getInstr());

            //So we can iterate through easier; index order matches getTimesForDay(day)
            String[] dayDBContract = {
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_S,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_M,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_T,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_W,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_R,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_F,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_Sa,
            };

            //Store the times for each day as a comma-separated string, or NULL for no times
            for(int i = 0; i < dayDBContract.length; i++)
            {
                long[] timesAsLongs = pillItem.getTimesForDay(i);
                if(timesAsLongs != null && timesAsLongs.length > 0) {
                    StringBuilder timesBuilder = new StringBuilder();
                    for (long time : timesAsLongs) {
                        if (timesBuilder.length() > 0) {
                            timesBuilder.append(',');
                        }
                        timesBuilder.append(time);
                    }
                    newData.put(dayDBContract[i], timesBuilder.toString());
                }
                else newData.putNull(dayDBContract[i]);
            }

            //Duration and until-date are mutually exclusive; -1 marks the unused one
            if(!pillItem.getIsEndByDate()) {
                newData.put(DatabaseContract.PillEntry.COLUMN_NAME_UNTIL_DATE, -1);
                newData.put(DatabaseContract.PillEntry.COLUMN_NAME_DURATION, pillItem.getDuration());
            }
            else {
                newData.put(DatabaseContract.PillEntry.COLUMN_NAME_DURATION, -1);
                newData.put(DatabaseContract.PillEntry.COLUMN_NAME_UNTIL_DATE, pillItem.getUntilDate());
            }
            newData.put(DatabaseContract.PillEntry.COLUMN_NAME_REFILL_DATE, pillItem.getRefillDate());

            if(pillItem.getPillId() == -1) {
                //Insert the entry if none exists with that id
                return db.insert(DatabaseContract.PillEntry.TABLE_NAME, null, newData);
            } else {
                //If already exists update it
                db.update(DatabaseContract.PillEntry.TABLE_NAME, newData,
                        DatabaseContract.PillEntry._ID + "=" + pillItem.getPillId(), null);
                return pillItem.getPillId();
            }
        } finally {
            db.close(); // fix: the insert branch previously returned without closing the db
        }
    }

    /**
     * Loads items from the appointments table into AppointmentItems.
     * @return list of all AppointmentItems in DB
     */
    public List<AppointmentItem> loadAppointmentItems()
    {
        List<AppointmentItem> loadedItems = new ArrayList<>();
        SQLiteDatabase database = getReadableDatabase();
        Cursor c = database.query(DatabaseContract.AppointmentEntry.TABLE_NAME, null, null, null, null, null, null);
        try {
            //Go through each row
            c.moveToFirst();
            while(!c.isAfterLast())
            {
                try {
                    AppointmentItem newItem = appointmentItemFromCursor(c);
                    loadedItems.add(newItem);
                }
                catch(IllegalArgumentException e)
                {
                    Log.e(TAG, "Could not load appointment from DB: loadAppointmentItems(): \n" + e.getMessage());
                    e.printStackTrace();
                }
                c.moveToNext();
            }
        } finally {
            c.close(); // fix: cursor was previously never closed
            database.close();
        }
        return loadedItems;
    }

    /**
     * Gets the AppointmentItem of the appointment with the given id.
     * @param id appointment row id
     * @return the AppointmentItem for the appointment with the id
     */
    public AppointmentItem loadAppointmentById(Long id)
    {
        SQLiteDatabase database = getReadableDatabase();
        Cursor c = database.rawQuery("SELECT * FROM " + DatabaseContract.AppointmentEntry.TABLE_NAME
                + " WHERE " + DatabaseContract.AppointmentEntry._ID + "=" + id, null);
        try {
            c.moveToFirst();
            return appointmentItemFromCursor(c);
        } finally {
            c.close(); // fix: cursor was previously never closed
            database.close();
        }
    }

    /**
     * Loads a PillItem from the DB by its id.
     * @param id pill row id
     * @return loaded PillItem, or null if the row is missing or unreadable
     */
    public PillItem loadPillItemById(long id)
    {
        SQLiteDatabase database = getReadableDatabase();
        Cursor c = database.rawQuery("SELECT * FROM " + DatabaseContract.PillEntry.TABLE_NAME
                + " WHERE " + DatabaseContract.PillEntry._ID + "=" + id, null);
        try {
            c.moveToFirst();
            return pillItemFromCursor(c);
        }
        catch(IllegalArgumentException e)
        {
            Log.e(TAG, "Could not load pillItem from DB: loadPillItemById(): \n" + e.getMessage());
            e.printStackTrace();
            return null;
        } finally {
            c.close(); // fix: cursor was previously never closed
            database.close();
        }
    }

    /**
     * Loads all PillItems (medications) from the database.
     * @return list of PillItems for all medications in the database
     */
    public List<PillItem> loadPillItems()
    {
        List<PillItem> pillItems = new ArrayList<>();
        SQLiteDatabase database = getReadableDatabase();
        Cursor c = database.query(DatabaseContract.PillEntry.TABLE_NAME, null, null, null, null, null, null);
        try {
            //Go through each row
            c.moveToFirst();
            while(!c.isAfterLast())
            {
                try {
                    PillItem newItem = pillItemFromCursor(c);
                    // pillItemFromCursor() returns null for an unreadable row;
                    // don't add nulls to the result (previously possible).
                    if (newItem != null) {
                        pillItems.add(newItem);
                    }
                }
                catch(IllegalArgumentException e)
                {
                    Log.e(TAG, "Could not load PillItem from DB: loadPillItems(): \n" + e.getMessage());
                    e.printStackTrace();
                }
                c.moveToNext();
            }
        } finally {
            c.close(); // fix: cursor was previously never closed
            database.close();
        }
        return pillItems;
    }

    /**
     * Builds a PillItem from the cursor's current row.
     * @return the PillItem, or null if the cursor has no current row
     * @throws IllegalArgumentException if an expected column is missing
     */
    private PillItem pillItemFromCursor(Cursor c)
    {
        PillItem newItem;
        try {
            int colIndex = c.getColumnIndexOrThrow(DatabaseContract.PillEntry._ID);
            long pk = c.getLong(colIndex);

            colIndex = c.getColumnIndexOrThrow(DatabaseContract.PillEntry.COLUMN_NAME_PILL_NAME);
            String title = c.getString(colIndex);

            colIndex = c.getColumnIndexOrThrow(DatabaseContract.PillEntry.COLUMN_NAME_INSTR);
            String instr = c.getString(colIndex);

            colIndex = c.getColumnIndexOrThrow(DatabaseContract.PillEntry.COLUMN_NAME_DURATION);
            int duration = c.getInt(colIndex);

            colIndex = c.getColumnIndexOrThrow(DatabaseContract.PillEntry.COLUMN_NAME_UNTIL_DATE);
            long untilDate = c.getLong(colIndex);

            colIndex = c.getColumnIndexOrThrow(DatabaseContract.PillEntry.COLUMN_NAME_REFILL_DATE);
            long refillDate = c.getLong(colIndex);

            //A duration of -1 means the pill is taken until a date instead
            if (duration == -1)
                newItem = new PillItem(title, instr, untilDate, refillDate);
            else
                newItem = new PillItem(title, instr, duration, refillDate);

            String[] dayDBContract = {
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_S,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_M,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_T,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_W,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_R,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_F,
                    DatabaseContract.PillEntry.COLUMN_NAME_TIMES_Sa,
            };

            //Now set up the times to take for each day (comma-separated longs)
            for (int i = 0; i < dayDBContract.length; i++) {
                colIndex = c.getColumnIndexOrThrow(dayDBContract[i]);
                String timesToTake = c.getString(colIndex);

                if (timesToTake != null && !timesToTake.equals("")) {
                    String[] timesArray = timesToTake.split(",");
                    long[] timesAsLongs = new long[timesArray.length];

                    //Convert times into longs and store
                    for (int j = 0; j < timesArray.length; j++) {
                        timesAsLongs[j] = Long.parseLong(timesArray[j]);
                    }

                    //Set the times for the item for that day
                    newItem.setTimesForDay(i, timesAsLongs);
                }
            }
            newItem.setPillId(pk);
        }catch (CursorIndexOutOfBoundsException e)
        {
            e.printStackTrace();
            return null;
        }
        return newItem;
    }

    /**
     * Load all of the medications for a specific day.
     * @param day 0 = Sunday .. 6 = Saturday
     * @return list of PillItems for the specified day
     */
    public List<PillItem> loadPillsForDay(int day)
    {
        List<PillItem> pills = new ArrayList<>();
        String dbDayName = "";

        //Get the string for the column name we want to query for
        switch(day)
        {
            case 0:
                dbDayName = DatabaseContract.PillEntry.COLUMN_NAME_TIMES_S;
                break;
            case 1:
                dbDayName = DatabaseContract.PillEntry.COLUMN_NAME_TIMES_M;
                break;
            case 2:
                dbDayName = DatabaseContract.PillEntry.COLUMN_NAME_TIMES_T;
                break;
            case 3:
                dbDayName = DatabaseContract.PillEntry.COLUMN_NAME_TIMES_W;
                break;
            case 4:
                dbDayName = DatabaseContract.PillEntry.COLUMN_NAME_TIMES_R;
                break;
            case 5:
                dbDayName = DatabaseContract.PillEntry.COLUMN_NAME_TIMES_F;
                break;
            case 6:
                dbDayName = DatabaseContract.PillEntry.COLUMN_NAME_TIMES_Sa;
                break;
        }

        SQLiteDatabase database = getReadableDatabase();
        Cursor c = database.rawQuery("SELECT * FROM " + DatabaseContract.PillEntry.TABLE_NAME
                + " WHERE " + dbDayName + " IS NOT NULL", null);
        try {
            c.moveToFirst();
            while(!c.isAfterLast())
            {
                try {
                    PillItem pillItem = pillItemFromCursor(c);
                    // Skip unreadable rows instead of adding nulls (previously possible).
                    if (pillItem != null) {
                        pills.add(pillItem);
                    }
                }
                catch(IllegalArgumentException e)
                {
                    Log.e(TAG, "Could not load PillItem from DB: loadPillsForDay(): \n" + e.getMessage());
                    e.printStackTrace();
                }
                c.moveToNext();
            }
        } finally {
            c.close(); // fix: cursor was previously never closed
            database.close();
        }
        return pills;
    }

    /**
     * Builds an AppointmentItem from the cursor's current row.
     * @throws IllegalArgumentException if an expected column is missing
     */
    private AppointmentItem appointmentItemFromCursor(Cursor c)
    {
        AppointmentItem newItem;

        int colIndex = c.getColumnIndexOrThrow(DatabaseContract.AppointmentEntry._ID);
        long pk = c.getLong(colIndex);
        Log.v("TEST PRIMARY KEY", "" + pk);

        //Get the drName
        colIndex = c.getColumnIndexOrThrow(DatabaseContract.AppointmentEntry.COLUMN_NAME_DR_NAME);
        String drName = c.getString(colIndex);

        //Get the appointment date (Unix Time)
        colIndex = c.getColumnIndexOrThrow(DatabaseContract.AppointmentEntry.COLUMN_NAME_APPT_DATE);
        long apptDate = c.getLong(colIndex);

        //Get days before appointment user needs to do lab work
        colIndex = c.getColumnIndexOrThrow(DatabaseContract.AppointmentEntry.COLUMN_NAME_LABWORK_DAYS_BEFORE);
        long labworkDaysBefore = c.getLong(colIndex);

        //Get the notes for the appointment
        colIndex = c.getColumnIndexOrThrow(DatabaseContract.AppointmentEntry.COLUMN_NAME_NOTES);
        String notes = c.getString(colIndex);

        //Get the number of days before the appt to remind the user
        colIndex = c.getColumnIndexOrThrow(DatabaseContract.AppointmentEntry.COLUMN_NAME_REMIND_DAYS_BEFORE);
        long remindDaysBefore = c.getLong(colIndex);

        //Get if requires labwork ("1" means yes; stored as TEXT)
        colIndex = c.getColumnIndexOrThrow(DatabaseContract.AppointmentEntry.COLUMN_NAME_REQ_LABWORK);
        String labWork = c.getString(colIndex);
        Log.v("DATABASEMANAGER", "LabWork Req: " + labWork);

        //Null-safe check: a NULL column means no labwork required
        if("1".equals(labWork))
        {
            newItem = new AppointmentItem(drName, apptDate, remindDaysBefore, notes, labworkDaysBefore);
        }
        else
        {
            newItem = new AppointmentItem(drName, apptDate, remindDaysBefore, notes);
        }

        //set id and return
        newItem.setApptId(pk);
        return newItem;
    }

    @Override
    public void onCreate(SQLiteDatabase db) {
        //Create the appointments table
        db.execSQL("CREATE TABLE " + DatabaseContract.AppointmentEntry.TABLE_NAME +
                " (" +
                DatabaseContract.AppointmentEntry._ID + " INTEGER PRIMARY KEY," +
                DatabaseContract.AppointmentEntry.COLUMN_NAME_DR_NAME + " TEXT," +
                DatabaseContract.AppointmentEntry.COLUMN_NAME_APPT_DATE + " INTEGER," +
                DatabaseContract.AppointmentEntry.COLUMN_NAME_REQ_LABWORK + " TEXT," +
                DatabaseContract.AppointmentEntry.COLUMN_NAME_LABWORK_DAYS_BEFORE + " INTEGER," +
                DatabaseContract.AppointmentEntry.COLUMN_NAME_REMIND_DAYS_BEFORE + " INTEGER," +
                DatabaseContract.AppointmentEntry.COLUMN_NAME_NOTES + " TEXT"
                + ")"
        );

        //Create the pills table
        db.execSQL("CREATE TABLE " + DatabaseContract.PillEntry.TABLE_NAME +
                " (" +
                DatabaseContract.PillEntry._ID + " INTEGER PRIMARY KEY," +
                DatabaseContract.PillEntry.COLUMN_NAME_PILL_NAME + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_DURATION + " INTEGER," +
                DatabaseContract.PillEntry.COLUMN_NAME_UNTIL_DATE + " INTEGER," +
                DatabaseContract.PillEntry.COLUMN_NAME_REFILL_DATE + " INTEGER," +
                DatabaseContract.PillEntry.COLUMN_NAME_TIMES_S + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_TIMES_M + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_TIMES_T + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_TIMES_W + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_TIMES_R + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_TIMES_F + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_TIMES_Sa + " TEXT," +
                DatabaseContract.PillEntry.COLUMN_NAME_INSTR + " TEXT"
                + ")"
        );

        //Missed pills table
        db.execSQL("CREATE TABLE " + DatabaseContract.MissedPillEntry.TABLE_NAME +
                " (" +
                DatabaseContract.MissedPillEntry._ID + " INTEGER PRIMARY KEY," +
                DatabaseContract.MissedPillEntry.COLUMN_NAME_PILL_NAME + " TEXT," +
                DatabaseContract.MissedPillEntry.COLUMN_NAME_TIME_MISSED + " INTEGER" +
                ")"
        );
    }

    @Override
    public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
        //The missed pills table was introduced in version 4
        if(oldVersion < 4) {
            db.execSQL("CREATE TABLE " + DatabaseContract.MissedPillEntry.TABLE_NAME +
                    " (" +
                    DatabaseContract.MissedPillEntry._ID + " INTEGER PRIMARY KEY," +
                    DatabaseContract.MissedPillEntry.COLUMN_NAME_PILL_NAME + " TEXT," +
                    DatabaseContract.MissedPillEntry.COLUMN_NAME_TIME_MISSED + " INTEGER" +
                    ")"
            );
        }
    }
}
|
|
package ru.r2cloud.jradio.jpeg.validator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import javax.imageio.plugins.jpeg.JPEGHuffmanTable;
import org.jtransforms.dct.DoubleDCT_2D;
public class DataUnitDecoder {
	// 2^16 — number of distinct 16-bit lookup prefixes
	private static final int TWO_BYTES_POSSIBLE_VALUES = (int) (Math.pow(2, 8) * Math.pow(2, 8));
	private static final int BIT_IN_TWO_BYTES = 16;
	// 8x8 inverse DCT used to reconstruct pixel values from dequantized coefficients
	private static final DoubleDCT_2D DCTTransform = new DoubleDCT_2D(8, 8);
	// natural-order index -> position in the zigzag-serialized coefficient stream
	private static final byte[] ZIGZAG_INDEXES = new byte[] { 0, 1, 5, 6, 14, 15, 27, 28, 2, 4, 7, 13, 16, 26, 29, 42, 3, 8, 12, 17, 25, 30, 41, 43, 9, 11, 18, 24, 31, 40, 44, 53, 10, 19, 23, 32, 39, 45, 52, 54, 20, 22, 33, 38, 46, 51, 55, 60, 21, 34, 37, 47, 50, 56, 59, 61, 35, 36, 48, 49, 57, 58, 62, 63 };

	// mapping between codeword and AcCode
	// spatial table, stores less AcCode objects than 65536
	private AcCode[] acYCodes;
	private DcCode[] dcYCodes;
	private AcCode[] acCbcrCodes;
	private DcCode[] dcCbcrCodes;

	// mapping between all possible 2bytes and the codeword. Saves 12 bitwise shift and compare operations
	private AcCode[] acYLookup;
	private AcCode[] acCbcrLookup;

	// lookup table for max 2 bytes. category lookup table has max 9 bits (i.e. 8 bit + 1 bit)
	// it saves us 12 bitwise shift and compare operations (worst case scenario);
	// each entry holds the DC category, or -1 when the prefix matches no codeword
	private int[] dcYLookup;
	private int[] dcCbcrLookup;
	/**
	 * Builds the codeword and 16-bit-prefix lookup tables from the JPEG
	 * quantization and Huffman tables.
	 *
	 * @param yDqt    quantization table for the Y (luma) component
	 * @param cbcrDqt quantization table for the Cb/Cr (chroma) components
	 * @param dc0     DC Huffman table for luma
	 * @param dc1     DC Huffman table for chroma
	 * @param ac0     AC Huffman table for luma
	 * @param ac1     AC Huffman table for chroma
	 */
	public DataUnitDecoder(int[] yDqt, int[] cbcrDqt, JPEGHuffmanTable dc0, JPEGHuffmanTable dc1, JPEGHuffmanTable ac0, JPEGHuffmanTable ac1) {
		this.yDqt = yDqt;
		this.cbcrDqt = cbcrDqt;
		dcYCodes = createTable(dc0);
		dcYLookup = setupDcLookup(dcYCodes);
		acYCodes = createAcTable(ac0);
		acYLookup = setupAcLookup(acYCodes);
		dcCbcrCodes = createTable(dc1);
		dcCbcrLookup = setupDcLookup(dcCbcrCodes);
		acCbcrCodes = createAcTable(ac1);
		acCbcrLookup = setupAcLookup(acCbcrCodes);
	}
	static final int PIXELS_PER_DU = 8; // data units are 8x8 pixel blocks
	private byte[] data; // entropy-coded payload currently being decoded
	private int currentBitIndex = 0; // read position within data, in bits
	private boolean hasNext = false; // whether currentPixels holds a fully decoded unit
	private final int[] currentPixels = new int[PIXELS_PER_DU * PIXELS_PER_DU];
	private final double[] dct = new double[currentPixels.length];
	private int[] yDqt;
	private int[] cbcrDqt;
	// last decoded DC value per component (indexes: 0=Y, 1=Cb, 2=Cr) for differential DC coding
	private double[] previousDc = new double[3];
public boolean hasNext(boolean isYComponent, boolean isCbComponent, boolean isCrComponent) throws IOException {
int previousBitIndex = currentBitIndex;
hasNext = hasNextInternal(isYComponent, isCbComponent, isCrComponent);
// reset bit index to the beginning of the data unit
if (!hasNext) {
currentBitIndex = previousBitIndex;
}
return hasNext;
}
	/**
	 * Attempts to decode one complete 8x8 data unit from the bit stream into
	 * {@code currentPixels}. Returns false when the stream ends before the unit
	 * is complete; restoring the bit position is the caller's responsibility.
	 *
	 * @throws IOException if the bits do not form a valid Huffman codeword
	 */
	private boolean hasNextInternal(boolean isYComponent, boolean isCbComponent, boolean isCrComponent) throws IOException {
		int index = peekNext(BIT_IN_TWO_BYTES);
		if (index < 0) {
			return false;
		}
		// DC first: the category gives the bit length of the DC diff that follows.
		int dcCategory = lookupDc(isYComponent, index);
		if (dcCategory == -1) {
			throw new IOException("invalid dc category");
		}
		int codeWordLength = getDcCodeWordLength(isYComponent, dcCategory);
		currentBitIndex += codeWordLength;
		int dcBitmask = peekNext(dcCategory);
		if (dcBitmask < 0) {
			return false;
		}
		// dc category also defines number of bits in the diff value
		currentBitIndex += dcCategory;
		double[] zigzagDct = new double[dct.length];
		zigzagDct[0] = mapBitmaskToValue(dcCategory, dcBitmask);
		// DC is coded differentially per component; for the first MCU previous dc should be 0
		int previousDcIndex;
		if (isYComponent) {
			previousDcIndex = 0;
		} else if (isCbComponent) {
			previousDcIndex = 1;
		} else if (isCrComponent) {
			previousDcIndex = 2;
		} else {
			throw new IllegalArgumentException("invalid component");
		}
		zigzagDct[0] += previousDc[previousDcIndex];
		// decode AC coefficients 1..63 (zigzag order)
		for (int i = 1; i < 64; i++) {
			index = peekNext(BIT_IN_TWO_BYTES);
			if (index < 0) {
				return false;
			}
			AcCode code = lookupAc(isYComponent, index);
			if (code == null) {
				throw new IOException("invalid ac code");
			}
			currentBitIndex += code.getCodeLength();
			// EOB: all remaining coefficients are zero
			if (code.getRun() == 0 && code.getCategory() == 0) {
				break;
			}
			// skip the run of zero coefficients preceding this value
			i += code.getRun();
			// not ZRL (a run with no attached value)
			if (code.getCategory() != 0) {
				int acBitmask = peekNext(code.getCategory());
				if (acBitmask < 0) {
					return false;
				}
				currentBitIndex += code.getCategory();
				zigzagDct[i] = mapBitmaskToValue(code.getCategory(), acBitmask);
			}
		}
		// de-zigzag and dequantize into natural coefficient order
		for (int i = 0; i < zigzagDct.length; i++) {
			dct[i] = zigzagDct[ZIGZAG_INDEXES[i]] * getDqt(isYComponent)[ZIGZAG_INDEXES[i]];
		}
		DCTTransform.inverse(dct, true);
		// level-shift by +128 and clamp to the 0..255 pixel range
		for (int i = 0; i < dct.length; i++) {
			currentPixels[i] = (int) (Math.round(dct[i] + 128));
			if (currentPixels[i] < 0) {
				currentPixels[i] = 0;
			} else if (currentPixels[i] > 255) {
				currentPixels[i] = 255;
			}
		}
		// set previous only when DU fully read
		previousDc[previousDcIndex] = zigzagDct[0];
		return true;
	}
public void append(byte[] payload) {
int fullyProcessedBytes = currentBitIndex / 8;
int prependFromCurrentData = data.length - fullyProcessedBytes;
byte[] newData = new byte[prependFromCurrentData + payload.length];
System.arraycopy(data, fullyProcessedBytes, newData, 0, prependFromCurrentData);
System.arraycopy(payload, 0, newData, prependFromCurrentData, payload.length);
currentBitIndex = currentBitIndex % 8;
this.data = newData;
}
public void resetToTheNextByte() {
int overflow = currentBitIndex % 8;
if (overflow > 0) {
currentBitIndex += (8 - currentBitIndex % 8);
}
// next packet starts with DC = 0
for (int i = 0; i < previousDc.length; i++) {
previousDc[i] = 0.0;
}
}
public void reset(byte[] data) {
hasNext = false;
currentBitIndex = 0;
for (int i = 0; i < previousDc.length; i++) {
previousDc[i] = 0.0;
}
this.data = data;
}
	// FIXME append 2 bytes at the last packet just
	/**
	 * Reads {@code numberOfBits} bits starting at {@code currentBitIndex}
	 * without advancing it, packed MSB-first into the low bits of the result.
	 * Returns -1 when the requested bits run past the end of {@code data}
	 * (safe because numberOfBits is always well below 31).
	 */
	private int peekNext(int numberOfBits) {
		if (((currentBitIndex + numberOfBits) / 8) >= data.length) {
			// numberOfBits is always less than 31
			return -1;
		}
		int result = 0;
		for (int i = 0; i < numberOfBits; i++) {
			int bitIndex = currentBitIndex + i;
			int currentByteIndex = bitIndex >> 3;
			// peeking into next 16 bits might overflow byte array
			// this is fine, since not all bits will be used for matching codeword
			// stuff the overflow with zeroes
			if (currentByteIndex >= data.length) {
				result = result << 1;
				continue;
			}
			int bit;
			// bits within a byte are consumed most-significant first
			if (((data[currentByteIndex] & 0xFF) & (1 << (7 - (bitIndex & 7)))) != 0) {
				bit = 1;
			} else {
				bit = 0;
			}
			result = (result << 1) | bit;
		}
		return result;
	}
private int[] getDqt(boolean isYComponent) {
if (isYComponent) {
return yDqt;
} else {
return cbcrDqt;
}
}
private int getDcCodeWordLength(boolean isYComponent, int dcCategory) {
DcCode[] lookupTable;
if (isYComponent) {
lookupTable = dcYCodes;
} else {
lookupTable = dcCbcrCodes;
}
return lookupTable[dcCategory].getCodeLength();
}
private int lookupDc(boolean isYComponent, int index) {
int[] lookupTable;
if (isYComponent) {
lookupTable = dcYLookup;
} else {
lookupTable = dcCbcrLookup;
}
return lookupTable[index];
}
private AcCode lookupAc(boolean isYComponent, int index) {
AcCode[] lookupTable;
if (isYComponent) {
lookupTable = acYLookup;
} else {
lookupTable = acCbcrLookup;
}
return lookupTable[index];
}
public int[] next() {
if (!hasNext) {
throw new NoSuchElementException();
}
return currentPixels;
}
/**
 * Builds a direct-addressed DC lookup table: for every possible 16-bit
 * window, the category of the Huffman code that matches its prefix, or -1
 * when no codeword matches.
 *
 * @param dcCodes the DC Huffman codes to index
 * @return lookup table of size TWO_BYTES_POSSIBLE_VALUES
 */
private static int[] setupDcLookup(DcCode[] dcCodes) {
    Map<Integer, DcCode> byCodeword = indexByCode(dcCodes);
    int[] lookup = new int[TWO_BYTES_POSSIBLE_VALUES];
    for (int window = 0; window < TWO_BYTES_POSSIBLE_VALUES; window++) {
        DcCode match = findCode(window, byCodeword);
        // null only on a corrupt stream / defensive check
        lookup[window] = (match == null) ? -1 : match.getCategory();
    }
    return lookup;
}
/**
 * Builds a direct-addressed AC lookup table: for every possible 16-bit
 * window, the AcCode whose codeword is a prefix of it (or null when none
 * matches).
 *
 * @param acCodes the AC Huffman codes to index
 * @return lookup table of size TWO_BYTES_POSSIBLE_VALUES
 */
private static AcCode[] setupAcLookup(AcCode[] acCodes) {
    Map<Integer, AcCode> byCodeword = indexByCode(acCodes);
    AcCode[] lookup = new AcCode[TWO_BYTES_POSSIBLE_VALUES];
    for (int window = 0; window < TWO_BYTES_POSSIBLE_VALUES; window++) {
        lookup[window] = findCode(window, byCodeword);
    }
    return lookup;
}
/**
 * Finds the Huffman code whose codeword is a prefix of the given 16-bit
 * window, preferring shorter codewords (Huffman codes are prefix-free, so at
 * most one length can match).
 *
 * @param twoByteIndex the 16-bit window to match against
 * @param index        codes indexed by their codeword value
 * @return the matching code, or null if no codeword is a prefix
 */
private static <T extends DcCode> T findCode(int twoByteIndex, Map<Integer, T> index) {
    // Dropping `shift` low bits leaves the top (BIT_IN_TWO_BYTES - shift)
    // bits, i.e. candidate codewords from shortest to longest.
    for (int shift = BIT_IN_TWO_BYTES; shift >= 0; shift--) {
        T candidate = index.get(twoByteIndex >> shift);
        // The codeword value alone is ambiguous across lengths, so also
        // require the indexed code to have exactly the length we probed.
        if (candidate != null && candidate.getCodeLength() == BIT_IN_TWO_BYTES - shift) {
            return candidate;
        }
    }
    return null;
}
/**
 * Indexes Huffman codes by their codeword value.
 *
 * @param list the codes to index
 * @return map from codeword value to code
 */
private static <T extends DcCode> Map<Integer, T> indexByCode(T[] list) {
    Map<Integer, T> byCodeword = new HashMap<>();
    for (int i = 0; i < list.length; i++) {
        byCodeword.put(list[i].getCodeword(), list[i]);
    }
    return byCodeword;
}
/**
 * Expands a {@link JPEGHuffmanTable} into explicit DC codes using the
 * canonical Huffman construction: codewords of equal length are consecutive
 * integers, and the running codeword doubles when moving to the next length.
 *
 * @param table the Huffman table (per-length counts plus values)
 * @return one DcCode per codeword, carrying length, codeword and category
 */
private static DcCode[] createTable(JPEGHuffmanTable table) {
    List<DcCode> codes = new ArrayList<>();
    int codeword = 0;
    int valueIndex = 0;
    for (int lengthIdx = 0; lengthIdx < table.getLengths().length; lengthIdx++) {
        int count = table.getLengths()[lengthIdx];
        for (int n = 0; n < count; n++) {
            DcCode entry = new DcCode();
            // lengths[i] holds the number of codewords of length i + 1
            entry.setCodeLength(lengthIdx + 1);
            entry.setCodeword(codeword);
            entry.setCategory(table.getValues()[valueIndex]);
            codes.add(entry);
            codeword++;
            valueIndex++;
        }
        // Canonical step: appending a zero bit when the length grows.
        codeword <<= 1;
    }
    return codes.toArray(new DcCode[0]);
}
/**
 * Expands a {@link JPEGHuffmanTable} into explicit AC codes using the same
 * canonical Huffman construction as {@code createTable}, additionally
 * splitting each table value into its zero-run length and magnitude
 * category.
 *
 * @param table the Huffman table (per-length counts plus run/category values)
 * @return one AcCode per codeword
 */
private static AcCode[] createAcTable(JPEGHuffmanTable table) {
    List<AcCode> codes = new ArrayList<>();
    int codeword = 0;
    int valueIndex = 0;
    for (int lengthIdx = 0; lengthIdx < table.getLengths().length; lengthIdx++) {
        int count = table.getLengths()[lengthIdx];
        for (int n = 0; n < count; n++) {
            int runAndCategory = table.getValues()[valueIndex];
            AcCode entry = new AcCode();
            entry.setCodeLength(lengthIdx + 1);
            entry.setCodeword(codeword);
            // High nibble = run of zero coefficients, low nibble = category.
            entry.setRun(runAndCategory >> 4);
            entry.setCategory(runAndCategory & 0xF);
            codes.add(entry);
            codeword++;
            valueIndex++;
        }
        // Canonical step: appending a zero bit when the length grows.
        codeword <<= 1;
    }
    return codes.toArray(new AcCode[0]);
}
/**
 * Maps a raw magnitude bit pattern to its signed coefficient value (the JPEG
 * "extend" procedure): a pattern whose leading bit is 1 is positive and used
 * as-is; otherwise the value is negative and offset by (2^category - 1).
 *
 * @param dcCategory number of magnitude bits (the coefficient's category)
 * @param bitmask    the raw bits read from the stream
 * @return the decoded signed value
 */
private static int mapBitmaskToValue(int dcCategory, int bitmask) {
    int maxval = (1 << dcCategory) - 1;
    boolean positive = (bitmask >> (dcCategory - 1)) != 0;
    return positive ? bitmask : bitmask - maxval;
}
}
|
|
// CharExtractionPanel.java
// Copyright (c) 2010 William Whitney
// All rights reserved.
// This software is released under the BSD license.
// Please see the accompanying LICENSE.txt for details.
package net.sourceforge.javaocr.gui.characterExtractor;
import java.awt.BorderLayout;
import java.awt.FlowLayout;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import java.util.logging.Logger;
import javax.swing.BoxLayout;
import javax.swing.JButton;
import javax.swing.JFileChooser;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JTextArea;
import javax.swing.JTextField;
import javax.swing.border.TitledBorder;
import net.sourceforge.javaocr.gui.GUIController;
/**
 * Provides a panel to save out individual characters as images.
 *
 * The user picks a source image, an output directory and a character size in
 * pixels; the actual isolation and saving of the characters is delegated to
 * the {@link GUIController}.
 *
 * @author William Whitney
 */
public class CharExtractionPanel extends JPanel
{
    public static final long serialVersionUID = 0;
    private static final Logger LOG = Logger.getLogger(CharExtractionPanel.class.getName());
    /** Path of the image to extract characters from. */
    private JTextField imageLoc;
    /** Path of the directory the character images are written to. */
    private JTextField outputDir;
    /** Controller that performs the actual extraction work. */
    private final GUIController guiController;
    /** Requested output character size (width and height) in pixels. */
    private JTextField pixelSize;

    /**
     * Builds the panel: description at the top, file/folder/size selectors in
     * the center and the extract button on the right.
     *
     * @param guiController controller used to run the extraction
     */
    public CharExtractionPanel(GUIController guiController)
    {
        this.guiController = guiController;
        //Set Layout
        this.setLayout(new BorderLayout());
        //Add Description
        this.add(getDescription(), BorderLayout.NORTH);
        //Get Control Panel
        this.add(getFileAndFolderSelectionPanel(), BorderLayout.CENTER);
        //Add Extract button
        this.add(getExtractButton(), BorderLayout.EAST);
    }

    /**
     * Creates the read-only description area shown at the top of the panel.
     *
     * @return text area styled to blend into the panel background
     */
    private JTextArea getDescription()
    {
        TitledBorder border = new TitledBorder("Description");
        JTextArea textArea = new JTextArea();
        textArea.setBorder(border);
        textArea.setEditable(false);
        textArea.setBackground(this.getBackground());
        String desc = "This feature will isolate characters within an image and save them individually\nto the selected output directory.";
        textArea.setText(desc);
        return textArea;
    }

    /**
     * Groups the image selector, output-directory selector and pixel-size
     * field into one control panel.
     *
     * @return the assembled selection panel
     */
    private JPanel getFileAndFolderSelectionPanel()
    {
        JPanel panel = new JPanel();
        panel.setLayout(new FlowLayout(FlowLayout.RIGHT));
        //Create Image Selector
        panel.add(getImageSelectPanel());
        //Create Output Dir Selector
        panel.add(getOutputDirSelector());
        //Create pixel selection panel
        panel.add(getPixelSizeSelectionPanel());
        return panel;
    }

    /**
     * Creates the pixel-size input row, pre-populated with a 75px default.
     *
     * @return panel containing the label and size text field
     */
    private JPanel getPixelSizeSelectionPanel()
    {
        JPanel panel = new JPanel();
        panel.setLayout(new FlowLayout(FlowLayout.LEFT));
        panel.add(new JLabel("Select Output Character Size In Pixels: "));
        pixelSize = new JTextField(5);
        pixelSize.setText("75");
        panel.add(pixelSize);
        return panel;
    }

    /**
     * Creates the "Extract Characters" button. On click the inputs are
     * validated (existing image and output directory, strictly positive pixel
     * size) before the extraction is delegated to the controller.
     *
     * @return panel containing the extract button
     */
    private JPanel getExtractButton()
    {
        JPanel btnPanel = new JPanel();
        btnPanel.setLayout(new FlowLayout());
        JButton btn = new JButton("Extract Characters");
        btn.addActionListener(new ActionListener()
        {
            public void actionPerformed(ActionEvent e)
            {
                boolean isOk = true;
                int stdSize = -1;
                File imageFile = new File(imageLoc.getText());
                File outDir = new File(outputDir.getText());
                if (!imageFile.exists() || !outDir.exists())
                {
                    JOptionPane.showMessageDialog(null, "Check image or output directory!");
                    isOk = false;
                }
                try
                {
                    stdSize = Integer.parseInt(pixelSize.getText());
                    //Reject zero or negative sizes instead of handing a
                    //nonsensical size to the extractor.
                    if (stdSize <= 0)
                    {
                        throw new NumberFormatException("size must be positive");
                    }
                }
                catch (NumberFormatException err)
                {
                    //Fixed typo in the user-facing message ("pixles").
                    JOptionPane.showMessageDialog(null, "Input valid size in pixels!");
                    isOk = false;
                }
                if (isOk)
                {
                    //Characters are extracted as squares: width == height.
                    guiController.extractChars(imageFile, outDir, stdSize, stdSize);
                    JOptionPane.showMessageDialog(null, "Done!");
                }
            }
        });
        btnPanel.add(btn);
        return btnPanel;
    }

    /**
     * Creates the source-image row: label, path field and browse button.
     *
     * @return panel with the image selection controls
     */
    private JPanel getImageSelectPanel()
    {
        JPanel imageSel = new JPanel();
        imageSel.setLayout(new BoxLayout(imageSel, BoxLayout.X_AXIS));
        JLabel imgLable = new JLabel("Select Image: ");
        imageSel.add(imgLable);
        imageLoc = new JTextField(40);
        imageSel.add(imageLoc);
        JButton button = new JButton("Select");
        button.addActionListener(getImageSelectAction());
        imageSel.add(button);
        return imageSel;
    }

    /**
     * Creates the output-directory row: label, path field and browse button.
     *
     * @return panel with the output directory controls
     */
    private JPanel getOutputDirSelector()
    {
        JPanel dirSelectPanel = new JPanel();
        dirSelectPanel.setLayout(new BoxLayout(dirSelectPanel, BoxLayout.X_AXIS));
        JLabel dirLabel = new JLabel("Select Output Dir: ");
        dirSelectPanel.add(dirLabel);
        outputDir = new JTextField(40);
        dirSelectPanel.add(outputDir);
        JButton button = new JButton("Select");
        button.addActionListener(getOutDirSelectAction());
        dirSelectPanel.add(button);
        return dirSelectPanel;
    }

    /**
     * Action that opens a file chooser (files only) and copies the chosen
     * path into the image-location field.
     *
     * @return listener for the image "Select" button
     */
    private ActionListener getImageSelectAction()
    {
        return new ActionListener()
        {
            public void actionPerformed(ActionEvent e)
            {
                JFileChooser chooser = new JFileChooser();
                chooser.setFileSelectionMode(JFileChooser.FILES_ONLY);
                int returnVal = chooser.showOpenDialog(null);
                if (returnVal == JFileChooser.APPROVE_OPTION)
                {
                    imageLoc.setText(chooser.getSelectedFile().getAbsolutePath());
                }
            }
        };
    }

    /**
     * Action that opens a file chooser (directories only) and copies the
     * chosen path into the output-directory field.
     *
     * @return listener for the output directory "Select" button
     */
    private ActionListener getOutDirSelectAction()
    {
        return new ActionListener()
        {
            public void actionPerformed(ActionEvent e)
            {
                JFileChooser chooser = new JFileChooser();
                chooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
                int returnVal = chooser.showOpenDialog(null);
                if (returnVal == JFileChooser.APPROVE_OPTION)
                {
                    outputDir.setText(chooser.getSelectedFile().getAbsolutePath());
                }
            }
        };
    }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.builder.endpoint.dsl;
import javax.annotation.Generated;
import org.apache.camel.builder.EndpointConsumerBuilder;
import org.apache.camel.builder.EndpointProducerBuilder;
import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
/**
* Manage AWS EC2 instances using AWS SDK version 2.x.
*
* Generated by camel build tools - do NOT edit this file!
*/
@Generated("org.apache.camel.maven.packaging.EndpointDslMojo")
public interface AWS2EC2EndpointBuilderFactory {
/**
* Builder for endpoint for the AWS 2 Elastic Compute Cloud (EC2) component.
*/
public interface AWS2EC2EndpointBuilder extends EndpointProducerBuilder {
/**
* Switches to the advanced-options view of this builder. Safe because
* the concrete builder (see {@code endpointBuilder}) implements both
* interfaces.
*/
default AdvancedAWS2EC2EndpointBuilder advanced() {
return (AdvancedAWS2EC2EndpointBuilder) this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder accessKey(String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* To use a existing configured AmazonEC2Client as client.
*
* The option is a:
* <code>software.amazon.awssdk.services.ec2.Ec2Client</code> type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder amazonEc2Client(Object amazonEc2Client) {
doSetProperty("amazonEc2Client", amazonEc2Client);
return this;
}
/**
* To use a existing configured AmazonEC2Client as client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.services.ec2.Ec2Client</code> type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder amazonEc2Client(String amazonEc2Client) {
doSetProperty("amazonEc2Client", amazonEc2Client);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*/
default AWS2EC2EndpointBuilder lazyStartProducer(
boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*/
default AWS2EC2EndpointBuilder lazyStartProducer(
String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The operation to perform. It can be createAndRunInstances,
* startInstances, stopInstances, terminateInstances, describeInstances,
* describeInstancesStatus, rebootInstances, monitorInstances,
* unmonitorInstances, createTags or deleteTags.
*
* The option is a:
* <code>org.apache.camel.component.aws2.ec2.AWS2EC2Operations</code>
* type.
*
* Required: true
* Group: producer
*/
default AWS2EC2EndpointBuilder operation(AWS2EC2Operations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The operation to perform. It can be createAndRunInstances,
* startInstances, stopInstances, terminateInstances, describeInstances,
* describeInstancesStatus, rebootInstances, monitorInstances,
* unmonitorInstances, createTags or deleteTags.
*
* The option will be converted to a
* <code>org.apache.camel.component.aws2.ec2.AWS2EC2Operations</code>
* type.
*
* Required: true
* Group: producer
*/
default AWS2EC2EndpointBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*/
default AWS2EC2EndpointBuilder pojoRequest(boolean pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*/
default AWS2EC2EndpointBuilder pojoRequest(String pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* To define a proxy host when instantiating the EC2 client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder proxyHost(String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the EC2 client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder proxyPort(Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy port when instantiating the EC2 client.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder proxyPort(String proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the EC2 client.
*
* The option is a: <code>software.amazon.awssdk.core.Protocol</code>
* type.
*
* Default: HTTPS
* Group: producer
*/
default AWS2EC2EndpointBuilder proxyProtocol(Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* To define a proxy protocol when instantiating the EC2 client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: producer
*/
default AWS2EC2EndpointBuilder proxyProtocol(String proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* The region in which EC2 client needs to work. When using this
* parameter, the configuration will expect the lowercase name of the
* region (for example ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder region(String region) {
doSetProperty("region", region);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*/
default AWS2EC2EndpointBuilder secretKey(String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*/
default AWS2EC2EndpointBuilder trustAllCertificates(
boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*/
default AWS2EC2EndpointBuilder trustAllCertificates(
String trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
}
/**
* Advanced builder for endpoint for the AWS 2 Elastic Compute Cloud (EC2)
* component.
*/
public interface AdvancedAWS2EC2EndpointBuilder
extends
EndpointProducerBuilder {
/**
* Switches back to the basic-options view of this builder. Safe because
* the concrete builder (see {@code endpointBuilder}) implements both
* interfaces.
*/
default AWS2EC2EndpointBuilder basic() {
return (AWS2EC2EndpointBuilder) this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*/
default AdvancedAWS2EC2EndpointBuilder synchronous(boolean synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*/
default AdvancedAWS2EC2EndpointBuilder synchronous(String synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
}
/**
* Proxy enum for
* <code>org.apache.camel.component.aws2.ec2.AWS2EC2Operations</code> enum.
*/
enum AWS2EC2Operations {
createAndRunInstances,
startInstances,
stopInstances,
terminateInstances,
describeInstances,
describeInstancesStatus,
rebootInstances,
monitorInstances,
unmonitorInstances,
createTags,
deleteTags;
}
/**
* Proxy enum for <code>software.amazon.awssdk.core.Protocol</code> enum.
*/
enum Protocol {
HTTP,
HTTPS;
}
public interface AWS2EC2Builders {
/**
* AWS 2 Elastic Compute Cloud (EC2) (camel-aws2-ec2)
* Manage AWS EC2 instances using AWS SDK version 2.x.
*
* Category: cloud,management
* Since: 3.1
* Maven coordinates: org.apache.camel:camel-aws2-ec2
*
* Syntax: <code>aws2-ec2:label</code>
*
* Path parameter: label (required)
* Logical name
*
* @param path label
*/
default AWS2EC2EndpointBuilder aws2Ec2(String path) {
return AWS2EC2EndpointBuilderFactory.endpointBuilder("aws2-ec2", path);
}
/**
* AWS 2 Elastic Compute Cloud (EC2) (camel-aws2-ec2)
* Manage AWS EC2 instances using AWS SDK version 2.x.
*
* Category: cloud,management
* Since: 3.1
* Maven coordinates: org.apache.camel:camel-aws2-ec2
*
* Syntax: <code>aws2-ec2:label</code>
*
* Path parameter: label (required)
* Logical name
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path label
*/
default AWS2EC2EndpointBuilder aws2Ec2(String componentName, String path) {
return AWS2EC2EndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* Creates the concrete endpoint builder for the given component name and
* path. The local class implements both the basic and the advanced builder
* interfaces, which is what makes the advanced()/basic() casts above safe.
*/
static AWS2EC2EndpointBuilder endpointBuilder(
String componentName,
String path) {
class AWS2EC2EndpointBuilderImpl extends AbstractEndpointBuilder implements AWS2EC2EndpointBuilder, AdvancedAWS2EC2EndpointBuilder {
public AWS2EC2EndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new AWS2EC2EndpointBuilderImpl(path);
}
}
|
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/datastore/v1/query.proto
package com.google.datastore.v1;
/**
* <pre>
* A reference to a property relative to the kind expressions.
* </pre>
*
* Protobuf type {@code google.datastore.v1.PropertyReference}
*/
public final class PropertyReference extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:google.datastore.v1.PropertyReference)
PropertyReferenceOrBuilder {
// Use PropertyReference.newBuilder() to construct.
private PropertyReference(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private PropertyReference() {
name_ = "";
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return com.google.protobuf.UnknownFieldSet.getDefaultInstance();
}
private PropertyReference(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
int mutable_bitField0_ = 0;
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!input.skipField(tag)) {
done = true;
}
break;
}
case 18: {
java.lang.String s = input.readStringRequireUtf8();
name_ = s;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.google.datastore.v1.QueryProto.internal_static_google_datastore_v1_PropertyReference_descriptor;
}
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.datastore.v1.QueryProto.internal_static_google_datastore_v1_PropertyReference_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.datastore.v1.PropertyReference.class, com.google.datastore.v1.PropertyReference.Builder.class);
}
public static final int NAME_FIELD_NUMBER = 2;
private volatile java.lang.Object name_;
/**
* <pre>
* The name of the property.
* If name includes "."s, it may be interpreted as a property name path.
* </pre>
*
* <code>optional string name = 2;</code>
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
name_ = s;
return s;
}
}
/**
* <pre>
* The name of the property.
* If name includes "."s, it may be interpreted as a property name path.
* </pre>
*
* <code>optional string name = 2;</code>
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (!getNameBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
}
}
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!getNameBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
}
memoizedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.datastore.v1.PropertyReference)) {
return super.equals(obj);
}
com.google.datastore.v1.PropertyReference other = (com.google.datastore.v1.PropertyReference) obj;
boolean result = true;
result = result && getName()
.equals(other.getName());
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.datastore.v1.PropertyReference parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.datastore.v1.PropertyReference parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.datastore.v1.PropertyReference parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.datastore.v1.PropertyReference parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.datastore.v1.PropertyReference parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static com.google.datastore.v1.PropertyReference parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static com.google.datastore.v1.PropertyReference parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.datastore.v1.PropertyReference parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static com.google.datastore.v1.PropertyReference parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static com.google.datastore.v1.PropertyReference parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(com.google.datastore.v1.PropertyReference prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
* A reference to a property relative to the kind expressions.
* </pre>
*
* Protobuf type {@code google.datastore.v1.PropertyReference}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:google.datastore.v1.PropertyReference)
com.google.datastore.v1.PropertyReferenceOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.google.datastore.v1.QueryProto.internal_static_google_datastore_v1_PropertyReference_descriptor;
}
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.datastore.v1.QueryProto.internal_static_google_datastore_v1_PropertyReference_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.datastore.v1.PropertyReference.class, com.google.datastore.v1.PropertyReference.Builder.class);
}
// Construct using com.google.datastore.v1.PropertyReference.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
public Builder clear() {
super.clear();
name_ = "";
return this;
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return com.google.datastore.v1.QueryProto.internal_static_google_datastore_v1_PropertyReference_descriptor;
}
public com.google.datastore.v1.PropertyReference getDefaultInstanceForType() {
return com.google.datastore.v1.PropertyReference.getDefaultInstance();
}
public com.google.datastore.v1.PropertyReference build() {
com.google.datastore.v1.PropertyReference result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public com.google.datastore.v1.PropertyReference buildPartial() {
com.google.datastore.v1.PropertyReference result = new com.google.datastore.v1.PropertyReference(this);
result.name_ = name_;
onBuilt();
return result;
}
public Builder clone() {
return (Builder) super.clone();
}
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) {
return (Builder) super.setField(field, value);
}
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) {
return (Builder) super.addRepeatedField(field, value);
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.google.datastore.v1.PropertyReference) {
return mergeFrom((com.google.datastore.v1.PropertyReference)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(com.google.datastore.v1.PropertyReference other) {
if (other == com.google.datastore.v1.PropertyReference.getDefaultInstance()) return this;
if (!other.getName().isEmpty()) {
name_ = other.name_;
onChanged();
}
onChanged();
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.datastore.v1.PropertyReference parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (com.google.datastore.v1.PropertyReference) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private java.lang.Object name_ = "";
/**
* <pre>
* The name of the property.
* If name includes "."s, it may be interpreted as a property name path.
* </pre>
*
* <code>optional string name = 2;</code>
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
name_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <pre>
* The name of the property.
* If name includes "."s, it may be interpreted as a property name path.
* </pre>
*
* <code>optional string name = 2;</code>
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <pre>
* The name of the property.
* If name includes "."s, it may be interpreted as a property name path.
* </pre>
*
* <code>optional string name = 2;</code>
*/
public Builder setName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
name_ = value;
onChanged();
return this;
}
/**
* <pre>
* The name of the property.
* If name includes "."s, it may be interpreted as a property name path.
* </pre>
*
* <code>optional string name = 2;</code>
*/
public Builder clearName() {
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
/**
 * <pre>
 * The name of the property.
 * If name includes "."s, it may be interpreted as a property name path.
 * </pre>
 *
 * <code>optional string name = 2;</code>
 */
public Builder setNameBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  // proto3 strings must be valid UTF-8; reject malformed input up front.
  checkByteStringIsUtf8(value);
  name_ = value;
  onChanged();
  return this;
}
// NOTE(review): this generated proto3 builder intentionally discards unknown
// fields rather than storing them — behavior of the protoc version used to
// generate this file, not a hand-written omission.
public final Builder setUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return this;
}
public final Builder mergeUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return this;
}
// @@protoc_insertion_point(builder_scope:google.datastore.v1.PropertyReference)
}
// @@protoc_insertion_point(class_scope:google.datastore.v1.PropertyReference)
// Singleton default instance, created once at class-load time; all fields
// carry their proto3 default values.
private static final com.google.datastore.v1.PropertyReference DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new com.google.datastore.v1.PropertyReference();
}
public static com.google.datastore.v1.PropertyReference getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
// Parser singleton used by the static parseFrom() entry points; delegates to
// the stream-reading constructor of PropertyReference.
private static final com.google.protobuf.Parser<PropertyReference>
    PARSER = new com.google.protobuf.AbstractParser<PropertyReference>() {
  public PropertyReference parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new PropertyReference(input, extensionRegistry);
  }
};
public static com.google.protobuf.Parser<PropertyReference> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<PropertyReference> getParserForType() {
  return PARSER;
}
// Instance-level accessor for the shared default instance (part of the
// MessageLite contract).
public com.google.datastore.v1.PropertyReference getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshClusterMaxPriorityRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
import org.apache.hadoop.thirdparty.protobuf.ServiceException;
@Private
public class ResourceManagerAdministrationProtocolPBClientImpl implements ResourceManagerAdministrationProtocol, Closeable {

  /**
   * Client-side protobuf translator for
   * {@link ResourceManagerAdministrationProtocol}. Every RPC method follows the
   * same pattern: unwrap the request record to its generated proto form, invoke
   * the RPC proxy, and wrap the proto response back into a record.
   * {@code RPCUtil.unwrapAndThrowException} always throws (converting the
   * {@link ServiceException} into a {@link YarnException} or
   * {@link IOException}), so the {@code return null} that follows each call is
   * unreachable in practice but required by the compiler.
   */

  /** Underlying protobuf RPC proxy; set to null once {@link #close()} has run. */
  private ResourceManagerAdministrationProtocolPB proxy;

  public ResourceManagerAdministrationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
      Configuration conf) throws IOException {
    RPC.setProtocolEngine(conf, ResourceManagerAdministrationProtocolPB.class,
        ProtobufRpcEngine.class);
    proxy = (ResourceManagerAdministrationProtocolPB) RPC.getProxy(
        ResourceManagerAdministrationProtocolPB.class, clientVersion, addr, conf);
  }

  @Override
  public void close() {
    if (this.proxy != null) {
      RPC.stopProxy(this.proxy);
      // Drop the reference so a second close() is a harmless no-op.
      this.proxy = null;
    }
  }

  @Override
  public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
      throws YarnException, IOException {
    RefreshQueuesRequestProto requestProto =
        ((RefreshQueuesRequestPBImpl) request).getProto();
    try {
      return new RefreshQueuesResponsePBImpl(
          proxy.refreshQueues(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
      throws YarnException, IOException {
    RefreshNodesRequestProto requestProto =
        ((RefreshNodesRequestPBImpl) request).getProto();
    try {
      return new RefreshNodesResponsePBImpl(
          proxy.refreshNodes(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
      RefreshSuperUserGroupsConfigurationRequest request)
      throws YarnException, IOException {
    RefreshSuperUserGroupsConfigurationRequestProto requestProto =
        ((RefreshSuperUserGroupsConfigurationRequestPBImpl) request).getProto();
    try {
      return new RefreshSuperUserGroupsConfigurationResponsePBImpl(
          proxy.refreshSuperUserGroupsConfiguration(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
      RefreshUserToGroupsMappingsRequest request) throws YarnException,
      IOException {
    RefreshUserToGroupsMappingsRequestProto requestProto =
        ((RefreshUserToGroupsMappingsRequestPBImpl) request).getProto();
    try {
      return new RefreshUserToGroupsMappingsResponsePBImpl(
          proxy.refreshUserToGroupsMappings(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RefreshAdminAclsResponse refreshAdminAcls(
      RefreshAdminAclsRequest request) throws YarnException, IOException {
    RefreshAdminAclsRequestProto requestProto =
        ((RefreshAdminAclsRequestPBImpl) request).getProto();
    try {
      return new RefreshAdminAclsResponsePBImpl(
          proxy.refreshAdminAcls(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RefreshServiceAclsResponse refreshServiceAcls(
      RefreshServiceAclsRequest request) throws YarnException,
      IOException {
    RefreshServiceAclsRequestProto requestProto =
        ((RefreshServiceAclsRequestPBImpl) request).getProto();
    try {
      return new RefreshServiceAclsResponsePBImpl(proxy.refreshServiceAcls(
          null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public String[] getGroupsForUser(String user) throws IOException {
    GetGroupsForUserRequestProto requestProto =
        GetGroupsForUserRequestProto.newBuilder().setUser(user).build();
    try {
      GetGroupsForUserResponseProto responseProto =
          proxy.getGroupsForUser(null, requestProto);
      // List.toArray(T[]) already returns String[]; the previous explicit
      // cast was redundant.
      return responseProto.getGroupsList().toArray(
          new String[responseProto.getGroupsCount()]);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public UpdateNodeResourceResponse updateNodeResource(
      UpdateNodeResourceRequest request) throws YarnException, IOException {
    UpdateNodeResourceRequestProto requestProto =
        ((UpdateNodeResourceRequestPBImpl) request).getProto();
    try {
      return new UpdateNodeResourceResponsePBImpl(proxy.updateNodeResource(null,
          requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RefreshNodesResourcesResponse refreshNodesResources(
      RefreshNodesResourcesRequest request) throws YarnException, IOException {
    RefreshNodesResourcesRequestProto requestProto =
        ((RefreshNodesResourcesRequestPBImpl) request).getProto();
    try {
      return new RefreshNodesResourcesResponsePBImpl(
          proxy.refreshNodesResources(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public AddToClusterNodeLabelsResponse addToClusterNodeLabels(
      AddToClusterNodeLabelsRequest request) throws YarnException, IOException {
    AddToClusterNodeLabelsRequestProto requestProto =
        ((AddToClusterNodeLabelsRequestPBImpl) request).getProto();
    try {
      return new AddToClusterNodeLabelsResponsePBImpl(
          proxy.addToClusterNodeLabels(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels(
      RemoveFromClusterNodeLabelsRequest request) throws YarnException,
      IOException {
    RemoveFromClusterNodeLabelsRequestProto requestProto =
        ((RemoveFromClusterNodeLabelsRequestPBImpl) request).getProto();
    try {
      return new RemoveFromClusterNodeLabelsResponsePBImpl(
          proxy.removeFromClusterNodeLabels(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public ReplaceLabelsOnNodeResponse replaceLabelsOnNode(
      ReplaceLabelsOnNodeRequest request) throws YarnException, IOException {
    ReplaceLabelsOnNodeRequestProto requestProto =
        ((ReplaceLabelsOnNodeRequestPBImpl) request).getProto();
    try {
      // Note: the proxy method is named replaceLabelsOnNodes (plural) in the
      // generated protocol interface.
      return new ReplaceLabelsOnNodeResponsePBImpl(proxy.replaceLabelsOnNodes(
          null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
      CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest)
      throws YarnException, IOException {
    CheckForDecommissioningNodesRequestProto requestProto =
        ((CheckForDecommissioningNodesRequestPBImpl) checkForDecommissioningNodesRequest)
            .getProto();
    try {
      return new CheckForDecommissioningNodesResponsePBImpl(
          proxy.checkForDecommissioningNodes(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
      RefreshClusterMaxPriorityRequest request) throws YarnException,
      IOException {
    RefreshClusterMaxPriorityRequestProto requestProto =
        ((RefreshClusterMaxPriorityRequestPBImpl) request).getProto();
    try {
      return new RefreshClusterMaxPriorityResponsePBImpl(
          proxy.refreshClusterMaxPriority(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }

  @Override
  public NodesToAttributesMappingResponse mapAttributesToNodes(
      NodesToAttributesMappingRequest request)
      throws YarnException, IOException {
    NodesToAttributesMappingRequestProto requestProto =
        ((NodesToAttributesMappingRequestPBImpl) request).getProto();
    try {
      return new NodesToAttributesMappingResponsePBImpl(
          proxy.mapAttributesToNodes(null, requestProto));
    } catch (ServiceException e) {
      RPCUtil.unwrapAndThrowException(e);
      return null;
    }
  }
}
|
|
package main.java.engine.factory;
import java.awt.geom.Point2D;
import java.io.Serializable;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import jgame.impl.JGEngineInterface;
import main.java.engine.Model;
import main.java.engine.PathfinderManager;
import main.java.engine.map.TDMap;
import main.java.engine.objects.Exit;
import main.java.engine.objects.monster.Monster;
import main.java.engine.objects.powerup.TDPowerupPowerup;
import main.java.engine.objects.tower.BombTower;
import main.java.engine.objects.tower.FreezeTower;
import main.java.engine.objects.tower.ITower;
import main.java.engine.objects.tower.MoneyTower;
import main.java.engine.objects.tower.ShootingTower;
import main.java.engine.objects.tower.SimpleTower;
import main.java.engine.objects.tower.TowerBehaviors;
import main.java.engine.util.Reflection;
import main.java.exceptions.engine.ItemCreationFailureException;
import main.java.exceptions.engine.MonsterCreationFailureException;
import main.java.exceptions.engine.TowerCreationFailureException;
import main.java.schema.tdobjects.ItemSchema;
import main.java.schema.tdobjects.MonsterSchema;
import main.java.schema.tdobjects.TDObjectSchema;
import main.java.schema.tdobjects.TowerSchema;
/**
* A factory in charge of creating objects based on schemas
* Objects created include towers, monsters, and items
*
*/
public class TDObjectFactory {
private static final String ITEM_PATH = "main.java.engine.objects.item.";
private JGEngineInterface engine;
private Map<String, TDObjectSchema> tdObjectSchemaMap;
private List<String> possibleTowersNames;
private List<String> possibleItemNames;
private Map<String, String> towerMap;
private Map<String, String> itemMap;
public TDObjectFactory (JGEngineInterface engine) {
this.engine = engine;
tdObjectSchemaMap = new HashMap<>();
possibleTowersNames = new ArrayList<String>();
possibleItemNames = new ArrayList<String>();
towerMap = new HashMap<String, String>();
itemMap = new HashMap<String, String>();
}
/**
* Load tower defense object schemas into schema map
*
* @param schemas
*/
public void loadTDObjectSchemas (List<TDObjectSchema> schemas) {
for (TDObjectSchema s : schemas) {
String objName = (String) s.getAttributesMap().get(TDObjectSchema.NAME);
String objImagePath =
Model.RESOURCE_PATH + s.getAttributesMap().get(TDObjectSchema.IMAGE_NAME);
engine.defineImage(objName, "-", 1, objImagePath, "-");
tdObjectSchemaMap.put(objName, s);
}
}
/**
* Load tower schemas
*
* @param schemas
*/
@SuppressWarnings("unchecked")
public void loadTowerSchemas (List<TowerSchema> schemas) {
for (TowerSchema towerschema: schemas) {
String towerName = (String) towerschema.getAttributesMap().get(TDObjectSchema.NAME);
String towerDescription = (String) towerschema.getAttributesMap().get(TowerSchema.DESCRIPTION);
possibleTowersNames.add(towerName);
towerMap.put(towerName, towerDescription);
defineBulletImage(towerschema, TowerSchema.BULLET_IMAGE_NAME);
defineBulletImage(towerschema, TowerSchema.SHRAPNEL_IMAGE_NAME);
}
// Perhaps a better method of casting than using an intermediate wildcard type?
loadTDObjectSchemas((List<TDObjectSchema>)(List<?>) schemas);
}
/**
* Define the image of a bullet/shrapnel attribute in jgame engine.
* @param towerschema the tower schema
* @param imageNameConstant a constant of TowerSchema that is an image name attribute
*/
private void defineBulletImage (TowerSchema towerschema, String imageNameConstant) {
String bulletImageName = (String) towerschema.getAttributesMap().get(imageNameConstant);
String bulletImagePath = Model.RESOURCE_PATH + towerschema.getAttributesMap().get(imageNameConstant);
engine.defineImage(bulletImageName, "-", 1, bulletImagePath, "-");
}
/**
* Load monster schemas
*
* @param schemas
*/
@SuppressWarnings("unchecked")
public void loadMonsterSchemas (List<MonsterSchema> schemas) {
loadTDObjectSchemas((List<TDObjectSchema>)(List<?>) schemas);
}
/**
* Load item schemas
*
* @param schemas
*/
@SuppressWarnings("unchecked")
public void loadItemSchemas(List<ItemSchema> schemas) {
for (ItemSchema i: schemas) {
possibleItemNames.add((String) i.getAttributesMap().get(TDObjectSchema.NAME));
}
loadTDObjectSchemas((List<TDObjectSchema>)(List<?>) schemas);
}
/**
* Places an item at the given location.
* @param location
* @param itemName
* @return The new TDItem object
* @throws ItemCreationFailureException
* @throws ClassNotFoundException
* @throws InstantiationException
* @throws IllegalAccessException
* @throws IllegalArgumentException
* @throws InvocationTargetException
*/
public TDPowerupPowerup placeItem (Point2D location, String itemName) throws ItemCreationFailureException {
Point2D tileOrigin = TDMap.findTileOrigin(location);
try {
TDObjectSchema schema = tdObjectSchemaMap.get(itemName);
schema.addAttribute(ItemSchema.LOCATION, (Serializable) tileOrigin);
Object[] itemParameters = { schema.getAttributesMap() };
return (TDPowerupPowerup) placeObject(schema.getMyConcreteType(), itemParameters);
}
catch (Exception e) {
throw new ItemCreationFailureException(e);
}
}
/**
* Place tower at a given location's tile.
*
* @param location The coordinates to place the tower at
* @param towerName The name of the tower to place
* @return The new Tower object
* @throws TowerCreationFailureException
*/
public ITower placeTower (Point2D location, String towerName)
throws TowerCreationFailureException {
Point2D tileOrigin = TDMap.findTileOrigin(location);
try {
TDObjectSchema schema = tdObjectSchemaMap.get(towerName);
schema.addAttribute(TowerSchema.LOCATION, (Serializable) tileOrigin);
Object[] towerParameters = { schema.getAttributesMap() };
// return new MoneyTower(new ShootingTower((BaseTower)
// placeObject(schema.getMyConcreteType(), towerParameters), 10, 3, 200));
return addTowerBehaviors((SimpleTower) placeObject(schema.getMyConcreteType(),
towerParameters),
schema);
}
catch (Exception e) {
throw new TowerCreationFailureException(e);
}
}
private ITower addTowerBehaviors (SimpleTower baseTower, TDObjectSchema schema) {
ITower finalTower = baseTower;
Map<String, Serializable> attributes = schema.getAttributesMap();
Collection<TowerBehaviors> towerBehaviors =
(Collection<TowerBehaviors>) attributes.get(TowerSchema.TOWER_BEHAVIORS);
for (TowerBehaviors towerBehavior : towerBehaviors) {
Class<? extends ITower> concreteType = towerBehavior.getConcreteClass();
Object[] towerParameters = { finalTower, attributes };
finalTower = (ITower) placeObject(concreteType, towerParameters);
}
return finalTower;
}
/**
* Places a monster at set locations. Upon spawning, the monster will traverse from the entrance
* to the exit with a path finding algorithm.
*
* @param entrance The spawn location of the monster
* @param exit The exit location of the monster
* @param monsterName The name of the monster to place
* @return The new Monster object
* @throws MonsterCreationFailureException
*/
public Monster placeMonster (Point2D entrance, Exit exit,
PathfinderManager pathfinderManager,
String monsterName)
throws MonsterCreationFailureException {
try {
TDObjectSchema schema = tdObjectSchemaMap.get(monsterName);
schema.addAttribute(MonsterSchema.ENTRANCE_LOCATION, (Serializable) entrance);
schema.addAttribute(MonsterSchema.EXIT_LOCATION, exit);
schema.addAttribute(MonsterSchema.PATHFINDER_MANAGER, pathfinderManager);
Object[] monsterParameters = { schema.getAttributesMap() };
return (Monster) placeObject(schema.getMyConcreteType(), monsterParameters);
}
catch (Exception e) {
throw new MonsterCreationFailureException(e);
}
}
/**
* Uses the Reflection utility class to create the appropriate object with parameters
*
* @param objectType
* @param parameters
* @return
*/
private Object placeObject (Class<?> objectType, Object[] parameters) {
return Reflection.createInstance(objectType.getName(), parameters);
}
/**
* Returns the attributes of a TDobject from its schema, if schema doesn't exist, returns null
* @param objName
* @return unmodifiable map of attributes
*/
public Map<String, Serializable> getTDObjectAttributes(String objName) {
return tdObjectSchemaMap.containsKey(objName) ? Collections.unmodifiableMap(tdObjectSchemaMap.get(objName).getAttributesMap()) : null;
}
/**
* Returns names of towers that have loaded schemas, and can possibly be created.
* @return an unmodifiable list
*/
public List<String> getPossibleTowersNames(){
return Collections.unmodifiableList(possibleTowersNames);
}
/**
* Returns the names of items that have loaded schemas, and can possibly be created/
*
* @return an unmodifiable list
*/
public List<String> getPossibleItemNames() {
return Collections.unmodifiableList(possibleItemNames);
}
/**
* Returns the description associated with a tower.
*
* @param towerName
* @return
*/
public String getTowerDescription(String towerName) {
return towerMap.get(towerName);
}
/**
* Returns the description associated with an item
*
* @param itemName
* @return
*/
public String getItemDescription(String itemName) {
return itemMap.get(itemName);
}
}
|
|
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jetbrains.python.parsing;
import com.intellij.lang.SyntaxTreeBuilder;
import com.intellij.lang.WhitespacesAndCommentsBinder;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.NlsContexts.ParsingError;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import com.jetbrains.python.PyElementTypes;
import com.jetbrains.python.PyTokenTypes;
import org.jetbrains.annotations.Nullable;
import static com.jetbrains.python.PyPsiBundle.message;
/**
* @author yole
*/
public class ExpressionParsing extends Parsing {
private static final Logger LOG = Logger.getInstance(ExpressionParsing.class);
// Edge-token binder that attaches all whitespace/comment tokens at a marker's
// edge to the marker itself (returns tokens.size(), i.e. consume everything).
public static final WhitespacesAndCommentsBinder CONSUME_COMMENTS_AND_SPACES_TO_LEFT = (tokens, atStreamEdge, getter) -> tokens.size();
public ExpressionParsing(ParsingContext context) {
  super(context);
}
/**
 * Parses a primary expression (atom): an identifier or target, a numeric /
 * bool / None literal, a string literal (including f-strings), a parenthesized
 * expression, a list / dict / set display, a backtick repr expression, or an
 * ellipsis. Consumes tokens from {@code myBuilder} as a side effect.
 *
 * @param isTargetExpression true when the expression occurs in assignment-target
 *                           position, so identifiers become TARGET_EXPRESSION
 * @return true if a primary expression was parsed, false otherwise
 */
public boolean parsePrimaryExpression(boolean isTargetExpression) {
  final IElementType firstToken = myBuilder.getTokenType();
  if (isIdentifier(myBuilder)) {
    if (isTargetExpression) {
      buildTokenElement(PyElementTypes.TARGET_EXPRESSION, myBuilder);
    }
    else {
      buildTokenElement(getReferenceType(), myBuilder);
    }
    return true;
  }
  else if (firstToken == PyTokenTypes.INTEGER_LITERAL) {
    buildTokenElement(PyElementTypes.INTEGER_LITERAL_EXPRESSION, myBuilder);
    return true;
  }
  else if (firstToken == PyTokenTypes.FLOAT_LITERAL) {
    buildTokenElement(PyElementTypes.FLOAT_LITERAL_EXPRESSION, myBuilder);
    return true;
  }
  else if (firstToken == PyTokenTypes.IMAGINARY_LITERAL) {
    buildTokenElement(PyElementTypes.IMAGINARY_LITERAL_EXPRESSION, myBuilder);
    return true;
  }
  else if (firstToken == PyTokenTypes.NONE_KEYWORD) {
    buildTokenElement(PyElementTypes.NONE_LITERAL_EXPRESSION, myBuilder);
    return true;
  }
  else if (firstToken == PyTokenTypes.TRUE_KEYWORD ||
           firstToken == PyTokenTypes.FALSE_KEYWORD ||
           firstToken == PyTokenTypes.DEBUG_KEYWORD) {
    buildTokenElement(PyElementTypes.BOOL_LITERAL_EXPRESSION, myBuilder);
    return true;
  }
  else if (PyTokenTypes.STRING_NODES.contains(firstToken) || firstToken == PyTokenTypes.FSTRING_START) {
    return parseStringLiteralExpression();
  }
  else if (firstToken == PyTokenTypes.LPAR) {
    parseParenthesizedExpression(isTargetExpression);
    return true;
  }
  else if (firstToken == PyTokenTypes.LBRACKET) {
    parseListLiteralExpression(myBuilder, isTargetExpression);
    return true;
  }
  else if (firstToken == PyTokenTypes.LBRACE) {
    parseDictOrSetDisplay();
    return true;
  }
  else if (firstToken == PyTokenTypes.TICK) {
    // Python 2 backtick repr expression: `x`
    parseReprExpression(myBuilder);
    return true;
  }
  else if (parseEllipsis()) {
    return true;
  }
  return false;
}
/**
 * Parses a (possibly implicitly concatenated) string literal expression.
 * Consumes every adjacent string node and f-string into a single
 * STRING_LITERAL_EXPRESSION element, matching Python's implicit string
 * concatenation ("a" "b" f"c").
 *
 * @return true if at least one string part was consumed, false otherwise
 */
public boolean parseStringLiteralExpression() {
  final SyntaxTreeBuilder builder = myContext.getBuilder();
  IElementType tokenType = builder.getTokenType();
  if (PyTokenTypes.STRING_NODES.contains(tokenType) || tokenType == PyTokenTypes.FSTRING_START) {
    final SyntaxTreeBuilder.Marker marker = builder.mark();
    while (true) {
      tokenType = builder.getTokenType();
      if (PyTokenTypes.STRING_NODES.contains(tokenType)) {
        nextToken();
      }
      else if (tokenType == PyTokenTypes.FSTRING_START) {
        // F-strings are structured nodes, not plain tokens; parse recursively.
        parseFormattedStringNode();
      }
      else {
        break;
      }
    }
    marker.done(PyElementTypes.STRING_LITERAL_EXPRESSION);
    return true;
  }
  return false;
}
/**
 * Parses an f-string node starting at FSTRING_START: consumes literal text
 * parts and embedded {@code {...}} fragments until the FSTRING_END whose
 * quotes match the opening quotes, producing an FSTRING_NODE element.
 * A non-matching FSTRING_END is left in the stream because it may terminate
 * an enclosing (nested) f-string.
 */
private void parseFormattedStringNode() {
  final SyntaxTreeBuilder builder = myContext.getBuilder();
  if (atToken(PyTokenTypes.FSTRING_START)) {
    final String prefixThenQuotes = builder.getTokenText();
    assert prefixThenQuotes != null;
    // Strip the string prefix (f, rb, ...) to get just the opening quotes so
    // they can be compared against the FSTRING_END token text.
    final String openingQuotes = prefixThenQuotes.replaceFirst("^[UuBbCcRrFf]*", "");
    final SyntaxTreeBuilder.Marker marker = builder.mark();
    nextToken();
    while (true) {
      if (atAnyOfTokens(PyTokenTypes.FSTRING_TEXT_TOKENS)) {
        nextToken();
      }
      else if (atToken(PyTokenTypes.FSTRING_FRAGMENT_START)) {
        parseFStringFragment();
      }
      else if (atToken(PyTokenTypes.FSTRING_END)) {
        // Fix: compare via the known non-null openingQuotes to avoid a
        // potential NPE if getTokenText() ever returns null here.
        if (openingQuotes.equals(builder.getTokenText())) {
          nextToken();
        }
        // Can be the end of an enclosing f-string, so leave it in the stream
        else {
          builder.mark().error(message("PARSE.expected.fstring.quote", openingQuotes));
        }
        break;
      }
      else if (atToken(PyTokenTypes.STATEMENT_BREAK)) {
        // Unterminated f-string at end of statement.
        builder.mark().error(message("PARSE.expected.fstring.quote", openingQuotes));
        break;
      }
      else {
        builder.error(message("unexpected.f.string.token"));
        break;
      }
    }
    marker.done(PyElementTypes.FSTRING_NODE);
  }
}
/**
 * Parses one replacement fragment of an f-string: {@code {expr[=][!conv][:format]}}.
 * When the embedded expression is missing or malformed, recovers by consuming
 * tokens up to the nearest fragment or f-string boundary and reporting an error.
 */
private void parseFStringFragment() {
  final SyntaxTreeBuilder builder = myContext.getBuilder();
  if (atToken(PyTokenTypes.FSTRING_FRAGMENT_START)) {
    final SyntaxTreeBuilder.Marker marker = builder.mark();
    nextToken();
    SyntaxTreeBuilder.Marker recoveryMarker = builder.mark();
    final boolean parsedExpression = myContext.getExpressionParser().parseExpressionOptional();
    if (parsedExpression) {
      // Expression parsed: re-mark so that only trailing junk (if any) is flagged.
      recoveryMarker.drop();
      recoveryMarker = builder.mark();
    }
    boolean recovery = !parsedExpression;
    // Skip everything up to a token that can legally follow the expression.
    while (!builder.eof() && !atAnyOfTokens(PyTokenTypes.FSTRING_FRAGMENT_TYPE_CONVERSION,
                                            PyTokenTypes.FSTRING_FRAGMENT_FORMAT_START,
                                            PyTokenTypes.FSTRING_FRAGMENT_END,
                                            PyTokenTypes.FSTRING_END,
                                            PyTokenTypes.STATEMENT_BREAK,
                                            PyTokenTypes.EQ)) {
      nextToken();
      recovery = true;
    }
    if (recovery) {
      recoveryMarker.error(parsedExpression ? message("unexpected.expression.part") : message("PARSE.expected.expression"));
      recoveryMarker.setCustomEdgeTokenBinders(null, CONSUME_COMMENTS_AND_SPACES_TO_LEFT);
    }
    else {
      recoveryMarker.drop();
    }
    // Optional '=' of a self-documenting fragment (f'{expr=}').
    matchToken(PyTokenTypes.EQ);
    final boolean hasTypeConversion = matchToken(PyTokenTypes.FSTRING_FRAGMENT_TYPE_CONVERSION);
    final boolean hasFormatPart = atToken(PyTokenTypes.FSTRING_FRAGMENT_FORMAT_START);
    if (hasFormatPart) {
      parseFStringFragmentFormatPart();
    }
    // Pick the most specific "expected ..." message for the closing-brace check.
    @ParsingError String errorMessage = message("PARSE.expected.fstring.rbrace");
    if (!hasFormatPart && !atToken(PyTokenTypes.FSTRING_END)) {
      errorMessage = message("PARSE.expected.fstring.colon.or.rbrace");
      if (!hasTypeConversion) {
        errorMessage = message("PARSE.expected.fstring.type.conversion.or.colon.or.rbrace");
      }
    }
    checkMatches(PyTokenTypes.FSTRING_FRAGMENT_END, errorMessage);
    marker.setCustomEdgeTokenBinders(null, CONSUME_COMMENTS_AND_SPACES_TO_LEFT);
    marker.done(PyElementTypes.FSTRING_FRAGMENT);
  }
}
/**
 * Parses the format-specifier part of an f-string fragment (everything after
 * the ':'). The specifier may itself contain nested replacement fragments.
 */
private void parseFStringFragmentFormatPart() {
  if (atToken(PyTokenTypes.FSTRING_FRAGMENT_FORMAT_START)) {
    final SyntaxTreeBuilder.Marker marker = myContext.getBuilder().mark();
    nextToken();
    while (true) {
      if (atAnyOfTokens(PyTokenTypes.FSTRING_TEXT_TOKENS)) {
        nextToken();
      }
      else if (atToken(PyTokenTypes.FSTRING_FRAGMENT_START)) {
        // Nested fragment inside the format spec, e.g. f'{x:{width}}'.
        parseFStringFragment();
      }
      else {
        break;
      }
    }
    marker.done(PyElementTypes.FSTRING_FRAGMENT_FORMAT_PART);
  }
}
/**
 * Parses a bracketed display starting at '[': either a list literal
 * (possibly empty) or a list comprehension.
 *
 * @param builder            the tree builder positioned at LBRACKET
 * @param isTargetExpression true when the elements appear on the left-hand
 *                           side of an assignment
 */
private void parseListLiteralExpression(final SyntaxTreeBuilder builder, boolean isTargetExpression) {
  LOG.assertTrue(builder.getTokenType() == PyTokenTypes.LBRACKET);
  final SyntaxTreeBuilder.Marker expr = builder.mark();
  builder.advanceLexer();
  // Empty list: '[]'.
  if (builder.getTokenType() == PyTokenTypes.RBRACKET) {
    builder.advanceLexer();
    expr.done(PyElementTypes.LIST_LITERAL_EXPRESSION);
    return;
  }
  if (!parseNamedTestExpression(false, isTargetExpression)) {
    builder.error(message("PARSE.expected.expression"));
  }
  // A 'for'/'async for' after the first element makes it a comprehension.
  if (atForOrAsyncFor()) {
    parseComprehension(expr, PyTokenTypes.RBRACKET, PyElementTypes.LIST_COMP_EXPRESSION);
  }
  else {
    // Remaining comma-separated elements; a trailing comma is allowed.
    while (builder.getTokenType() != PyTokenTypes.RBRACKET) {
      if (!matchToken(PyTokenTypes.COMMA)) {
        builder.error(message("rbracket.or.comma.expected"));
      }
      if (atToken(PyTokenTypes.RBRACKET)) {
        break;
      }
      if (!parseNamedTestExpression(false, isTargetExpression)) {
        builder.error(message("PARSE.expected.expr.or.comma.or.bracket"));
        break;
      }
    }
    checkMatches(PyTokenTypes.RBRACKET, message("PARSE.expected.rbracket"));
    expr.done(PyElementTypes.LIST_LITERAL_EXPRESSION);
  }
}
/**
 * Parses the tail of a comprehension starting at the current FOR keyword:
 * one or more "for targets in source [if cond]*" clauses, closed by
 * {@code endToken}.
 *
 * @param expr     marker opened before the result expression; completed as {@code exprType}
 * @param endToken closing token to consume, or null when none is expected
 *                 (generator expression used directly as a call argument)
 * @param exprType the comprehension element type to produce
 */
private void parseComprehension(SyntaxTreeBuilder.Marker expr,
                                @Nullable final IElementType endToken,
                                final IElementType exprType) {
  assertCurrentToken(PyTokenTypes.FOR_KEYWORD);
  while (true) {
    myBuilder.advanceLexer();
    parseStarTargets();
    parseComprehensionRange(exprType == PyElementTypes.GENERATOR_EXPRESSION);
    // Zero or more 'if' filter clauses after each 'for'.
    while (myBuilder.getTokenType() == PyTokenTypes.IF_KEYWORD) {
      myBuilder.advanceLexer();
      if (!parseOldExpression()) {
        myBuilder.error(message("PARSE.expected.expression"));
      }
    }
    // Another 'for'/'async for' continues the same comprehension.
    if (atForOrAsyncFor()) {
      continue;
    }
    if (endToken == null || matchToken(endToken)) {
      break;
    }
    myBuilder.error(message("PARSE.expected.for.or.bracket"));
    break;
  }
  expr.done(exprType);
}
/**
 * Parses the target list of a 'for' clause: one or more comma-separated
 * (possibly starred) target expressions. Multiple targets are wrapped in a
 * TUPLE_EXPRESSION node.
 *
 * @return true if at least one target was parsed
 */
public boolean parseStarTargets() {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseStarExpression(true)) {
    myBuilder.error(message("PARSE.expected.expression"));
    expr.drop();
    return false;
  }
  if (myBuilder.getTokenType() == PyTokenTypes.COMMA) {
    while (myBuilder.getTokenType() == PyTokenTypes.COMMA) {
      myBuilder.advanceLexer();
      SyntaxTreeBuilder.Marker expr2 = myBuilder.mark();
      if (!parseStarExpression(true)) {
        myBuilder.error(message("PARSE.expected.expression"));
        // Roll back so the failed element (and the error) is re-parsed by the caller.
        expr2.rollbackTo();
        break;
      }
      expr2.drop();
    }
    expr.done(PyElementTypes.TUPLE_EXPRESSION);
  }
  else {
    // Single target: no tuple wrapper needed.
    expr.drop();
  }
  return true;
}
/**
 * Parses the "in <source>" part of a comprehension's 'for' clause.
 * Generator expressions restrict the source to an or-test (no bare tuples),
 * other comprehensions accept an old-style tuple expression.
 *
 * @param generatorExpression true when parsing inside a generator expression
 */
protected void parseComprehensionRange(boolean generatorExpression) {
  checkMatches(PyTokenTypes.IN_KEYWORD, message("PARSE.expected.in"));
  boolean result;
  if (generatorExpression) {
    result = parseORTestExpression(false, false);
  }
  else {
    result = parseTupleExpression(false, false, true);
  }
  if (!result) {
    myBuilder.error(message("PARSE.expected.expression"));
  }
}
/**
 * Parses a brace display starting at '{': a dict literal, set literal,
 * dict/set comprehension, or the empty dict '{}'. The decision between dict
 * and set is made after the first element, based on whether a ':' follows it.
 */
private void parseDictOrSetDisplay() {
  LOG.assertTrue(myBuilder.getTokenType() == PyTokenTypes.LBRACE);
  final SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  myBuilder.advanceLexer();
  // '{}' is always an (empty) dict, never a set.
  if (matchToken(PyTokenTypes.RBRACE)) {
    expr.done(PyElementTypes.DICT_LITERAL_EXPRESSION);
    return;
  }
  // A leading '**' forces a dict display: {**mapping, ...}.
  if (atToken(PyTokenTypes.EXP)) {
    if (!parseDoubleStarExpression(false)) {
      myBuilder.error(message("PARSE.expected.expression"));
      expr.done(PyElementTypes.DICT_LITERAL_EXPRESSION);
      return;
    }
    parseDictLiteralContentTail(expr);
    return;
  }
  final SyntaxTreeBuilder.Marker firstExprMarker = myBuilder.mark();
  if (!parseSingleExpression(false)) {
    myBuilder.error(message("PARSE.expected.expression"));
    firstExprMarker.drop();
    expr.done(PyElementTypes.DICT_LITERAL_EXPRESSION);
    return;
  }
  if (matchToken(PyTokenTypes.COLON)) {
    // 'expr :' => dict literal or dict comprehension.
    parseDictLiteralTail(expr, firstExprMarker);
  }
  else if (atToken(PyTokenTypes.COMMA) || atToken(PyTokenTypes.RBRACE)) {
    // 'expr ,' or 'expr }' => set literal.
    firstExprMarker.drop();
    parseSetLiteralTail(expr);
  }
  else if (atForOrAsyncFor()) {
    // 'expr for' => set comprehension.
    firstExprMarker.drop();
    parseComprehension(expr, PyTokenTypes.RBRACE, PyElementTypes.SET_COMP_EXPRESSION);
  }
  else {
    myBuilder.error(message("PARSE.expected.expression"));
    firstExprMarker.drop();
    expr.done(PyElementTypes.DICT_LITERAL_EXPRESSION);
  }
}
/**
 * Parses the rest of a dict display after "key :" has been consumed:
 * the first value, then either a dict comprehension ('for' follows) or the
 * remaining key-value pairs.
 *
 * @param startMarker        marker opened at the '{'
 * @param firstKeyValueMarker marker opened before the first key expression
 */
private void parseDictLiteralTail(SyntaxTreeBuilder.Marker startMarker, SyntaxTreeBuilder.Marker firstKeyValueMarker) {
  if (!parseSingleExpression(false)) {
    // Missing value: close what we have and bail out at the '}' if present.
    myBuilder.error(message("PARSE.expected.expression"));
    firstKeyValueMarker.done(PyElementTypes.KEY_VALUE_EXPRESSION);
    if (atToken(PyTokenTypes.RBRACE)) {
      myBuilder.advanceLexer();
    }
    startMarker.done(PyElementTypes.DICT_LITERAL_EXPRESSION);
    return;
  }
  firstKeyValueMarker.done(PyElementTypes.KEY_VALUE_EXPRESSION);
  if (atForOrAsyncFor()) {
    parseComprehension(startMarker, PyTokenTypes.RBRACE, PyElementTypes.DICT_COMP_EXPRESSION);
  }
  else {
    parseDictLiteralContentTail(startMarker);
  }
}
/**
 * Parses the remaining comma-separated entries of a dict literal up to '}':
 * each entry is either '**mapping' or a 'key: value' pair.
 *
 * @param startMarker marker opened at the '{'; completed as DICT_LITERAL_EXPRESSION
 */
private void parseDictLiteralContentTail(SyntaxTreeBuilder.Marker startMarker) {
  while (myBuilder.getTokenType() != PyTokenTypes.RBRACE) {
    checkMatches(PyTokenTypes.COMMA, message("PARSE.expected.comma"));
    if (atToken(PyTokenTypes.EXP)) {
      if (!parseDoubleStarExpression(false)) {
        break;
      }
    }
    else {
      if (!parseKeyValueExpression()) {
        break;
      }
    }
  }
  checkMatches(PyTokenTypes.RBRACE, message("PARSE.expected.rbrace"));
  startMarker.done(PyElementTypes.DICT_LITERAL_EXPRESSION);
}
/**
 * Parses a single 'key: value' entry of a dict literal.
 *
 * @return true if a KEY_VALUE_EXPRESSION node was produced; false when the
 *         key or value expression is missing (an error is reported for a
 *         missing value only)
 */
private boolean parseKeyValueExpression() {
  final SyntaxTreeBuilder.Marker marker = myBuilder.mark();
  if (!parseSingleExpression(false)) {
    marker.drop();
    return false;
  }
  checkMatches(PyTokenTypes.COLON, message("PARSE.expected.colon"));
  if (!parseSingleExpression(false)) {
    myBuilder.error(message("value.expression.expected"));
    marker.drop();
    return false;
  }
  marker.done(PyElementTypes.KEY_VALUE_EXPRESSION);
  return true;
}
/**
 * Parses the remaining comma-separated elements of a set literal up to '}'.
 *
 * @param startMarker marker opened at the '{'; completed as SET_LITERAL_EXPRESSION
 */
private void parseSetLiteralTail(SyntaxTreeBuilder.Marker startMarker) {
  while (myBuilder.getTokenType() != PyTokenTypes.RBRACE) {
    checkMatches(PyTokenTypes.COMMA, message("PARSE.expected.comma"));
    if (!parseSingleExpression(false)) {
      break;
    }
  }
  checkMatches(PyTokenTypes.RBRACE, message("PARSE.expected.rbrace"));
  startMarker.done(PyElementTypes.SET_LITERAL_EXPRESSION);
}
/**
 * Parses a parenthesized form starting at '(': the empty tuple '()', a
 * generator expression, or an ordinary parenthesized (possibly tuple/yield)
 * expression. Unrecognized trailing tokens before ')' are consumed into a
 * single error node for recovery.
 *
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 */
private void parseParenthesizedExpression(boolean isTargetExpression) {
  LOG.assertTrue(myBuilder.getTokenType() == PyTokenTypes.LPAR);
  final SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  myBuilder.advanceLexer();
  // '()' is the empty tuple.
  if (myBuilder.getTokenType() == PyTokenTypes.RPAR) {
    myBuilder.advanceLexer();
    expr.done(PyElementTypes.TUPLE_EXPRESSION);
  }
  else {
    parseYieldOrTupleExpression(isTargetExpression);
    if (atForOrAsyncFor()) {
      parseComprehension(expr, PyTokenTypes.RPAR, PyElementTypes.GENERATOR_EXPRESSION);
    }
    else {
      // Error recovery: swallow any junk before the closing paren
      // (stopping at line/statement breaks and f-string terminators).
      final SyntaxTreeBuilder.Marker err = myBuilder.mark();
      boolean empty = true;
      while (!myBuilder.eof() &&
             myBuilder.getTokenType() != PyTokenTypes.RPAR &&
             myBuilder.getTokenType() != PyTokenTypes.LINE_BREAK &&
             myBuilder.getTokenType() != PyTokenTypes.STATEMENT_BREAK &&
             myBuilder.getTokenType() != PyTokenTypes.FSTRING_END) {
        myBuilder.advanceLexer();
        empty = false;
      }
      if (!empty) {
        err.error(message("unexpected.expression.syntax"));
      }
      else {
        err.drop();
      }
      checkMatches(PyTokenTypes.RPAR, message("PARSE.expected.rpar"));
      expr.done(PyElementTypes.PARENTHESIZED_EXPRESSION);
    }
  }
}
/**
 * Parses a backquote repr expression, {@code `expr`} (Python 2 syntax).
 *
 * @param builder the tree builder positioned at the opening TICK
 */
private void parseReprExpression(SyntaxTreeBuilder builder) {
  LOG.assertTrue(builder.getTokenType() == PyTokenTypes.TICK);
  final SyntaxTreeBuilder.Marker expr = builder.mark();
  builder.advanceLexer();
  parseExpression();
  checkMatches(PyTokenTypes.TICK, message("PARSE.expected.tick"));
  expr.done(PyElementTypes.REPR_EXPRESSION);
}
/**
 * Parses a member-access chain: a primary expression followed by any number of
 * attribute accesses ('.name'), calls ('(...)') and subscriptions ('[...]').
 *
 * <p>When {@code isTargetExpression} is set, only the LAST component of the
 * chain may become a TARGET_EXPRESSION; qualifiers must remain references.
 * Because that cannot be known until the chain has been scanned, the method
 * may roll back and re-parse the chain with the recast flags set — hence the
 * outer do/while loop.
 *
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 * @return true if a primary expression (and hence a chain) was parsed
 */
public boolean parseMemberExpression(boolean isTargetExpression) {
  // in sequence a.b.... .c all members but last are always references, and the last may be target.
  boolean recastFirstIdentifier = false;
  boolean recastQualifier = false;
  do {
    boolean firstIdentifierIsTarget = isTargetExpression && !recastFirstIdentifier;
    SyntaxTreeBuilder.Marker expr = myBuilder.mark();
    if (!parsePrimaryExpression(firstIdentifierIsTarget)) {
      expr.drop();
      return false;
    }
    while (true) {
      final IElementType tokenType = myBuilder.getTokenType();
      if (tokenType == PyTokenTypes.DOT) {
        if (firstIdentifierIsTarget) {
          // The first identifier turned out to be a qualifier; re-parse it as a reference.
          recastFirstIdentifier = true;
          expr.rollbackTo();
          break;
        }
        myBuilder.advanceLexer();
        checkMatches(PyTokenTypes.IDENTIFIER, message("PARSE.expected.name"));
        // The final '.name' of a target chain is the actual assignment target.
        if (isTargetExpression && !recastQualifier && !atAnyOfTokens(PyTokenTypes.DOT, PyTokenTypes.LPAR, PyTokenTypes.LBRACKET)) {
          expr.done(PyElementTypes.TARGET_EXPRESSION);
        }
        else {
          expr.done(getReferenceType());
        }
        expr = expr.precede();
      }
      else if (tokenType == PyTokenTypes.LPAR) {
        parseArgumentList();
        expr.done(PyElementTypes.CALL_EXPRESSION);
        expr = expr.precede();
      }
      else if (tokenType == PyTokenTypes.LBRACKET) {
        myBuilder.advanceLexer();
        SyntaxTreeBuilder.Marker sliceOrTupleStart = myBuilder.mark();
        SyntaxTreeBuilder.Marker sliceItemStart = myBuilder.mark();
        if (atToken(PyTokenTypes.COLON)) {
          // '[:...' — slice with an empty lower bound.
          sliceOrTupleStart.drop();
          SyntaxTreeBuilder.Marker sliceMarker = myBuilder.mark();
          sliceMarker.done(PyElementTypes.EMPTY_EXPRESSION);
          parseSliceEnd(expr, sliceItemStart);
        }
        else {
          boolean hadExpression = parseSingleExpression(false);
          if (atToken(PyTokenTypes.COLON)) {
            sliceOrTupleStart.drop();
            parseSliceEnd(expr, sliceItemStart);
          }
          else if (atToken(PyTokenTypes.COMMA)) {
            // Could be an extended slice list or a plain tuple index;
            // try the slice list first, then roll back to a tuple.
            sliceItemStart.done(PyElementTypes.SLICE_ITEM);
            if (!parseSliceListTail(expr, sliceOrTupleStart)) {
              sliceOrTupleStart.rollbackTo();
              if (!parseTupleExpression(false, false, false)) {
                myBuilder.error(message("tuple.expression.expected"));
              }
              checkMatches(PyTokenTypes.RBRACKET, message("PARSE.expected.rbracket"));
              expr.done(PyElementTypes.SUBSCRIPTION_EXPRESSION);
            }
          }
          else {
            if (!hadExpression) {
              myBuilder.error(message("PARSE.expected.expression"));
            }
            sliceOrTupleStart.drop();
            sliceItemStart.drop();
            checkMatches(PyTokenTypes.RBRACKET, message("PARSE.expected.rbracket"));
            expr.done(PyElementTypes.SUBSCRIPTION_EXPRESSION);
          }
        }
        if (isTargetExpression && !recastQualifier) {
          recastFirstIdentifier = true; // subscription is always a reference
          recastQualifier = true; // recast non-first qualifiers too
          expr.rollbackTo();
          break;
        }
        expr = expr.precede();
      }
      else {
        expr.drop();
        break;
      }
      recastFirstIdentifier = false; // it is true only after a break; normal flow always unsets it.
      // recastQualifier is untouched, it remembers whether qualifiers were already recast
    }
  }
  while (recastFirstIdentifier);
  return true;
}
/**
 * Tries to parse the ellipsis literal '...' (three consecutive DOT tokens).
 * On success a NONE_LITERAL_EXPRESSION node is produced (the tree model
 * represents Ellipsis with that element type); otherwise the position is
 * rolled back and nothing is consumed.
 *
 * @return true if an ellipsis literal was parsed
 */
private boolean parseEllipsis() {
  if (atToken(PyTokenTypes.DOT)) {
    final SyntaxTreeBuilder.Marker maybeEllipsis = myBuilder.mark();
    myBuilder.advanceLexer();
    //duplication is intended as matchToken advances the lexer
    //noinspection DuplicateBooleanBranch
    if (matchToken(PyTokenTypes.DOT) && matchToken(PyTokenTypes.DOT)) {
      maybeEllipsis.done(PyElementTypes.NONE_LITERAL_EXPRESSION);
      return true;
    }
    maybeEllipsis.rollbackTo();
  }
  return false;
}
// Delimiter sets used when validating slice/subscription syntax inside '[...]'.
private static final TokenSet BRACKET_OR_COMMA = TokenSet.create(PyTokenTypes.RBRACKET, PyTokenTypes.COMMA);
private static final TokenSet BRACKET_COLON_COMMA = TokenSet.create(PyTokenTypes.RBRACKET, PyTokenTypes.COLON, PyTokenTypes.COMMA);
/**
 * Parses the rest of a slice item after its first ':' — the upper bound and
 * the optional step — then hands off to {@link #parseSliceListTail} for any
 * further comma-separated slice items. Empty bounds are represented by
 * EMPTY_EXPRESSION nodes.
 *
 * @param exprStart      marker opened before the subscripted expression
 * @param sliceItemStart marker opened before the current slice item
 */
public void parseSliceEnd(SyntaxTreeBuilder.Marker exprStart, SyntaxTreeBuilder.Marker sliceItemStart) {
  myBuilder.advanceLexer();
  if (atToken(PyTokenTypes.RBRACKET)) {
    // 'x[a:]' — empty upper bound, slice ends here.
    SyntaxTreeBuilder.Marker sliceMarker = myBuilder.mark();
    sliceMarker.done(PyElementTypes.EMPTY_EXPRESSION);
    sliceItemStart.done(PyElementTypes.SLICE_ITEM);
    nextToken();
    exprStart.done(PyElementTypes.SLICE_EXPRESSION);
    return;
  }
  else {
    if (atToken(PyTokenTypes.COLON)) {
      // 'x[a::...]' — empty upper bound before the step.
      SyntaxTreeBuilder.Marker sliceMarker = myBuilder.mark();
      sliceMarker.done(PyElementTypes.EMPTY_EXPRESSION);
    }
    else {
      parseSingleExpression(false);
    }
    if (!BRACKET_COLON_COMMA.contains(myBuilder.getTokenType())) {
      myBuilder.error(message("PARSE.expected.colon.or.rbracket"));
    }
    // Optional step expression after the second ':'.
    if (matchToken(PyTokenTypes.COLON)) {
      parseSingleExpression(false);
    }
    sliceItemStart.done(PyElementTypes.SLICE_ITEM);
    if (!BRACKET_OR_COMMA.contains(myBuilder.getTokenType())) {
      myBuilder.error(message("rbracket.or.comma.expected"));
    }
  }
  parseSliceListTail(exprStart, null);
}
/**
 * Parses further comma-separated slice items up to ']'.
 *
 * @param exprStart        marker opened before the subscripted expression;
 *                         completed as SLICE_EXPRESSION when a slice was seen
 * @param sliceOrTupleStart marker to drop once we know this is a slice list,
 *                          or null when that is already established
 * @return true if at least one item contained a ':' (i.e. this really is a
 *         slice list, not a tuple subscript)
 */
private boolean parseSliceListTail(SyntaxTreeBuilder.Marker exprStart, @Nullable SyntaxTreeBuilder.Marker sliceOrTupleStart) {
  // null sliceOrTupleStart means the caller already proved this is a slice.
  boolean inSlice = sliceOrTupleStart == null;
  while (atToken(PyTokenTypes.COMMA)) {
    nextToken();
    SyntaxTreeBuilder.Marker sliceItemStart = myBuilder.mark();
    parseTestExpression(false, false);
    if (matchToken(PyTokenTypes.COLON)) {
      inSlice = true;
      parseTestExpression(false, false);
      // Optional step after the second ':'.
      if (matchToken(PyTokenTypes.COLON)) {
        parseTestExpression(false, false);
      }
    }
    sliceItemStart.done(PyElementTypes.SLICE_ITEM);
    if (!BRACKET_OR_COMMA.contains(myBuilder.getTokenType())) {
      myBuilder.error(message("rbracket.or.comma.expected"));
      break;
    }
  }
  checkMatches(PyTokenTypes.RBRACKET, message("PARSE.expected.rbracket"));
  if (inSlice) {
    if (sliceOrTupleStart != null) {
      sliceOrTupleStart.drop();
    }
    exprStart.done(PyElementTypes.SLICE_EXPRESSION);
  }
  return inSlice;
}
/**
 * Parses a call argument list starting at '(': positional arguments, keyword
 * arguments (name=value), starred arguments (*args / **kwargs), and the
 * special case of a bare generator expression as the sole argument.
 */
public void parseArgumentList() {
  LOG.assertTrue(myBuilder.getTokenType() == PyTokenTypes.LPAR);
  final SyntaxTreeBuilder.Marker arglist = myBuilder.mark();
  myBuilder.advanceLexer();
  // Marker kept open in case the first argument turns out to start a
  // generator expression, e.g. f(x for x in xs).
  SyntaxTreeBuilder.Marker genexpr = myBuilder.mark();
  int argNumber = 0;
  while (myBuilder.getTokenType() != PyTokenTypes.RPAR) {
    argNumber++;
    if (argNumber > 1) {
      if (argNumber == 2 && atForOrAsyncFor() && genexpr != null) {
        // First argument was the head of a generator expression.
        parseComprehension(genexpr, null, PyElementTypes.GENERATOR_EXPRESSION);
        genexpr = null;
        continue;
      }
      else if (matchToken(PyTokenTypes.COMMA)) {
        // Trailing comma before ')'.
        if (atToken(PyTokenTypes.RPAR)) {
          break;
        }
      }
      else {
        myBuilder.error(message("PARSE.expected.comma.or.rpar"));
        break;
      }
    }
    if (myBuilder.getTokenType() == PyTokenTypes.MULT || myBuilder.getTokenType() == PyTokenTypes.EXP) {
      // *args or **kwargs argument.
      final SyntaxTreeBuilder.Marker starArgMarker = myBuilder.mark();
      myBuilder.advanceLexer();
      if (!parseSingleExpression(false)) {
        myBuilder.error(message("PARSE.expected.expression"));
      }
      starArgMarker.done(PyElementTypes.STAR_ARGUMENT_EXPRESSION);
    }
    else {
      if (isIdentifier(myBuilder)) {
        // Look ahead for 'name=' (keyword argument); roll back otherwise.
        final SyntaxTreeBuilder.Marker keywordArgMarker = myBuilder.mark();
        advanceIdentifierLike(myBuilder);
        if (myBuilder.getTokenType() == PyTokenTypes.EQ) {
          myBuilder.advanceLexer();
          if (!parseSingleExpression(false)) {
            myBuilder.error(message("PARSE.expected.expression"));
          }
          keywordArgMarker.done(PyElementTypes.KEYWORD_ARGUMENT_EXPRESSION);
          continue;
        }
        keywordArgMarker.rollbackTo();
      }
      if (!parseNamedTestExpression(false, false)) {
        myBuilder.error(message("PARSE.expected.expression"));
        break;
      }
    }
  }
  if (genexpr != null) {
    genexpr.drop();
  }
  checkMatches(PyTokenTypes.RPAR, message("PARSE.expected.rpar"));
  arglist.done(PyElementTypes.ARGUMENT_LIST);
}
/**
 * Attempts to parse a full (possibly tuple) expression without reporting an
 * error on failure.
 *
 * @return true if an expression was parsed
 */
public boolean parseExpressionOptional() {
  final boolean parsed = parseTupleExpression(false, false, false);
  return parsed;
}
/**
 * Attempts to parse a full (possibly tuple) expression without reporting an
 * error on failure.
 *
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 * @return true if an expression was parsed
 */
public boolean parseExpressionOptional(boolean isTargetExpression) {
  final boolean parsed = parseTupleExpression(false, isTargetExpression, false);
  return parsed;
}
/**
 * Parses a full expression, reporting "expected expression" when none is present.
 */
public void parseExpression() {
  final boolean parsed = parseExpressionOptional();
  if (!parsed) {
    myBuilder.error(message("PARSE.expected.expression"));
  }
}
/**
 * Parses a full expression, reporting "expected expression" when none is present.
 *
 * @param stopOnIn           stop before an 'in' keyword (used in comprehension targets)
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 */
public void parseExpression(boolean stopOnIn, boolean isTargetExpression) {
  final boolean parsed = parseTupleExpression(stopOnIn, isTargetExpression, false);
  if (!parsed) {
    myBuilder.error(message("PARSE.expected.expression"));
  }
}
/**
 * Parses either a yield expression ('yield [expr]' / 'yield from expr') or an
 * ordinary (possibly tuple) expression.
 *
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 * @return true if something was parsed; for 'yield from' the result reflects
 *         whether the delegated expression was present
 */
public boolean parseYieldOrTupleExpression(final boolean isTargetExpression) {
  if (myBuilder.getTokenType() == PyTokenTypes.YIELD_KEYWORD) {
    SyntaxTreeBuilder.Marker yieldExpr = myBuilder.mark();
    myBuilder.advanceLexer();
    if (myBuilder.getTokenType() == PyTokenTypes.FROM_KEYWORD) {
      // 'yield from <expr>' requires an operand.
      myBuilder.advanceLexer();
      final boolean parsed = parseTupleExpression(false, isTargetExpression, false);
      if (!parsed) {
        myBuilder.error(message("PARSE.expected.expression"));
      }
      yieldExpr.done(PyElementTypes.YIELD_EXPRESSION);
      return parsed;
    }
    else {
      // Plain 'yield' may have no operand at all.
      parseTupleExpression(false, isTargetExpression, false);
      yieldExpr.done(PyElementTypes.YIELD_EXPRESSION);
      return true;
    }
  }
  else {
    return parseTupleExpression(false, isTargetExpression, false);
  }
}
/**
 * Parses one or more comma-separated test expressions; multiple elements are
 * wrapped in a TUPLE_EXPRESSION node, a single element is left as-is.
 *
 * @param stopOnIn           stop before an 'in' keyword
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 * @param oldTest            parse "old-style" tests (lambda/or-test, no conditional)
 * @return true if at least one element was parsed
 */
protected boolean parseTupleExpression(boolean stopOnIn, boolean isTargetExpression, final boolean oldTest) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  boolean exprParseResult = oldTest ? parseOldTestExpression() : parseNamedTestExpression(stopOnIn, isTargetExpression);
  if (!exprParseResult) {
    expr.drop();
    return false;
  }
  if (myBuilder.getTokenType() == PyTokenTypes.COMMA) {
    while (myBuilder.getTokenType() == PyTokenTypes.COMMA) {
      myBuilder.advanceLexer();
      SyntaxTreeBuilder.Marker expr2 = myBuilder.mark();
      exprParseResult = oldTest ? parseOldTestExpression() : parseNamedTestExpression(stopOnIn, isTargetExpression);
      if (!exprParseResult) {
        // Trailing comma: undo the empty element attempt and finish the tuple.
        expr2.rollbackTo();
        break;
      }
      expr2.drop();
    }
    expr.done(PyElementTypes.TUPLE_EXPRESSION);
  }
  else {
    // Single element: no tuple wrapper.
    expr.drop();
  }
  return true;
}
/**
 * Parses a single (non-tuple) test expression.
 *
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 * @return true if an expression was parsed
 */
public boolean parseSingleExpression(boolean isTargetExpression) {
  final boolean stopOnIn = false;
  return parseTestExpression(stopOnIn, isTargetExpression);
}
/**
 * Parses an "old-style" expression: a lambda or an or-test (no conditional
 * expression allowed).
 *
 * @return true if an expression was parsed
 */
public boolean parseOldExpression() {
  final boolean atLambda = myBuilder.getTokenType() == PyTokenTypes.LAMBDA_KEYWORD;
  return atLambda ? parseLambdaExpression(false) : parseORTestExpression(false, false);
}
/**
 * Parses a test expression that may be an assignment expression
 * ('name := value', the walrus operator).
 *
 * @param stopOnIn           stop before an 'in' keyword
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 * @return true if a valid expression was parsed; false for a syntactically
 *         illegal walrus LHS (it is still consumed for error recovery)
 */
public boolean parseNamedTestExpression(boolean stopOnIn, boolean isTargetExpression) {
  final SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  // Fast path: 'identifier :=' is definitely an assignment expression.
  if (isIdentifier(myBuilder) && myBuilder.lookAhead(1) == PyTokenTypes.COLONEQ) {
    buildTokenElement(PyElementTypes.TARGET_EXPRESSION, myBuilder);
    myBuilder.advanceLexer();
    if (!parseTestExpression(stopOnIn, false)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.ASSIGNMENT_EXPRESSION);
    return true;
  }
  else if (parseTestExpression(stopOnIn, isTargetExpression)) {
    if (!atToken(PyTokenTypes.COLONEQ)) {
      expr.drop();
      return true;
    }
    else {
      // we intentionally allow syntactically illegal assignment expressions like `self.attr := 42` or `xs[0] := 42` for user convenience
      // but don't parse qualified references in LHS as target expressions (unlike in assignment statements)
      myBuilder.error(message("PARSE.expected.identifier"));
      myBuilder.advanceLexer();
      if (!parseTestExpression(stopOnIn, false)) {
        myBuilder.error(message("PARSE.expected.expression"));
      }
      expr.done(PyElementTypes.ASSIGNMENT_EXPRESSION);
      return false;
    }
  }
  else {
    expr.drop();
    return false;
  }
}
/**
 * Parses a test expression: a lambda, or an or-test optionally followed by a
 * conditional expression ('a if cond else b').
 *
 * <p>Contains a heuristic: when 'if cond' is followed by ':' rather than
 * 'else', the text is assumed to be a regular if-statement (e.g. a bracket
 * was left unclosed), and the conditional part is rolled back.
 *
 * @param stopOnIn           stop before an 'in' keyword
 * @param isTargetExpression true when parsing the left-hand side of an assignment
 * @return true if an expression was parsed
 */
private boolean parseTestExpression(boolean stopOnIn, boolean isTargetExpression) {
  if (myBuilder.getTokenType() == PyTokenTypes.LAMBDA_KEYWORD) {
    return parseLambdaExpression(false);
  }
  SyntaxTreeBuilder.Marker condExpr = myBuilder.mark();
  if (!parseORTestExpression(stopOnIn, isTargetExpression)) {
    condExpr.drop();
    return false;
  }
  if (myBuilder.getTokenType() == PyTokenTypes.IF_KEYWORD) {
    SyntaxTreeBuilder.Marker conditionMarker = myBuilder.mark();
    myBuilder.advanceLexer();
    if (!parseORTestExpression(stopOnIn, isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    else {
      if (myBuilder.getTokenType() != PyTokenTypes.ELSE_KEYWORD) {
        if (atToken(PyTokenTypes.COLON)) { // it's regular if statement. Bracket wasn't closed or new line was lost
          conditionMarker.rollbackTo();
          condExpr.drop();
          return true;
        }
        else {
          myBuilder.error(message("PARSE.expected.else"));
        }
      }
      else {
        myBuilder.advanceLexer();
        if (!parseTestExpression(stopOnIn, isTargetExpression)) {
          myBuilder.error(message("PARSE.expected.expression"));
        }
      }
    }
    conditionMarker.drop();
    condExpr.done(PyElementTypes.CONDITIONAL_EXPRESSION);
  }
  else {
    condExpr.drop();
  }
  return true;
}
/**
 * Parses an "old-style" test: a lambda (with old-style body) or an or-test.
 *
 * @return true if an expression was parsed
 */
private boolean parseOldTestExpression() {
  if (myBuilder.getTokenType() != PyTokenTypes.LAMBDA_KEYWORD) {
    return parseORTestExpression(false, false);
  }
  return parseLambdaExpression(true);
}
/**
 * Parses a lambda expression starting at the 'lambda' keyword: the parameter
 * list up to ':' followed by the body expression.
 *
 * @param oldTest parse the body as an old-style test (no conditional expression)
 * @return always true (errors are reported via the builder)
 */
private boolean parseLambdaExpression(final boolean oldTest) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  myBuilder.advanceLexer();
  getFunctionParser().parseParameterListContents(PyTokenTypes.COLON, false, true);
  boolean parseExpressionResult = oldTest ? parseOldTestExpression() : parseSingleExpression(false);
  if (!parseExpressionResult) {
    myBuilder.error(message("PARSE.expected.expression"));
  }
  expr.done(PyElementTypes.LAMBDA_EXPRESSION);
  return true;
}
/**
 * Parses a left-associative chain of 'or' operations over and-tests,
 * producing nested BINARY_EXPRESSION nodes.
 *
 * @return true if the first operand was parsed
 */
protected boolean parseORTestExpression(boolean stopOnIn, boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseANDTestExpression(stopOnIn, isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (myBuilder.getTokenType() == PyTokenTypes.OR_KEYWORD) {
    myBuilder.advanceLexer();
    if (!parseANDTestExpression(stopOnIn, isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    // Close the current binary node and reopen a marker preceding it
    // so the next operator nests left-associatively.
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a left-associative chain of 'and' operations over not-tests,
 * producing nested BINARY_EXPRESSION nodes.
 *
 * @return true if the first operand was parsed
 */
private boolean parseANDTestExpression(boolean stopOnIn, boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseNOTTestExpression(stopOnIn, isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (myBuilder.getTokenType() == PyTokenTypes.AND_KEYWORD) {
    myBuilder.advanceLexer();
    if (!parseNOTTestExpression(stopOnIn, isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a (possibly nested) 'not' prefix expression, or falls through to a
 * comparison expression when no 'not' keyword is present.
 *
 * @return true if an expression was parsed
 */
private boolean parseNOTTestExpression(boolean stopOnIn, boolean isTargetExpression) {
  if (myBuilder.getTokenType() != PyTokenTypes.NOT_KEYWORD) {
    return parseComparisonExpression(stopOnIn, isTargetExpression);
  }
  final SyntaxTreeBuilder.Marker notExpr = myBuilder.mark();
  myBuilder.advanceLexer();
  if (!parseNOTTestExpression(stopOnIn, isTargetExpression)) {
    myBuilder.error(message("PARSE.expected.expression"));
  }
  notExpr.done(PyElementTypes.PREFIX_EXPRESSION);
  return true;
}
/**
 * Parses a chain of comparison operations (including 'in', 'not in', 'is',
 * 'is not'), producing nested BINARY_EXPRESSION nodes.
 *
 * @param stopOnIn stop before a bare 'in' keyword (comprehension target context)
 * @return true if the first operand was parsed
 */
private boolean parseComparisonExpression(boolean stopOnIn, boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseStarExpression(isTargetExpression)) {
    expr.drop();
    return false;
  }
  if (stopOnIn && atToken(PyTokenTypes.IN_KEYWORD)) {
    expr.drop();
    return true;
  }
  while (PyTokenTypes.COMPARISON_OPERATIONS.contains(myBuilder.getTokenType())) {
    if (atToken(PyTokenTypes.NOT_KEYWORD)) {
      // 'not' is a comparison only as part of 'not in'; otherwise back out.
      SyntaxTreeBuilder.Marker notMarker = myBuilder.mark();
      myBuilder.advanceLexer();
      if (!atToken(PyTokenTypes.IN_KEYWORD)) {
        notMarker.rollbackTo();
        break;
      }
      notMarker.drop();
      myBuilder.advanceLexer();
    }
    else if (atToken(PyTokenTypes.IS_KEYWORD)) {
      // 'is' optionally followed by 'not'.
      myBuilder.advanceLexer();
      if (myBuilder.getTokenType() == PyTokenTypes.NOT_KEYWORD) {
        myBuilder.advanceLexer();
      }
    }
    else {
      myBuilder.advanceLexer();
    }
    if (!parseBitwiseORExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a starred expression ('*expr') or falls through to a bitwise-or
 * expression when no leading '*' is present.
 *
 * @return true if an expression was parsed
 */
private boolean parseStarExpression(boolean isTargetExpression) {
  if (!atToken(PyTokenTypes.MULT)) {
    return parseBitwiseORExpression(isTargetExpression);
  }
  final SyntaxTreeBuilder.Marker starExpr = myBuilder.mark();
  nextToken();
  if (!parseBitwiseORExpression(isTargetExpression)) {
    myBuilder.error(message("PARSE.expected.expression"));
    starExpr.drop();
    return false;
  }
  starExpr.done(PyElementTypes.STAR_EXPRESSION);
  return true;
}
/**
 * Parses a double-starred expression ('**expr') or falls through to a
 * bitwise-or expression when no leading '**' is present.
 *
 * @return true if an expression was parsed
 */
private boolean parseDoubleStarExpression(boolean isTargetExpression) {
  if (!atToken(PyTokenTypes.EXP)) {
    return parseBitwiseORExpression(isTargetExpression);
  }
  final SyntaxTreeBuilder.Marker starExpr = myBuilder.mark();
  nextToken();
  if (!parseBitwiseORExpression(isTargetExpression)) {
    myBuilder.error(message("PARSE.expected.expression"));
    starExpr.drop();
    return false;
  }
  starExpr.done(PyElementTypes.DOUBLE_STAR_EXPRESSION);
  return true;
}
/**
 * Parses a left-associative chain of '|' operations over xor-expressions.
 *
 * @return true if the first operand was parsed
 */
private boolean parseBitwiseORExpression(boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseBitwiseXORExpression(isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (atToken(PyTokenTypes.OR)) {
    myBuilder.advanceLexer();
    if (!parseBitwiseXORExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a left-associative chain of '^' operations over and-expressions.
 *
 * @return true if the first operand was parsed
 */
private boolean parseBitwiseXORExpression(boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseBitwiseANDExpression(isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (atToken(PyTokenTypes.XOR)) {
    myBuilder.advanceLexer();
    if (!parseBitwiseANDExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a left-associative chain of '&amp;' operations over shift-expressions.
 *
 * @return true if the first operand was parsed
 */
private boolean parseBitwiseANDExpression(boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseShiftExpression(isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (atToken(PyTokenTypes.AND)) {
    myBuilder.advanceLexer();
    if (!parseShiftExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a left-associative chain of shift operations ('&lt;&lt;', '&gt;&gt;')
 * over additive expressions.
 *
 * @return true if the first operand was parsed
 */
private boolean parseShiftExpression(boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseAdditiveExpression(myBuilder, isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (PyTokenTypes.SHIFT_OPERATIONS.contains(myBuilder.getTokenType())) {
    myBuilder.advanceLexer();
    if (!parseAdditiveExpression(myBuilder, isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a left-associative chain of additive operations ('+', '-') over
 * multiplicative expressions.
 *
 * <p>NOTE(review): the {@code myBuilder} parameter shadows the field of the
 * same name; the caller passes the field itself, so behavior is unchanged,
 * but the shadowing is easy to misread.
 *
 * @return true if the first operand was parsed
 */
private boolean parseAdditiveExpression(final SyntaxTreeBuilder myBuilder, boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseMultiplicativeExpression(isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (PyTokenTypes.ADDITIVE_OPERATIONS.contains(myBuilder.getTokenType())) {
    myBuilder.advanceLexer();
    if (!parseMultiplicativeExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a left-associative chain of multiplicative operations
 * ('*', '/', '//', '%', '@') over unary expressions.
 *
 * @return true if the first operand was parsed
 */
private boolean parseMultiplicativeExpression(boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseUnaryExpression(isTargetExpression)) {
    expr.drop();
    return false;
  }
  while (PyTokenTypes.MULTIPLICATIVE_OPERATIONS.contains(myBuilder.getTokenType())) {
    myBuilder.advanceLexer();
    if (!parseUnaryExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
    expr = expr.precede();
  }
  expr.drop();
  return true;
}
/**
 * Parses a (possibly nested) unary prefix expression, or falls through to a
 * power expression when no unary operator is present.
 *
 * @return true if an expression was parsed
 */
protected boolean parseUnaryExpression(boolean isTargetExpression) {
  final IElementType tokenType = myBuilder.getTokenType();
  if (!PyTokenTypes.UNARY_OPERATIONS.contains(tokenType)) {
    return parsePowerExpression(isTargetExpression);
  }
  final SyntaxTreeBuilder.Marker prefixExpr = myBuilder.mark();
  myBuilder.advanceLexer();
  if (!parseUnaryExpression(isTargetExpression)) {
    myBuilder.error(message("PARSE.expected.expression"));
  }
  prefixExpr.done(PyElementTypes.PREFIX_EXPRESSION);
  return true;
}
/**
 * Parses a power expression: an await/member expression optionally followed
 * by '**' and a unary expression (the right operand binds via
 * parseUnaryExpression, matching the right-associativity of '**').
 *
 * @return true if the first operand was parsed
 */
private boolean parsePowerExpression(boolean isTargetExpression) {
  SyntaxTreeBuilder.Marker expr = myBuilder.mark();
  if (!parseAwaitExpression(isTargetExpression)) {
    expr.drop();
    return false;
  }
  if (myBuilder.getTokenType() == PyTokenTypes.EXP) {
    myBuilder.advanceLexer();
    if (!parseUnaryExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
    }
    expr.done(PyElementTypes.BINARY_EXPRESSION);
  }
  else {
    expr.drop();
  }
  return true;
}
/**
 * Parses an 'await' prefix expression, or falls through to a member
 * expression when no 'await' keyword is present. Assigning to an await
 * expression is reported as an error.
 *
 * @return true if an expression was parsed
 */
private boolean parseAwaitExpression(boolean isTargetExpression) {
  if (atToken(PyTokenTypes.AWAIT_KEYWORD)) {
    final SyntaxTreeBuilder.Marker expr = myBuilder.mark();
    myBuilder.advanceLexer();
    if (!parseMemberExpression(isTargetExpression)) {
      myBuilder.error(message("PARSE.expected.expression"));
      expr.done(PyElementTypes.PREFIX_EXPRESSION);
    }
    else {
      if (isTargetExpression) {
        // 'await x = ...' is illegal; turn the whole node into an error.
        expr.error(message("can.t.assign.to.await.expression"));
      }
      else {
        expr.done(PyElementTypes.PREFIX_EXPRESSION);
      }
    }
    return true;
  }
  else {
    return parseMemberExpression(isTargetExpression);
  }
}
/**
 * Tests whether the current position starts a 'for' or 'async for' clause.
 *
 * <p>Side effect: when an ASYNC keyword is present it is consumed (via
 * matchToken), and an error is reported if 'for' does not follow it.
 *
 * @return true if positioned at the FOR keyword (possibly after consuming ASYNC)
 */
private boolean atForOrAsyncFor() {
  if (atToken(PyTokenTypes.FOR_KEYWORD)) {
    return true;
  }
  else if (matchToken(PyTokenTypes.ASYNC_KEYWORD)) {
    if (atToken(PyTokenTypes.FOR_KEYWORD)) {
      return true;
    }
    else {
      myBuilder.error(message("for.expected"));
      return false;
    }
  }
  return false;
}
}
|
|
package br.ufsc.ine.leb.projetos.estoria.testes;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
import org.junit.runner.Description;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import br.ufsc.ine.leb.projetos.estoria.EscoltadorDeTestes;
import br.ufsc.ine.leb.projetos.estoria.SuiteDeTeste;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.classes.ClasseVazia;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.testes.ClasseDeTeste101;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.testes.ClasseDeTeste110;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.testes.SuiteDeTeste11;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.testes.SuiteDeTeste12;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.testes.SuiteDeTeste13;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.testes.SuiteDeTeste14;
import br.ufsc.ine.leb.projetos.estoria.testes.figuracao.testes.SuiteDeTeste16;
@RunWith(JUnit4.class)
public final class TesteEscoltadorDeTestesDescricoes {
/** Checks JUnit's built-in empty description: a leaf pseudo-test named "No Tests". */
@Test
public void semTestesJunit() throws Exception {
  final Description emptyDescription = Description.EMPTY;
  assertNull(emptyDescription.getTestClass());
  assertEquals("No Tests", emptyDescription.getDisplayName());
  assertEquals("No Tests", emptyDescription.getClassName());
  assertNull(emptyDescription.getMethodName());
  assertFalse(emptyDescription.isSuite());
  assertTrue(emptyDescription.isTest());
  assertTrue(emptyDescription.isEmpty());
  assertEquals(0, emptyDescription.getChildren().size());
  assertEquals(1, emptyDescription.testCount());
}
/**
 * A suite with zero test classes should describe itself as a suite whose only
 * child is the "No Tests" placeholder description.
 */
@Test
public void zeroClasses() throws Exception {
  SuiteDeTeste suite = new SuiteDeTeste(SuiteDeTeste11.class);
  EscoltadorDeTestes escoltador = new EscoltadorDeTestes(suite);
  Description descricaoDoSeletor = escoltador.getDescription();
  // Root description: the suite class itself.
  assertEquals(SuiteDeTeste11.class, descricaoDoSeletor.getTestClass());
  assertEquals(SuiteDeTeste11.class.getName(), descricaoDoSeletor.getClassName());
  assertNull(descricaoDoSeletor.getMethodName());
  assertTrue(descricaoDoSeletor.isSuite());
  assertFalse(descricaoDoSeletor.isTest());
  assertFalse(descricaoDoSeletor.isEmpty());
  assertEquals(1, descricaoDoSeletor.getChildren().size());
  assertEquals(1, descricaoDoSeletor.testCount());
  assertEquals(1, escoltador.testCount());
  // Single child: the "No Tests" placeholder leaf.
  Description descricaoDoTeste = descricaoDoSeletor.getChildren().get(0);
  assertNull(descricaoDoTeste.getTestClass());
  assertEquals("No Tests", descricaoDoTeste.getClassName());
  assertNull(descricaoDoTeste.getMethodName());
  assertFalse(descricaoDoTeste.isSuite());
  assertTrue(descricaoDoTeste.isTest());
  assertTrue(descricaoDoTeste.isEmpty());
  assertEquals(0, descricaoDoTeste.getChildren().size());
  assertEquals(1, descricaoDoTeste.testCount());
  // NOTE(review): duplicates the escoltador.testCount() assertion above.
  assertEquals(1, escoltador.testCount());
}
@Test
public void zeroTestesDeUmaClasse() throws Exception {
SuiteDeTeste suite = new SuiteDeTeste(SuiteDeTeste12.class);
EscoltadorDeTestes escoltador = new EscoltadorDeTestes(suite);
Description descricaoDoSeletor = escoltador.getDescription();
assertEquals(SuiteDeTeste12.class, descricaoDoSeletor.getTestClass());
assertEquals(SuiteDeTeste12.class.getName(), descricaoDoSeletor.getClassName());
assertEquals(null, descricaoDoSeletor.getMethodName());
assertTrue(descricaoDoSeletor.isSuite());
assertFalse(descricaoDoSeletor.isTest());
assertFalse(descricaoDoSeletor.isEmpty());
assertEquals(1, descricaoDoSeletor.getChildren().size());
assertEquals(1, descricaoDoSeletor.testCount());
assertEquals(1, escoltador.testCount());
Description descricaoDaClasseDeTeste = descricaoDoSeletor.getChildren().get(0);
assertEquals(ClasseVazia.class, descricaoDaClasseDeTeste.getTestClass());
assertEquals(ClasseVazia.class.getName(), descricaoDaClasseDeTeste.getClassName());
assertEquals(null, descricaoDaClasseDeTeste.getMethodName());
assertTrue(descricaoDaClasseDeTeste.isSuite());
assertFalse(descricaoDaClasseDeTeste.isTest());
assertFalse(descricaoDaClasseDeTeste.isEmpty());
assertEquals(1, descricaoDaClasseDeTeste.getChildren().size());
assertEquals(1, descricaoDaClasseDeTeste.testCount());
Description descricaoDoTeste = descricaoDaClasseDeTeste.getChildren().get(0);
assertNull(descricaoDoTeste.getTestClass());
assertEquals("No Tests", descricaoDoTeste.getClassName());
assertNull(descricaoDoTeste.getMethodName());
assertFalse(descricaoDoTeste.isSuite());
assertTrue(descricaoDoTeste.isTest());
assertTrue(descricaoDoTeste.isEmpty());
assertEquals(0, descricaoDoTeste.getChildren().size());
assertEquals(1, descricaoDoTeste.testCount());
assertEquals(1, descricaoDoTeste.testCount());
}
@Test
public void umTesteDeUmaClasse() throws Exception {
SuiteDeTeste suite = new SuiteDeTeste(SuiteDeTeste13.class);
EscoltadorDeTestes escoltador = new EscoltadorDeTestes(suite);
Description descricaoDoSeletor = escoltador.getDescription();
assertEquals(SuiteDeTeste13.class, descricaoDoSeletor.getTestClass());
assertEquals(SuiteDeTeste13.class.getName(), descricaoDoSeletor.getClassName());
assertEquals(null, descricaoDoSeletor.getMethodName());
assertTrue(descricaoDoSeletor.isSuite());
assertFalse(descricaoDoSeletor.isTest());
assertFalse(descricaoDoSeletor.isEmpty());
assertEquals(1, descricaoDoSeletor.getChildren().size());
assertEquals(1, descricaoDoSeletor.testCount());
Description descricaoDaClasseDeTeste = descricaoDoSeletor.getChildren().get(0);
assertEquals(ClasseDeTeste101.class, descricaoDaClasseDeTeste.getTestClass());
assertEquals(ClasseDeTeste101.class.getName(), descricaoDaClasseDeTeste.getClassName());
assertEquals(null, descricaoDaClasseDeTeste.getMethodName());
assertTrue(descricaoDaClasseDeTeste.isSuite());
assertFalse(descricaoDaClasseDeTeste.isTest());
assertFalse(descricaoDaClasseDeTeste.isEmpty());
assertEquals(1, descricaoDaClasseDeTeste.getChildren().size());
assertEquals(1, descricaoDaClasseDeTeste.testCount());
Description descricaoDoTeste = descricaoDaClasseDeTeste.getChildren().get(0);
assertEquals(ClasseDeTeste101.class, descricaoDoTeste.getTestClass());
assertEquals(ClasseDeTeste101.class.getName(), descricaoDoTeste.getClassName());
assertEquals("testar", descricaoDoTeste.getMethodName());
assertFalse(descricaoDoTeste.isSuite());
assertTrue(descricaoDoTeste.isTest());
assertFalse(descricaoDoTeste.isEmpty());
assertEquals(0, descricaoDoTeste.getChildren().size());
assertEquals(1, descricaoDoTeste.testCount());
assertEquals(1, escoltador.testCount());
}
@Test
public void doisTestesDeUmaClasse() throws Exception {
SuiteDeTeste suite = new SuiteDeTeste(SuiteDeTeste14.class);
EscoltadorDeTestes escoltador = new EscoltadorDeTestes(suite);
Description descricaoDoSeletor = escoltador.getDescription();
assertEquals(SuiteDeTeste14.class, descricaoDoSeletor.getTestClass());
assertEquals(SuiteDeTeste14.class.getName(), descricaoDoSeletor.getClassName());
assertEquals(null, descricaoDoSeletor.getMethodName());
assertTrue(descricaoDoSeletor.isSuite());
assertFalse(descricaoDoSeletor.isTest());
assertFalse(descricaoDoSeletor.isEmpty());
assertEquals(1, descricaoDoSeletor.getChildren().size());
assertEquals(2, descricaoDoSeletor.testCount());
assertEquals(2, escoltador.testCount());
Description descricaoDaClasseDeTeste = descricaoDoSeletor.getChildren().get(0);
assertEquals(ClasseDeTeste110.class, descricaoDaClasseDeTeste.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDaClasseDeTeste.getClassName());
assertEquals(null, descricaoDaClasseDeTeste.getMethodName());
assertTrue(descricaoDaClasseDeTeste.isSuite());
assertFalse(descricaoDaClasseDeTeste.isTest());
assertFalse(descricaoDaClasseDeTeste.isEmpty());
assertEquals(2, descricaoDaClasseDeTeste.getChildren().size());
assertEquals(2, descricaoDaClasseDeTeste.testCount());
Description descricaoDoTeste1 = descricaoDaClasseDeTeste.getChildren().get(0);
assertEquals(ClasseDeTeste110.class, descricaoDoTeste1.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDoTeste1.getClassName());
assertEquals("testar1", descricaoDoTeste1.getMethodName());
assertFalse(descricaoDoTeste1.isSuite());
assertTrue(descricaoDoTeste1.isTest());
assertFalse(descricaoDoTeste1.isEmpty());
assertEquals(0, descricaoDoTeste1.getChildren().size());
assertEquals(1, descricaoDoTeste1.testCount());
Description descricaoDoTeste2 = descricaoDaClasseDeTeste.getChildren().get(1);
assertEquals(ClasseDeTeste110.class, descricaoDoTeste2.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDoTeste2.getClassName());
assertEquals("testar2", descricaoDoTeste2.getMethodName());
assertFalse(descricaoDoTeste2.isSuite());
assertTrue(descricaoDoTeste2.isTest());
assertFalse(descricaoDoTeste2.isEmpty());
assertEquals(0, descricaoDoTeste2.getChildren().size());
assertEquals(1, descricaoDoTeste2.testCount());
}
@Test
public void tresTestesDeDuasClasses() throws Exception {
SuiteDeTeste suite = new SuiteDeTeste(SuiteDeTeste16.class);
EscoltadorDeTestes escoltador = new EscoltadorDeTestes(suite);
Description descricaoDoSeletor = escoltador.getDescription();
assertEquals(SuiteDeTeste16.class, descricaoDoSeletor.getTestClass());
assertEquals(SuiteDeTeste16.class.getName(), descricaoDoSeletor.getClassName());
assertEquals(null, descricaoDoSeletor.getMethodName());
assertTrue(descricaoDoSeletor.isSuite());
assertFalse(descricaoDoSeletor.isTest());
assertFalse(descricaoDoSeletor.isEmpty());
assertEquals(2, descricaoDoSeletor.getChildren().size());
assertEquals(3, descricaoDoSeletor.testCount());
assertEquals(3, escoltador.testCount());
Description descricaoDaClasseDeTeste1 = descricaoDoSeletor.getChildren().get(0);
assertEquals(ClasseDeTeste101.class, descricaoDaClasseDeTeste1.getTestClass());
assertEquals(ClasseDeTeste101.class.getName(), descricaoDaClasseDeTeste1.getClassName());
assertEquals(null, descricaoDaClasseDeTeste1.getMethodName());
assertTrue(descricaoDaClasseDeTeste1.isSuite());
assertFalse(descricaoDaClasseDeTeste1.isTest());
assertFalse(descricaoDaClasseDeTeste1.isEmpty());
assertEquals(1, descricaoDaClasseDeTeste1.getChildren().size());
assertEquals(1, descricaoDaClasseDeTeste1.testCount());
Description descricaoDaClasseDeTeste2 = descricaoDoSeletor.getChildren().get(1);
assertEquals(ClasseDeTeste110.class, descricaoDaClasseDeTeste2.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDaClasseDeTeste2.getClassName());
assertEquals(null, descricaoDaClasseDeTeste2.getMethodName());
assertTrue(descricaoDaClasseDeTeste2.isSuite());
assertFalse(descricaoDaClasseDeTeste2.isTest());
assertFalse(descricaoDaClasseDeTeste2.isEmpty());
assertEquals(2, descricaoDaClasseDeTeste2.getChildren().size());
assertEquals(2, descricaoDaClasseDeTeste2.testCount());
Description descricaoDoTeste1 = descricaoDaClasseDeTeste1.getChildren().get(0);
assertEquals(ClasseDeTeste101.class, descricaoDoTeste1.getTestClass());
assertEquals(ClasseDeTeste101.class.getName(), descricaoDoTeste1.getClassName());
assertEquals("testar", descricaoDoTeste1.getMethodName());
assertFalse(descricaoDoTeste1.isSuite());
assertTrue(descricaoDoTeste1.isTest());
assertFalse(descricaoDoTeste1.isEmpty());
assertEquals(0, descricaoDoTeste1.getChildren().size());
assertEquals(1, descricaoDoTeste1.testCount());
Description descricaoDoTeste2 = descricaoDaClasseDeTeste2.getChildren().get(0);
assertEquals(ClasseDeTeste110.class, descricaoDoTeste2.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDoTeste2.getClassName());
assertEquals("testar1", descricaoDoTeste2.getMethodName());
assertFalse(descricaoDoTeste2.isSuite());
assertTrue(descricaoDoTeste2.isTest());
assertFalse(descricaoDoTeste2.isEmpty());
assertEquals(0, descricaoDoTeste2.getChildren().size());
assertEquals(1, descricaoDoTeste2.testCount());
Description descricaoDoTeste3 = descricaoDaClasseDeTeste2.getChildren().get(1);
assertEquals(ClasseDeTeste110.class, descricaoDoTeste3.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDoTeste3.getClassName());
assertEquals("testar2", descricaoDoTeste3.getMethodName());
assertFalse(descricaoDoTeste3.isSuite());
assertTrue(descricaoDoTeste3.isTest());
assertFalse(descricaoDoTeste3.isEmpty());
assertEquals(0, descricaoDoTeste3.getChildren().size());
assertEquals(1, descricaoDoTeste3.testCount());
}
@Test
public void zeroTestesDeUmaClasseSemSuite() throws Exception {
SuiteDeTeste suite = new SuiteDeTeste(ClasseVazia.class);
EscoltadorDeTestes escoltador = new EscoltadorDeTestes(suite);
Description descricaoDaClasseDeTeste = escoltador.getDescription();
assertEquals(ClasseVazia.class, descricaoDaClasseDeTeste.getTestClass());
assertEquals(ClasseVazia.class.getName(), descricaoDaClasseDeTeste.getClassName());
assertEquals(null, descricaoDaClasseDeTeste.getMethodName());
assertTrue(descricaoDaClasseDeTeste.isSuite());
assertFalse(descricaoDaClasseDeTeste.isTest());
assertFalse(descricaoDaClasseDeTeste.isEmpty());
assertEquals(1, descricaoDaClasseDeTeste.getChildren().size());
assertEquals(1, descricaoDaClasseDeTeste.testCount());
Description descricaoDoTeste = descricaoDaClasseDeTeste.getChildren().get(0);
assertNull(descricaoDoTeste.getTestClass());
assertEquals("No Tests", descricaoDoTeste.getClassName());
assertNull(descricaoDoTeste.getMethodName());
assertFalse(descricaoDoTeste.isSuite());
assertTrue(descricaoDoTeste.isTest());
assertTrue(descricaoDoTeste.isEmpty());
assertEquals(0, descricaoDoTeste.getChildren().size());
assertEquals(1, descricaoDoTeste.testCount());
assertEquals(1, descricaoDoTeste.testCount());
}
@Test
public void doisTestesDeUmaClasseSemSuite() throws Exception {
SuiteDeTeste suite = new SuiteDeTeste(ClasseDeTeste110.class);
EscoltadorDeTestes escoltador = new EscoltadorDeTestes(suite);
Description descricaoDoSeletor = escoltador.getDescription();
assertEquals(ClasseDeTeste110.class, descricaoDoSeletor.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDoSeletor.getClassName());
assertEquals(null, descricaoDoSeletor.getMethodName());
assertTrue(descricaoDoSeletor.isSuite());
assertFalse(descricaoDoSeletor.isTest());
assertFalse(descricaoDoSeletor.isEmpty());
assertEquals(2, descricaoDoSeletor.getChildren().size());
assertEquals(2, descricaoDoSeletor.testCount());
assertEquals(2, escoltador.testCount());
Description descricaoDoTeste1 = descricaoDoSeletor.getChildren().get(0);
assertEquals(ClasseDeTeste110.class, descricaoDoTeste1.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDoTeste1.getClassName());
assertEquals("testar1", descricaoDoTeste1.getMethodName());
assertFalse(descricaoDoTeste1.isSuite());
assertTrue(descricaoDoTeste1.isTest());
assertFalse(descricaoDoTeste1.isEmpty());
assertEquals(0, descricaoDoTeste1.getChildren().size());
assertEquals(1, descricaoDoTeste1.testCount());
Description descricaoDoTeste2 = descricaoDoSeletor.getChildren().get(1);
assertEquals(ClasseDeTeste110.class, descricaoDoTeste2.getTestClass());
assertEquals(ClasseDeTeste110.class.getName(), descricaoDoTeste2.getClassName());
assertEquals("testar2", descricaoDoTeste2.getMethodName());
assertFalse(descricaoDoTeste2.isSuite());
assertTrue(descricaoDoTeste2.isTest());
assertFalse(descricaoDoTeste2.isEmpty());
assertEquals(0, descricaoDoTeste2.getChildren().size());
assertEquals(1, descricaoDoTeste2.testCount());
}
}
|
|
/*
* Copyright 2015-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.apple;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import com.facebook.buck.apple.toolchain.ApplePlatform;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.facebook.buck.io.filesystem.TestProjectFilesystems;
import com.facebook.buck.io.filesystem.impl.DefaultProjectFilesystemFactory;
import com.facebook.buck.model.BuildTargetFactory;
import com.facebook.buck.model.BuildTargets;
import com.facebook.buck.testutil.TemporaryPaths;
import com.facebook.buck.testutil.TestConsole;
import com.facebook.buck.testutil.integration.ProjectWorkspace;
import com.facebook.buck.testutil.integration.TestDataHelper;
import com.facebook.buck.testutil.integration.ZipInspector;
import com.facebook.buck.util.DefaultProcessExecutor;
import com.facebook.buck.util.ProcessExecutor;
import com.facebook.buck.util.ProcessExecutorParams;
import com.facebook.buck.util.environment.Platform;
import com.facebook.buck.util.unarchive.ArchiveFormat;
import com.facebook.buck.util.unarchive.ExistingFileMode;
import com.google.common.collect.ImmutableList;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.EnumSet;
import java.util.Optional;
import java.util.Set;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
public class BuiltinApplePackageIntegrationTest {
  @Rule public TemporaryPaths tmp = new TemporaryPaths();

  private ProjectFilesystem filesystem;

  /** These integration tests require macOS with the macosx Apple platform available. */
  @Before
  public void setUp() throws InterruptedException {
    assumeTrue(Platform.detect() == Platform.MACOS);
    assumeTrue(AppleNativeIntegrationTestUtils.isApplePlatformAvailable(ApplePlatform.MACOSX));
    filesystem = TestProjectFilesystems.createProjectFilesystem(tmp.getRoot());
  }

  /** Returns true when {@code directory} contains no entries at all. */
  private static boolean isDirEmpty(Path directory) throws IOException {
    try (DirectoryStream<Path> dirStream = Files.newDirectoryStream(directory)) {
      return !dirStream.iterator().hasNext();
    }
  }

  /**
   * Builds the app, cleans, then builds the package: the app should be fetched from the dir
   * cache while the .ipa is built locally, and the .ipa layout/PkgInfo must match expectations.
   */
  @Test
  public void packageHasProperStructure() throws IOException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(
            this, "simple_application_bundle_no_debug", tmp);
    workspace.setUp();
    workspace.enableDirCache();
    BuildTarget appTarget =
        BuildTargetFactory.newInstance("//:DemoApp#no-debug,no-include-frameworks");
    workspace
        .runBuckCommand("build", appTarget.getUnflavoredBuildTarget().getFullyQualifiedName())
        .assertSuccess();
    workspace.getBuildLog().assertTargetBuiltLocally(appTarget.getFullyQualifiedName());
    workspace.runBuckCommand("clean", "--keep-cache").assertSuccess();
    BuildTarget packageTarget = BuildTargetFactory.newInstance("//:DemoAppPackage");
    workspace.runBuckCommand("build", packageTarget.getFullyQualifiedName()).assertSuccess();
    workspace.getBuildLog().assertTargetWasFetchedFromCache(appTarget.getFullyQualifiedName());
    workspace.getBuildLog().assertTargetBuiltLocally(packageTarget.getFullyQualifiedName());
    Path templateDir =
        TestDataHelper.getTestDataScenario(this, "simple_application_bundle_no_debug");
    ZipInspector zipInspector =
        new ZipInspector(
            workspace.getPath(BuildTargets.getGenPath(filesystem, packageTarget, "%s.ipa")));
    zipInspector.assertFileExists("Payload/DemoApp.app/DemoApp");
    zipInspector.assertFileDoesNotExist("WatchKitSupport");
    zipInspector.assertFileDoesNotExist("WatchKitSupport2");
    zipInspector.assertFileContents(
        "Payload/DemoApp.app/PkgInfo",
        new String(
            Files.readAllBytes(
                templateDir.resolve("DemoApp_output.expected/DemoApp.app/PkgInfo.expected")),
            UTF_8));
  }

  /** A Swift app package must carry the Swift runtime dylibs under SwiftSupport/. */
  @Test
  public void packageHasProperStructureForSwift() throws IOException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(
            this, "simple_application_bundle_swift_no_debug", tmp);
    workspace.setUp();
    workspace.enableDirCache();
    BuildTarget packageTarget = BuildTargetFactory.newInstance("//:DemoAppPackage");
    workspace.runBuckCommand("build", packageTarget.getFullyQualifiedName()).assertSuccess();
    workspace.getBuildLog().assertTargetBuiltLocally(packageTarget.getFullyQualifiedName());
    ZipInspector zipInspector =
        new ZipInspector(
            workspace.getPath(BuildTargets.getGenPath(filesystem, packageTarget, "%s.ipa")));
    zipInspector.assertFileExists("SwiftSupport/iphonesimulator/libswiftCore.dylib");
  }

  /** A package without Swift code must not contain a SwiftSupport directory. */
  @Test
  public void swiftSupportIsOnlyAddedIfPackageContainsSwiftCode() throws IOException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(
            this, "simple_application_bundle_no_debug", tmp);
    workspace.setUp();
    workspace.enableDirCache();
    BuildTarget packageTarget = BuildTargetFactory.newInstance("//:DemoAppPackage");
    workspace.runBuckCommand("build", packageTarget.getFullyQualifiedName()).assertSuccess();
    workspace.getBuildLog().assertTargetBuiltLocally(packageTarget.getFullyQualifiedName());
    ZipInspector zipInspector =
        new ZipInspector(
            workspace.getPath(BuildTargets.getGenPath(filesystem, packageTarget, "%s.ipa")));
    zipInspector.assertFileDoesNotExist("SwiftSupport");
  }

  /** watchOS 2.0: the WatchKit stub is expected both outside and inside the bundle. */
  @Test
  public void packageHasProperStructureForWatch20() throws IOException, InterruptedException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(this, "watch_application_bundle", tmp);
    workspace.setUp();
    workspace.addBuckConfigLocalOption("apple", "watchsimulator_target_sdk_version", "2.0");
    packageHasProperStructureForWatchHelper(workspace, true);
  }

  /** watchOS 2.1: the WatchKit stub is only expected outside the bundle. */
  @Test
  public void packageHasProperStructureForWatch21() throws IOException, InterruptedException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(this, "watch_application_bundle", tmp);
    workspace.setUp();
    workspace.addBuckConfigLocalOption("apple", "watchsimulator_target_sdk_version", "2.1");
    packageHasProperStructureForWatchHelper(workspace, false);
  }

  /**
   * Builds and unpacks the watch-app .ipa, then checks the WatchKitSupport2 stub, the empty
   * Symbols/ directory, and (optionally) the stub copied inside the bundle.
   */
  private void packageHasProperStructureForWatchHelper(
      ProjectWorkspace workspace, boolean shouldHaveStubInsideBundle)
      throws IOException, InterruptedException {
    BuildTarget packageTarget = BuildTargetFactory.newInstance("//:DemoAppPackage");
    workspace.runBuckCommand("build", packageTarget.getFullyQualifiedName()).assertSuccess();
    Path destination = workspace.getDestPath();
    ArchiveFormat.ZIP
        .getUnarchiver()
        .extractArchive(
            new DefaultProjectFilesystemFactory(),
            workspace.getPath(BuildTargets.getGenPath(filesystem, packageTarget, "%s.ipa")),
            destination,
            ExistingFileMode.OVERWRITE_AND_CLEAN_DIRECTORIES);
    Path stubOutsideBundle = destination.resolve("WatchKitSupport2/WK");
    assertTrue(Files.isExecutable(stubOutsideBundle));
    assertTrue(Files.isDirectory(destination.resolve("Symbols")));
    assertTrue(isDirEmpty(destination.resolve("Symbols")));
    if (shouldHaveStubInsideBundle) {
      Path stubInsideBundle =
          destination.resolve("Payload/DemoApp.app/Watch/DemoWatchApp.app/_WatchKitStub/WK");
      assertTrue(Files.exists(stubInsideBundle));
      // Inner stub must be an exact copy of the outer one.
      assertEquals(
          new String(Files.readAllBytes(stubInsideBundle)),
          new String(Files.readAllBytes(stubOutsideBundle)));
    }
  }

  /** Legacy watch apps use WatchKitSupport (not 2) and have no Symbols directory. */
  @Test
  public void packageHasProperStructureForLegacyWatch() throws IOException, InterruptedException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(
            this, "legacy_watch_application_bundle", tmp);
    workspace.setUp();
    BuildTarget packageTarget = BuildTargetFactory.newInstance("//:DemoAppPackage");
    workspace.runBuckCommand("build", packageTarget.getFullyQualifiedName()).assertSuccess();
    Path destination = workspace.getDestPath();
    ArchiveFormat.ZIP
        .getUnarchiver()
        .extractArchive(
            new DefaultProjectFilesystemFactory(),
            workspace.getPath(BuildTargets.getGenPath(filesystem, packageTarget, "%s.ipa")),
            destination,
            ExistingFileMode.OVERWRITE_AND_CLEAN_DIRECTORIES);
    Path stub = destination.resolve("WatchKitSupport/WK");
    assertTrue(Files.isExecutable(stub));
    assertFalse(Files.isDirectory(destination.resolve("Symbols")));
  }

  /** A multi-flavor package should yield a fat binary; verified via `lipo -info`. */
  @Test
  public void packageSupportsFatBinaries() throws IOException, InterruptedException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(
            this, "simple_application_bundle_no_debug", tmp);
    workspace.setUp();
    BuildTarget packageTarget =
        BuildTargetFactory.newInstance(
            "//:DemoAppPackage#iphonesimulator-i386,iphonesimulator-x86_64");
    workspace.runBuckCommand("build", packageTarget.getFullyQualifiedName()).assertSuccess();
    ArchiveFormat.ZIP
        .getUnarchiver()
        .extractArchive(
            new DefaultProjectFilesystemFactory(),
            workspace.getPath(BuildTargets.getGenPath(filesystem, packageTarget, "%s.ipa")),
            workspace.getDestPath(),
            ExistingFileMode.OVERWRITE_AND_CLEAN_DIRECTORIES);
    ProcessExecutor executor = new DefaultProcessExecutor(new TestConsole());
    ProcessExecutorParams processExecutorParams =
        ProcessExecutorParams.builder()
            .setCommand(
                ImmutableList.of(
                    "lipo",
                    "-info",
                    workspace.getDestPath().resolve("Payload/DemoApp.app/DemoApp").toString()))
            .build();
    // Specify that stdout is expected, or else output may be wrapped in Ansi escape chars.
    Set<ProcessExecutor.Option> options =
        EnumSet.of(ProcessExecutor.Option.EXPECTING_STD_OUT, ProcessExecutor.Option.IS_SILENT);
    ProcessExecutor.Result result =
        executor.launchAndExecute(
            processExecutorParams,
            options,
            /* stdin */ Optional.empty(),
            /* timeOutMs */ Optional.empty(),
            /* timeOutHandler */ Optional.empty());
    assertEquals(result.getExitCode(), 0);
    assertTrue(result.getStdout().isPresent());
    String output = result.getStdout().get();
    assertTrue(output.contains("i386"));
    assertTrue(output.contains("x86_64"));
  }

  /** With caching disabled, a clean must force the package to be rebuilt locally. */
  @Test
  public void testDisablingPackageCaching() throws IOException {
    ProjectWorkspace workspace =
        TestDataHelper.createProjectWorkspaceForScenario(
            this, "simple_application_bundle_no_debug", tmp);
    workspace.setUp();
    workspace.enableDirCache();
    workspace
        .runBuckBuild("-c", "apple.cache_bundles_and_packages=false", "//:DemoAppPackage")
        .assertSuccess();
    // Fixed: the clean result was silently dropped; every other command in this
    // class asserts success, and an ignored clean failure would mask a broken test.
    workspace.runBuckCommand("clean", "--keep-cache").assertSuccess();
    workspace
        .runBuckBuild("-c", "apple.cache_bundles_and_packages=false", "//:DemoAppPackage")
        .assertSuccess();
    workspace.getBuildLog().assertTargetBuiltLocally("//:DemoAppPackage");
  }
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sqoop.repository.common;
import static org.apache.sqoop.repository.common.CommonRepositorySchemaConstants.*;
public class CommonRepositoryInsertUpdateDeleteSelectQuery {
/**
* ****DIRECTION TABLE *************
*/
public static final String STMT_SELECT_SQD_ID_BY_SQD_NAME =
"SELECT " + CommonRepoUtils.escapeColumnName(COLUMN_SQD_ID)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_DIRECTION_NAME)
+ " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQD_NAME) + "=?";
public static final String STMT_SELECT_SQD_NAME_BY_SQD_ID =
"SELECT " + CommonRepoUtils.escapeColumnName(COLUMN_SQD_NAME)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_DIRECTION_NAME)
+ " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQD_ID) + "=?";
/**
* ******CONFIGURABLE TABLE **************
*/
//DML: Get configurable by given name
public static final String STMT_SELECT_FROM_CONFIGURABLE =
"SELECT "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_ID) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_NAME) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_CLASS) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_VERSION)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIGURABLE_NAME)
+ " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQC_NAME) + " = ?";
//DML: Get all configurables for a given type
public static final String STMT_SELECT_CONFIGURABLE_ALL_FOR_TYPE =
"SELECT "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_ID) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_NAME) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_CLASS) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_VERSION)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIGURABLE_NAME)
+ " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQC_TYPE) + " = ?";
//DML: Insert into configurable
public static final String STMT_INSERT_INTO_CONFIGURABLE =
"INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIGURABLE_NAME) + " ("
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_NAME) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_CLASS) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_VERSION) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_TYPE)
+ ") VALUES (?, ?, ?, ?)";
//Delete all configs for a given configurable
public static final String STMT_DELETE_CONFIGS_FOR_CONFIGURABLE =
"DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_NAME)
+ " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_CONFIGURABLE) + " = ?";
//Delete all inputs for a given configurable
public static final String STMT_DELETE_INPUTS_FOR_CONFIGURABLE =
"DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME)
+ " WHERE "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG)
+ " IN (SELECT "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_ID)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_NAME)
+ " WHERE "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_CONFIGURABLE) + " = ?)";
public static final String STMT_DELETE_INPUT_RELATIONS_FOR_INPUT =
"DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_RELATION_NAME)
+ " WHERE "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQIR_PARENT)
+ " IN (SELECT "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME)
+ " WHERE "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG)
+ " IN (SELECT "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_ID)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_NAME)
+ " WHERE "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_CONFIGURABLE) + " = ?))";
//Update the configurable
public static final String STMT_UPDATE_CONFIGURABLE =
"UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIGURABLE_NAME)
+ " SET " + CommonRepoUtils.escapeColumnName(COLUMN_SQC_NAME) + " = ?, "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_CLASS) + " = ?, "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_VERSION) + " = ?, "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQC_TYPE) + " = ? "
+ " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQC_ID) + " = ?";
/**
* *******CONFIG TABLE *************
*/
//DML: Get all configs for a given configurable
public static final String STMT_SELECT_CONFIG_FOR_CONFIGURABLE =
"SELECT "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_ID) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_CONFIGURABLE) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_NAME) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_TYPE) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_INDEX)
+ " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_NAME)
+ " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_CONFIGURABLE) + " = ? "
+ " ORDER BY " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_INDEX);
//DML: Insert into config
public static final String STMT_INSERT_INTO_CONFIG =
"INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_NAME) + " ("
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_CONFIGURABLE) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_NAME) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_TYPE) + ", "
+ CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_INDEX)
+ ") VALUES ( ?, ?, ?, ?)";
  /**
   * ******* INPUT TABLE *************
   */
  // DML: Get all inputs belonging to a given config, ordered by their position
  // within the config.  The trailing "cast(null as varchar(100))" keeps the
  // column count aligned with the link/job variants of this query, whose last
  // column is the stored input value (presumably -- confirm against the code
  // that reads this result set).
  public static final String STMT_SELECT_INPUT =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_INDEX) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_TYPE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRMASK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRLENGTH) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_EDITABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ENUMVALS) + ", "
      + "cast(null as varchar(100))"
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG) + " = ?"
      + " ORDER BY " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_INDEX);
  // DML: Get a single input's id and name by its id
  public static final String STMT_SELECT_INPUT_BY_ID =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_NAME)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID) + " = ?";
  // DML: Get a single input's id and name by its name
  public static final String STMT_SELECT_INPUT_BY_NAME =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_NAME)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_NAME) + " = ?";
  // DML: Insert a new input row.  Bind parameters, in order:
  // (name, config id, index, type, strmask, strlength, editable, enumvals).
  public static final String STMT_INSERT_INTO_INPUT =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_INDEX) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_TYPE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRMASK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRLENGTH) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_EDITABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ENUMVALS)
      + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
  /********** INPUT-RELATIONSHIP TABLE **************/
  // DML: Record that one input overrides another (parent id, child id)
  public static final String STMT_INSERT_INTO_INPUT_RELATION =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_RELATION_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQIR_PARENT) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQIR_CHILD)
      + ") VALUES (?, ?)";
  // DML: Fetch the ids of all inputs overridden by the given parent input
  public static final String STMT_FETCH_SQ_INPUT_OVERRIDES =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQIR_CHILD)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_RELATION_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQIR_PARENT) + " = ?";
  /**
   * *******LINK INPUT TABLE *************
   */
  // DML: Get inputs and values for a given link.  The LEFT OUTER JOIN ensures
  // inputs with no stored value still come back, with a NULL value column.
  // Bind parameters, in order: (link id, config id, link id).
  public static final String STMT_FETCH_LINK_INPUT =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_INDEX) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_TYPE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRMASK) + ", "
      // NOTE(review): "," below lacks the space used by every sibling query;
      // harmless to the SQL, but inconsistent.
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRLENGTH) + ","
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_EDITABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ENUMVALS) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_VALUE)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME)
      + " LEFT OUTER JOIN " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_INPUT_NAME)
      + " ON " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_INPUT) + " = " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID)
      + " AND " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_LINK) + " = ?"
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG) + " = ?"
      + " AND (" + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_LINK) + " = ?" + " OR " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_LINK) + " IS NULL)"
      + " ORDER BY " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_INDEX);
  /**
   * *******JOB INPUT TABLE *************
   */
  // DML: Fetch inputs and values for a given job.  Mirrors STMT_FETCH_LINK_INPUT:
  // the LEFT OUTER JOIN keeps value-less inputs in the result with a NULL value.
  // Bind parameters, in order: (job id, config id, job id).
  public static final String STMT_FETCH_JOB_INPUT =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_INDEX) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_TYPE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRMASK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_STRLENGTH) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_EDITABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ENUMVALS) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_VALUE)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_INPUT_NAME)
      + " LEFT OUTER JOIN " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_INPUT_NAME)
      + " ON " + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_INPUT) + " = " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_ID)
      + " AND " + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_JOB) + " = ?"
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_CONFIG) + " = ?"
      + " AND (" + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_JOB) + " = ? OR " + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_JOB) + " IS NULL)"
      + " ORDER BY " + CommonRepoUtils.escapeColumnName(COLUMN_SQI_INDEX);
  /**
   * *******LINK TABLE *************
   */
  // DML: Insert new link.  Bind parameters, in order:
  // (name, configurable id, enabled, creation user, creation date, update user, update date).
  public static final String STMT_INSERT_LINK =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ENABLED) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_DATE)
      + ") VALUES (?, ?, ?, ?, ?, ?, ?)";
  // DML: Insert new link input value (link id, input id, value)
  public static final String STMT_INSERT_LINK_INPUT =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_INPUT_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_LINK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_INPUT) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_VALUE)
      + ") VALUES (?, ?, ?)";
  // DML: Update a link's name and audit columns (name, update user, update date, link id)
  public static final String STMT_UPDATE_LINK =
      "UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME) + " SET "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_NAME) + " = ?, "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_USER) + " = ?, "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_DATE) + " = ? "
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + " = ?";
  // DML: Enable or disable link (enabled flag, link id)
  public static final String STMT_ENABLE_LINK =
      "UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME) + " SET "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ENABLED) + " = ? "
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + " = ?";
  // DML: Update a stored link input value (value, input id, link id)
  public static final String STMT_UPDATE_LINK_INPUT =
      "UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_INPUT_NAME) + " SET "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_VALUE) + " = ? "
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_INPUT) + " = ?"
      + " AND " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_LINK) + " = ?";
  // DML: Delete all input values for a link
  public static final String STMT_DELETE_LINK_INPUT =
      "DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_INPUT_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNKI_LINK) + " = ?";
  // DML: Delete one row from the link table by id
  public static final String STMT_DELETE_LINK =
      "DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + " = ?";
  // DML: Select one specific link by id
  public static final String STMT_SELECT_LINK_SINGLE =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ENABLED) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_DATE)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + " = ?";
  // DML: Select one specific link by name (same column list as by-id variant)
  public static final String STMT_SELECT_LINK_SINGLE_BY_NAME =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ENABLED) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_DATE)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_NAME) + " = ?";
  // DML: Select all links (no filter)
  public static final String STMT_SELECT_LINK_ALL =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ENABLED) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_DATE)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME);
  // DML: Select all links for a specific connector (configurable id).
  public static final String STMT_SELECT_LINK_FOR_CONNECTOR_CONFIGURABLE =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ENABLED) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_UPDATE_DATE)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + " = ?";
  // DML: Existence check -- count of links with the given id (0 or 1)
  public static final String STMT_SELECT_LINK_CHECK_BY_ID =
      "SELECT count(*) FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + " = ?";
  /**
   * *******JOB TABLE *************
   */
  // DML: Insert new job.  Bind parameters, in order: (name, from-link id,
  // to-link id, enabled, creation user, creation date, update user, update date).
  public static final String STMT_INSERT_JOB =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_FROM_LINK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_TO_LINK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ENABLED) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_UPDATE_DATE)
      + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
  // DML: Insert new job input value (job id, input id, value)
  public static final String STMT_INSERT_JOB_INPUT =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_INPUT_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_JOB) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_INPUT) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_VALUE)
      + ") VALUES (?, ?, ?)";
  // DML: Update a job's name and audit columns (name, update user, update date, job id)
  public static final String STMT_UPDATE_JOB =
      "UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_NAME) + " SET "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_NAME) + " = ?, "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_UPDATE_USER) + " = ?, "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_UPDATE_DATE) + " = ? "
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ID) + " = ?";
  // DML: Enable or disable job (enabled flag, job id)
  public static final String STMT_ENABLE_JOB =
      "UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_NAME) + " SET "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ENABLED) + " = ? "
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ID) + " = ?";
  // DML: Update a stored job input value (value, input id, job id)
  public static final String STMT_UPDATE_JOB_INPUT =
      "UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_INPUT_NAME) + " SET "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_VALUE) + " = ? "
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_INPUT) + " = ?"
      + " AND " + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_JOB) + " = ?";
  // DML: Delete all input values for a job
  public static final String STMT_DELETE_JOB_INPUT =
      "DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_INPUT_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQBI_JOB) + " = ?";
  // DML: Delete one row from the job table by id
  public static final String STMT_DELETE_JOB =
      "DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ID) + " = ?";
  // DML: Existence check -- count of jobs with the given id (0 or 1)
  public static final String STMT_SELECT_JOB_CHECK_BY_ID =
      "SELECT count(*) FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ID) + " = ?";
  // DML: Count jobs attached to the given link.
  // NOTE(review): the join only matches the FROM side of the job
  // (COLUMN_SQB_FROM_LINK); jobs that reference the link as their TO side are
  // not counted.  Verify with callers whether that is intentional.
  public static final String STMT_SELECT_JOBS_FOR_LINK_CHECK =
      "SELECT"
      + " count(*)"
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_NAME)
      + " JOIN " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME)
      + " ON " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_FROM_LINK) + " = " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID) + " = ? "
  ;
  //DML: Select all jobs, joining the link table twice to resolve the FROM and
  // TO connector ids.  NOTE(review): the aliases FROM_CONNECTOR/TO_CONNECTOR
  // actually alias the LINK table, not a connector table -- slightly misleading.
  public static final String STMT_SELECT_JOB_ALL =
      "SELECT "
      + "FROM_CONNECTOR." + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + ", "
      + "TO_CONNECTOR." + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ID) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_NAME) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_FROM_LINK) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_TO_LINK) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ENABLED) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_CREATION_USER) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_CREATION_DATE) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_UPDATE_USER) + ", "
      + "JOB." + CommonRepoUtils.escapeColumnName(COLUMN_SQB_UPDATE_DATE)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_JOB_NAME) + " JOB"
      + " LEFT JOIN " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME) + " FROM_CONNECTOR"
      + " ON " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_FROM_LINK) + " = FROM_CONNECTOR." + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID)
      + " LEFT JOIN " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_LINK_NAME) + " TO_CONNECTOR"
      + " ON " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_TO_LINK) + " = TO_CONNECTOR." + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_ID);
  // DML: Select one specific job by id (appends WHERE to the ALL query)
  public static final String STMT_SELECT_JOB_SINGLE_BY_ID =
      STMT_SELECT_JOB_ALL +
      " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_ID) + " = ?";
  // DML: Select one specific job by name (appends WHERE to the ALL query)
  public static final String STMT_SELECT_JOB_SINGLE_BY_NAME =
      STMT_SELECT_JOB_ALL +
      " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQB_NAME) + " = ?";
  // DML: Select all jobs whose FROM or TO side uses the given connector;
  // the same configurable id must be bound to both parameters.
  public static final String STMT_SELECT_ALL_JOBS_FOR_CONNECTOR_CONFIGURABLE =
      STMT_SELECT_JOB_ALL +
      " WHERE FROM_CONNECTOR." + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + " = ?" +
      " OR TO_CONNECTOR." + CommonRepoUtils.escapeColumnName(COLUMN_SQ_LNK_CONFIGURABLE) + " = ?";
  /**
   * *******SUBMISSION TABLE *************
   */
  // DML: Insert new submission.  The substr() calls truncate external id/link
  // and error summary/details -- presumably to fit the VARCHAR column widths
  // (150/150/750); confirm against the schema DDL.
  public static final String STMT_INSERT_SUBMISSION =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_SUBMISSION_NAME) + "("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_JOB) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_STATUS) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_LINK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_SUMMARY) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_DETAILS) + ") "
      + " VALUES(?, ?, ?, ?, ?, ?, ?, substr(?, 1, 150) , substr(?, 1, 150), substr(?, 1, 750))";
  // DML: Update existing submission (status, update user/date, truncated errors, id)
  public static final String STMT_UPDATE_SUBMISSION =
      "UPDATE " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_SUBMISSION_NAME) + " SET "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_STATUS) + " = ?, "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_USER) + " = ?, "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + " = ?, "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_SUMMARY) + " = substr(?, 1, 150), "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_DETAILS) + " = substr(?, 1, 750)"
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ID) + " = ?";
  // DML: Existence check -- count of submissions with the given id (0 or 1)
  public static final String STMT_SELECT_SUBMISSION_CHECK =
      "SELECT"
      + " count(*)"
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_SUBMISSION_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ID) + " = ?";
  // DML: Purge submissions last updated before the given timestamp
  public static final String STMT_PURGE_SUBMISSIONS =
      "DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_SUBMISSION_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + " < ?";
  // DML: Get submissions in the given (unfinished) status
  public static final String STMT_SELECT_SUBMISSION_UNFINISHED =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_JOB) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_STATUS) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_LINK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_SUMMARY) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_DETAILS)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_SUBMISSION_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQS_STATUS) + " = ?";
  // DML : Get all submissions, most recently updated first
  public static final String STMT_SELECT_SUBMISSIONS =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_JOB) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_STATUS) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_LINK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_SUMMARY) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_DETAILS)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_SUBMISSION_NAME)
      + " ORDER BY " + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + " DESC";
  // DML: Get submissions for a job, most recently updated first
  public static final String STMT_SELECT_SUBMISSIONS_FOR_JOB =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_JOB) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_STATUS) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_CREATION_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_USER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_EXTERNAL_LINK) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_SUMMARY) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQS_ERROR_DETAILS)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_SUBMISSION_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQS_JOB) + " = ?"
      + " ORDER BY " + CommonRepoUtils.escapeColumnName(COLUMN_SQS_UPDATE_DATE) + " DESC";
  // DML: Look up a counter group by (truncated) name
  public static final String STMT_SELECT_COUNTER_GROUP =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQG_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQG_NAME) + " "
      + "FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_GROUP_NAME) + " "
      + "WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQG_NAME) + " = substr(?, 1, 75)";
  // DML: Insert new counter group (name truncated to 75 chars to match lookup)
  public static final String STMT_INSERT_COUNTER_GROUP =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_GROUP_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQG_NAME) + ") "
      + "VALUES (substr(?, 1, 75))";
  // DML: Look up a counter by (truncated) name
  public static final String STMT_SELECT_COUNTER =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQR_ID) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQR_NAME) + " "
      + "FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_NAME) + " "
      + "WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQR_NAME) + " = substr(?, 1, 75)";
  // DML: Insert new counter (name truncated to 75 chars to match lookup)
  public static final String STMT_INSERT_COUNTER =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQR_NAME) + ") "
      + "VALUES (substr(?, 1, 75))";
  // DML: Insert a counter value for a submission (group id, counter id, submission id, value)
  public static final String STMT_INSERT_COUNTER_SUBMISSION =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_SUBMISSION_NAME) + " ("
      + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_GROUP) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_COUNTER) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_SUBMISSION) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_VALUE) + ") "
      + "VALUES (?, ?, ?, ?)";
  // DML: Fetch (group name, counter name, value) triples for a submission,
  // resolving the group/counter ids via LEFT JOINs
  public static final String STMT_SELECT_COUNTER_SUBMISSION =
      "SELECT "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQG_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQR_NAME) + ", "
      + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_VALUE) + " "
      + "FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_SUBMISSION_NAME) + " "
      + "LEFT JOIN " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_GROUP_NAME)
      + " ON " + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_GROUP) + " = " + CommonRepoUtils.escapeColumnName(COLUMN_SQG_ID) + " "
      + "LEFT JOIN " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_NAME)
      + " ON " + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_COUNTER) + " = " + CommonRepoUtils.escapeColumnName(COLUMN_SQR_ID) + " "
      + "WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_SUBMISSION) + " = ? ";
  // DML: Delete all counter values recorded for a submission
  public static final String STMT_DELETE_COUNTER_SUBMISSION =
      "DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_COUNTER_SUBMISSION_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQRS_SUBMISSION) + " = ?";
  /**
   * **** CONFIG and CONNECTOR DIRECTIONS ***
   */
  // DML: Record a supported direction for a connector (connector id, direction)
  public static final String STMT_INSERT_SQ_CONNECTOR_DIRECTIONS =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONNECTOR_DIRECTIONS_NAME) + " "
      + "(" + CommonRepoUtils.escapeColumnName(COLUMN_SQCD_CONNECTOR) + ", " + CommonRepoUtils.escapeColumnName(COLUMN_SQCD_DIRECTION) + ")"
      + " VALUES (?, ?)";
  // DML: Record a direction for a config (config id, direction)
  public static final String STMT_INSERT_SQ_CONFIG_DIRECTIONS =
      "INSERT INTO " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_DIRECTIONS_NAME) + " "
      + "(" + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_DIR_CONFIG) + ", " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_DIR_DIRECTION) + ")"
      + " VALUES (?, ?)";
  // DML: Select all connector/direction pairs
  public static final String STMT_SELECT_SQ_CONNECTOR_DIRECTIONS_ALL =
      "SELECT " + CommonRepoUtils.escapeColumnName(COLUMN_SQCD_CONNECTOR) + ", " + CommonRepoUtils.escapeColumnName(COLUMN_SQCD_DIRECTION)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONNECTOR_DIRECTIONS_NAME);
  // DML: Select directions for one connector (appends WHERE to the ALL query)
  public static final String STMT_SELECT_SQ_CONNECTOR_DIRECTIONS =
      STMT_SELECT_SQ_CONNECTOR_DIRECTIONS_ALL + " WHERE "
          + CommonRepoUtils.escapeColumnName(COLUMN_SQCD_CONNECTOR) + " = ?";
  // DML: Select all config/direction pairs
  public static final String STMT_SELECT_SQ_CONFIG_DIRECTIONS_ALL =
      "SELECT " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_DIR_CONFIG) + ", " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_DIR_DIRECTION)
      + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_DIRECTIONS_NAME);
  // DML: Select directions for one config (appends WHERE to the ALL query)
  public static final String STMT_SELECT_SQ_CONFIG_DIRECTIONS =
      STMT_SELECT_SQ_CONFIG_DIRECTIONS_ALL + " WHERE "
          + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_DIR_CONFIG) + " = ?";
  // DML: Delete the config-direction rows for every config owned by the
  // given configurable (subquery resolves config ids from the configurable id)
  public static final String STMT_DELETE_DIRECTIONS_FOR_CONFIGURABLE =
      "DELETE FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_DIRECTIONS_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_DIR_CONFIG)
      + " IN (SELECT " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_ID) + " FROM " + CommonRepoUtils.getTableName(SCHEMA_SQOOP, TABLE_SQ_CONFIG_NAME)
      + " WHERE " + CommonRepoUtils.escapeColumnName(COLUMN_SQ_CFG_CONFIGURABLE) + " = ?)";
  // ---------------------------------------------------------------------
  // Instance-level accessors for the shared statement constants above.
  // Each simply returns the corresponding static SQL string; presumably
  // they exist so database-specific subclasses can override individual
  // statements -- confirm against the subclasses of this query holder.
  // ---------------------------------------------------------------------
  public String getStmtSelectSqdIdBySqdName() {
    return STMT_SELECT_SQD_ID_BY_SQD_NAME;
  }
  public String getStmtSelectSqConfigDirections() {
    return STMT_SELECT_SQ_CONFIG_DIRECTIONS;
  }
  public String getStmtDeleteInputsForConfigurable() {
    return STMT_DELETE_INPUTS_FOR_CONFIGURABLE;
  }
  public String getStmtUpdateConfigurable() {
    return STMT_UPDATE_CONFIGURABLE;
  }
  public String getStmtSelectConfigForConfigurable() {
    return STMT_SELECT_CONFIG_FOR_CONFIGURABLE;
  }
  public String getStmtInsertIntoConfig() {
    return STMT_INSERT_INTO_CONFIG;
  }
  public String getStmtSelectInput() {
    return STMT_SELECT_INPUT;
  }
  public String getStmtSelectInputById() {
    return STMT_SELECT_INPUT_BY_ID;
  }
  public String getStmtSelectInputByName() {
    return STMT_SELECT_INPUT_BY_NAME;
  }
  // NOTE(review): name differs from its constant (STMT_FETCH_SQ_INPUT_OVERRIDES)
  public String getStmtSelectInputOverrides() {
    return STMT_FETCH_SQ_INPUT_OVERRIDES;
  }
  public String getStmtInsertIntoInput() {
    return STMT_INSERT_INTO_INPUT;
  }
  public String getStmtFetchLinkInput() {
    return STMT_FETCH_LINK_INPUT;
  }
  public String getStmtFetchJobInput() {
    return STMT_FETCH_JOB_INPUT;
  }
  // Accessors for the link-table statements.
  public String getStmtInsertLink() {
    return STMT_INSERT_LINK;
  }
  public String getStmtInsertLinkInput() {
    return STMT_INSERT_LINK_INPUT;
  }
  public String getStmtUpdateLink() {
    return STMT_UPDATE_LINK;
  }
  public String getStmtEnableLink() {
    return STMT_ENABLE_LINK;
  }
  public String getStmtUpdateLinkInput() {
    return STMT_UPDATE_LINK_INPUT;
  }
  public String getStmtDeleteLinkInput() {
    return STMT_DELETE_LINK_INPUT;
  }
  public String getStmtDeleteLink() {
    return STMT_DELETE_LINK;
  }
  public String getStmtSelectLinkSingle() {
    return STMT_SELECT_LINK_SINGLE;
  }
  public String getStmtSelectLinkSingleByName() {
    return STMT_SELECT_LINK_SINGLE_BY_NAME;
  }
  public String getStmtSelectLinkAll() {
    return STMT_SELECT_LINK_ALL;
  }
  public String getStmtSelectLinkForConnectorConfigurable() {
    return STMT_SELECT_LINK_FOR_CONNECTOR_CONFIGURABLE;
  }
  public String getStmtSelectLinkCheckById() {
    return STMT_SELECT_LINK_CHECK_BY_ID;
  }
  // Accessors for the job-table statements.
  public String getStmtInsertJob() {
    return STMT_INSERT_JOB;
  }
  public String getStmtInsertJobInput() {
    return STMT_INSERT_JOB_INPUT;
  }
  public String getStmtUpdateJob() {
    return STMT_UPDATE_JOB;
  }
  public String getStmtEnableJob() {
    return STMT_ENABLE_JOB;
  }
  public String getStmtUpdateJobInput() {
    return STMT_UPDATE_JOB_INPUT;
  }
  public String getStmtDeleteJobInput() {
    return STMT_DELETE_JOB_INPUT;
  }
  public String getStmtDeleteJob() {
    return STMT_DELETE_JOB;
  }
  public String getStmtSelectJobCheckById() {
    return STMT_SELECT_JOB_CHECK_BY_ID;
  }
  public String getStmtSelectJobsForLinkCheck() {
    return STMT_SELECT_JOBS_FOR_LINK_CHECK;
  }
  public String getStmtSelectJobAll() {
    return STMT_SELECT_JOB_ALL;
  }
  public String getStmtSelectJobSingleById() {
    return STMT_SELECT_JOB_SINGLE_BY_ID;
  }
  public String getStmtSelectJobSingleByName() {
    return STMT_SELECT_JOB_SINGLE_BY_NAME;
  }
  public String getStmtSelectAllJobsForConnectorConfigurable() {
    return STMT_SELECT_ALL_JOBS_FOR_CONNECTOR_CONFIGURABLE;
  }
  // Accessors for the submission, counter, direction, and configurable statements.
  public String getStmtInsertSubmission() {
    return STMT_INSERT_SUBMISSION;
  }
  public String getStmtUpdateSubmission() {
    return STMT_UPDATE_SUBMISSION;
  }
  public String getStmtSelectSubmissionCheck() {
    return STMT_SELECT_SUBMISSION_CHECK;
  }
  public String getStmtPurgeSubmissions() {
    return STMT_PURGE_SUBMISSIONS;
  }
  public String getStmtSelectSubmissionUnfinished() {
    return STMT_SELECT_SUBMISSION_UNFINISHED;
  }
  public String getStmtSelectSubmissions() {
    return STMT_SELECT_SUBMISSIONS;
  }
  public String getStmtSelectSubmissionsForJob() {
    return STMT_SELECT_SUBMISSIONS_FOR_JOB;
  }
  public String getStmtSelectCounterGroup() {
    return STMT_SELECT_COUNTER_GROUP;
  }
  public String getStmtInsertCounterGroup() {
    return STMT_INSERT_COUNTER_GROUP;
  }
  public String getStmtSelectCounter() {
    return STMT_SELECT_COUNTER;
  }
  public String getStmtInsertCounter() {
    return STMT_INSERT_COUNTER;
  }
  public String getStmtInsertCounterSubmission() {
    return STMT_INSERT_COUNTER_SUBMISSION;
  }
  public String getStmtSelectCounterSubmission() {
    return STMT_SELECT_COUNTER_SUBMISSION;
  }
  public String getStmtDeleteCounterSubmission() {
    return STMT_DELETE_COUNTER_SUBMISSION;
  }
  public String getStmtInsertSqConnectorDirections() {
    return STMT_INSERT_SQ_CONNECTOR_DIRECTIONS;
  }
  public String getStmtInsertSqConfigDirections() {
    return STMT_INSERT_SQ_CONFIG_DIRECTIONS;
  }
  public String getStmtSelectSqConnectorDirectionsAll() {
    return STMT_SELECT_SQ_CONNECTOR_DIRECTIONS_ALL;
  }
  public String getStmtSelectSqConnectorDirections() {
    return STMT_SELECT_SQ_CONNECTOR_DIRECTIONS;
  }
  public String getStmtSelectSqConfigDirectionsAll() {
    return STMT_SELECT_SQ_CONFIG_DIRECTIONS_ALL;
  }
  public String getStmtSelectSqdNameBySqdId() {
    return STMT_SELECT_SQD_NAME_BY_SQD_ID;
  }
  public String getStmtSelectFromConfigurable() {
    return STMT_SELECT_FROM_CONFIGURABLE;
  }
  public String getStmtSelectConfigurableAllForType() {
    return STMT_SELECT_CONFIGURABLE_ALL_FOR_TYPE;
  }
  public String getStmtInsertIntoConfigurable() {
    return STMT_INSERT_INTO_CONFIGURABLE;
  }
  public String getStmtDeleteConfigsForConfigurable() {
    return STMT_DELETE_CONFIGS_FOR_CONFIGURABLE;
  }
  public String getStmtDeleteDirectionsForConfigurable() {
    return STMT_DELETE_DIRECTIONS_FOR_CONFIGURABLE;
  }
}
|
|
/*
Copyright (c) 2012 Emitrom LLC. All rights reserved.
For licensing questions, please contact us at [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.emitrom.lienzo.client.core.shape;
import com.emitrom.lienzo.client.core.Attribute;
import com.emitrom.lienzo.client.core.Context2D;
import com.emitrom.lienzo.client.core.shape.json.IFactory;
import com.emitrom.lienzo.client.core.shape.json.ShapeFactory;
import com.emitrom.lienzo.client.core.shape.json.validators.ValidationContext;
import com.emitrom.lienzo.shared.core.types.ShapeType;
import com.google.gwt.json.client.JSONObject;
/**
 * A Chord is defined by a radius, a start angle and an end angle. Effectively,
 * a chord is a circle with a flat side, which is defined by the start and end angles.
 * The angles can be specified in clockwise or counter-clockwise order.
 */
public class Chord extends Shape<Chord>
{
    /**
     * Constructor. Creates an instance of a chord.
     *
     * @param radius the chord's radius
     * @param startAngle in radians
     * @param endAngle in radians
     * @param counterClockwise whether the arc is traced counter-clockwise from start to end angle
     */
    public Chord(double radius, double startAngle, double endAngle, boolean counterClockwise)
    {
        super(ShapeType.CHORD);
        setRadius(radius).setStartAngle(startAngle).setEndAngle(endAngle).setCounterClockwise(counterClockwise);
    }

    /**
     * Constructor. Creates an instance of a chord, drawn clockwise.
     *
     * @param radius the chord's radius
     * @param startAngle in radians
     * @param endAngle in radians
     */
    public Chord(double radius, double startAngle, double endAngle)
    {
        super(ShapeType.CHORD);
        setRadius(radius).setStartAngle(startAngle).setEndAngle(endAngle).setCounterClockwise(false);
    }

    /**
     * Constructor used by {@link ChordFactory} when building a chord from its
     * JSON representation.
     *
     * @param node JSON node holding this chord's attributes
     */
    protected Chord(JSONObject node)
    {
        super(ShapeType.CHORD, node);
    }

    /**
     * Draws this chord: an arc around the shape's local origin which is then
     * closed, so the straight segment between the arc's two end points forms
     * the chord's flat side.
     *
     * @param context
     */
    @Override
    public boolean prepare(Context2D context, Attributes attr, double alpha)
    {
        double beg = getStartAngle();
        double end = getEndAngle();
        context.beginPath();
        // Arc is centered on the local origin; closePath() adds the flat edge.
        context.arc(0, 0, getRadius(), beg, end, isCounterClockwise());
        context.closePath();
        return true;
    }

    /**
     * Gets this chord's radius
     *
     * @return double
     */
    public double getRadius()
    {
        return getAttributes().getRadius();
    }

    /**
     * Sets this chord's radius.
     *
     * @param radius
     * @return this chord.
     */
    public Chord setRadius(double radius)
    {
        getAttributes().setRadius(radius);
        return this;
    }

    /**
     * Gets the starting angle of this chord.
     *
     * @return double in radians
     */
    public double getStartAngle()
    {
        return getAttributes().getStartAngle();
    }

    /**
     * Sets the starting angle of this chord.
     *
     * @param angle in radians
     * @return this chord.
     */
    public Chord setStartAngle(double angle)
    {
        getAttributes().setStartAngle(angle);
        return this;
    }

    /**
     * Gets the end angle of this chord.
     *
     * @return double in radians
     */
    public double getEndAngle()
    {
        return getAttributes().getEndAngle();
    }

    /**
     * Sets the end angle of this chord.
     * (Fixed copy-paste javadoc that previously said "Gets".)
     *
     * @param angle in radians
     * @return this chord.
     */
    public Chord setEndAngle(double angle)
    {
        getAttributes().setEndAngle(angle);
        return this;
    }

    /**
     * Returns whether the chord is drawn counter clockwise.
     * NOTE(review): earlier docs claimed the default is true, but the
     * three-argument constructor explicitly sets this to false (clockwise);
     * confirm the attribute default before relying on either value.
     *
     * @return boolean
     */
    public boolean isCounterClockwise()
    {
        return getAttributes().isCounterClockwise();
    }

    /**
     * Sets whether the drawing direction of this chord is counter clockwise.
     *
     * @param counterclockwise
     * @return this chord
     */
    public Chord setCounterClockwise(boolean counterclockwise)
    {
        getAttributes().setCounterClockwise(counterclockwise);
        return this;
    }

    @Override
    public IFactory<?> getFactory()
    {
        return new ChordFactory();
    }

    /**
     * Factory that creates and validates {@link Chord} instances from JSON.
     */
    public static class ChordFactory extends ShapeFactory<Chord>
    {
        public ChordFactory()
        {
            super(ShapeType.CHORD);
            // The boolean flag presumably marks the attribute as required
            // (radius/start/end), with drawing direction optional — confirm
            // against ShapeFactory.addAttribute.
            addAttribute(Attribute.RADIUS, true);
            addAttribute(Attribute.START_ANGLE, true);
            addAttribute(Attribute.END_ANGLE, true);
            addAttribute(Attribute.COUNTER_CLOCKWISE);
        }

        @Override
        public Chord create(JSONObject node, ValidationContext ctx)
        {
            return new Chord(node);
        }
    }
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.model;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.camel.CamelContext;
import org.apache.camel.Endpoint;
import org.apache.camel.Predicate;
import org.apache.camel.Processor;
import org.apache.camel.impl.InterceptSendToEndpoint;
import org.apache.camel.processor.InterceptEndpointProcessor;
import org.apache.camel.spi.AsPredicate;
import org.apache.camel.spi.EndpointStrategy;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.RouteContext;
import org.apache.camel.util.EndpointHelper;
import org.apache.camel.util.URISupport;
/**
 * Intercepts messages being sent to an endpoint
 *
 * @version
 */
@Metadata(label = "configuration")
@XmlRootElement(name = "interceptSendToEndpoint")
@XmlAccessorType(XmlAccessType.FIELD)
public class InterceptSendToEndpointDefinition extends OutputDefinition<InterceptSendToEndpointDefinition> {

    // TODO: Support lookup endpoint by ref (requires a bit more work)

    // TODO: interceptSendToEndpoint needs to proxy the endpoints at very first
    // so when other processors uses an endpoint its already proxied, see workaround in SendProcessor
    // needed when we haven't proxied beforehand. This requires some work in the route builder in Camel
    // to implement so that should be a part of a bigger rework/improvement in the future

    /** The uri (or uri pattern) whose endpoints should have sends intercepted. */
    @XmlAttribute(required = true)
    private String uri;
    /** When true the exchange is not forwarded to the original endpoint after interception. */
    @XmlAttribute
    private Boolean skipSendToOriginalEndpoint;

    public InterceptSendToEndpointDefinition() {
    }

    public InterceptSendToEndpointDefinition(String uri) {
        this.uri = uri;
    }

    @Override
    public String toString() {
        return "InterceptSendToEndpoint[" + uri + " -> " + getOutputs() + "]";
    }

    @Override
    public String getLabel() {
        return "interceptSendToEndpoint[" + uri + "]";
    }

    @Override
    public boolean isAbstract() {
        return true;
    }

    @Override
    public boolean isTopLevelOnly() {
        return true;
    }

    /**
     * Creates the interception processor. Registers an endpoint callback that
     * wraps every matching endpoint in an {@link InterceptSendToEndpoint} proxy
     * so the detour runs whenever something sends to that endpoint.
     */
    @Override
    public Processor createProcessor(final RouteContext routeContext) throws Exception {
        // create the detour
        final Processor detour = this.createChildProcessor(routeContext, true);
        final String matchURI = getUri();

        // register endpoint callback so we can proxy the endpoint
        routeContext.getCamelContext().addRegisterEndpointCallback(new EndpointStrategy() {
            public Endpoint registerEndpoint(String uri, Endpoint endpoint) {
                if (endpoint instanceof InterceptSendToEndpoint) {
                    // endpoint already decorated
                    return endpoint;
                } else if (matchURI == null || matchPattern(routeContext.getCamelContext(), uri, matchURI)) {
                    // only proxy if the uri is matched decorate endpoint with our proxy
                    // should be false by default
                    boolean skip = getSkipSendToOriginalEndpoint() != null && getSkipSendToOriginalEndpoint();
                    InterceptSendToEndpoint proxy = new InterceptSendToEndpoint(endpoint, skip);
                    proxy.setDetour(detour);
                    return proxy;
                } else {
                    // no proxy so return regular endpoint
                    return endpoint;
                }
            }
        });

        // remove the original intercepted route from the outputs as we do not intercept as the regular interceptor
        // instead we use the proxy endpoints producer do the triggering. That is we trigger when someone sends
        // an exchange to the endpoint, see InterceptSendToEndpoint for details.
        RouteDefinition route = routeContext.getRoute();
        List<ProcessorDefinition<?>> outputs = route.getOutputs();
        outputs.remove(this);

        return new InterceptEndpointProcessor(matchURI, detour);
    }

    /**
     * Does the uri match the pattern.
     *
     * @param camelContext the CamelContext
     * @param uri the uri
     * @param pattern the pattern, which can be an endpoint uri as well
     * @return <tt>true</tt> if matched and we should intercept, <tt>false</tt> if not matched, and not intercept.
     */
    protected boolean matchPattern(CamelContext camelContext, String uri, String pattern) {
        // match using the pattern as-is
        boolean match = EndpointHelper.matchEndpoint(camelContext, uri, pattern);
        if (!match) {
            try {
                // the pattern could be an uri, so we need to normalize it before matching again
                pattern = URISupport.normalizeUri(pattern);
                match = EndpointHelper.matchEndpoint(camelContext, uri, pattern);
            } catch (Exception e) {
                // ignore: an un-parseable pattern simply fails to match
            }
        }
        return match;
    }

    /**
     * Applies this interceptor only if the given predicate is true
     *
     * @param predicate the predicate
     * @return the builder
     */
    public InterceptSendToEndpointDefinition when(@AsPredicate Predicate predicate) {
        WhenDefinition when = new WhenDefinition(predicate);
        addOutput(when);
        return this;
    }

    /**
     * Skip sending the {@link org.apache.camel.Exchange} to the original intended endpoint
     *
     * @return the builder
     */
    public InterceptSendToEndpointDefinition skipSendToOriginalEndpoint() {
        setSkipSendToOriginalEndpoint(Boolean.TRUE);
        return this;
    }

    /**
     * This method is <b>only</b> for handling some post configuration
     * that is needed since this is an interceptor, and we have to do
     * a bit of magic logic to fixup to handle predicates
     * with or without proceed/stop set as well.
     */
    public void afterPropertiesSet() {
        // okay the intercept endpoint works a bit differently than the regular interceptors
        // so we must fix the route definition yet again
        if (getOutputs().isEmpty()) {
            // no outputs
            return;
        }

        // if there is a when definition at first, then its a predicate for this interceptor
        ProcessorDefinition<?> first = getOutputs().get(0);
        if (first instanceof WhenDefinition && !(first instanceof WhenSkipSendToEndpointDefinition)) {
            WhenDefinition when = (WhenDefinition) first;

            // create a copy of when to use as replacement
            WhenSkipSendToEndpointDefinition newWhen = new WhenSkipSendToEndpointDefinition();
            newWhen.setExpression(when.getExpression());
            newWhen.setId(when.getId());
            newWhen.setInheritErrorHandler(when.isInheritErrorHandler());
            newWhen.setParent(when.getParent());
            newWhen.setOtherAttributes(when.getOtherAttributes());
            newWhen.setDescription(when.getDescription());

            // move this outputs to the when, expect the first one
            // as the first one is the interceptor itself
            for (int i = 1; i < outputs.size(); i++) {
                ProcessorDefinition<?> out = outputs.get(i);
                newWhen.addOutput(out);
            }
            // remove the moved from the original output, by just keeping the first one
            clearOutput();
            outputs.add(newWhen);
        }
    }

    public Boolean getSkipSendToOriginalEndpoint() {
        return skipSendToOriginalEndpoint;
    }

    /**
     * If set to true then the message is not sent to the original endpoint.
     * By default (false) the message is both intercepted and then sent to the original endpoint.
     */
    public void setSkipSendToOriginalEndpoint(Boolean skipSendToOriginalEndpoint) {
        this.skipSendToOriginalEndpoint = skipSendToOriginalEndpoint;
    }

    public String getUri() {
        return uri;
    }

    /**
     * Intercept sending to the uri or uri pattern.
     */
    public void setUri(String uri) {
        this.uri = uri;
    }
}
|
|
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.google.android.apps.exposurenotification.debug;
import androidx.lifecycle.LiveData;
import androidx.lifecycle.MutableLiveData;
import androidx.lifecycle.ViewModel;
import com.google.android.apps.exposurenotification.common.SingleLiveEvent;
import com.google.android.apps.exposurenotification.common.logging.Logger;
import com.google.android.apps.exposurenotification.nearby.ExposureNotificationClientWrapper;
import com.google.android.gms.common.api.ApiException;
import com.google.android.gms.nearby.exposurenotification.ExposureNotificationStatusCodes;
import com.google.android.gms.nearby.exposurenotification.TemporaryExposureKey;
import com.google.android.gms.tasks.Tasks;
import dagger.hilt.android.lifecycle.HiltViewModel;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
/**
 * View model for {@link KeysMatchingFragment}.
 */
@HiltViewModel
public class KeysMatchingViewModel extends ViewModel {

    private static final Logger logger = Logger.getLogger("KeysMatchingViewModel");

    // Latest set of temporary exposure keys fetched from the API.
    private final MutableLiveData<List<TemporaryExposureKey>> temporaryExposureKeysLiveData;
    // Tracks whether a user-consent resolution is currently in flight, and for which call.
    private final MutableLiveData<InFlightResolution> inFlightResolutionLiveData
        = new MutableLiveData<>(new InFlightResolution(false));
    private final SingleLiveEvent<Void> apiDisabledLiveEvent = new SingleLiveEvent<>();
    private final SingleLiveEvent<Void> apiErrorLiveEvent = new SingleLiveEvent<>();
    private final SingleLiveEvent<ResolutionRequiredEvent> resolutionRequiredLiveEvent
        = new SingleLiveEvent<>();
    private final SingleLiveEvent<Void> waitForKeyBroadcastsEvent = new SingleLiveEvent<>();

    private final ExposureNotificationClientWrapper exposureNotificationClientWrapper;

    @Inject
    public KeysMatchingViewModel(
        ExposureNotificationClientWrapper exposureNotificationClientWrapper) {
        this.exposureNotificationClientWrapper = exposureNotificationClientWrapper;
        temporaryExposureKeysLiveData = new MutableLiveData<>(new ArrayList<>());
    }

    /**
     * An event that requests a resolution with the given {@link ApiException}.
     */
    public SingleLiveEvent<ResolutionRequiredEvent> getResolutionRequiredLiveEvent() {
        return resolutionRequiredLiveEvent;
    }

    /**
     * An event that triggers when the API is disabled.
     */
    public SingleLiveEvent<Void> getApiDisabledLiveEvent() {
        return apiDisabledLiveEvent;
    }

    /**
     * An event that triggers when there is an error in the API.
     */
    public SingleLiveEvent<Void> getApiErrorLiveEvent() {
        return apiErrorLiveEvent;
    }

    /**
     * An event that triggers when keys will be broadcast to the app.
     */
    public SingleLiveEvent<Void> getWaitForKeyBroadcastsEvent() {
        return waitForKeyBroadcastsEvent;
    }

    /**
     * The {@link LiveData} representing if there is an in-flight resolution.
     */
    public LiveData<InFlightResolution> getInFlightResolutionLiveData() {
        return inFlightResolutionLiveData;
    }

    /**
     * The {@link LiveData} representing the {@link List} of {@link TemporaryExposureKey}.
     */
    public LiveData<List<TemporaryExposureKey>> getTemporaryExposureKeysLiveData() {
        return temporaryExposureKeysLiveData;
    }

    /**
     * Requests updating the {@link TemporaryExposureKey} from GMSCore API.
     * If the API is disabled, fires {@code apiDisabledLiveEvent} and publishes an
     * empty list; if a user resolution is required, fires
     * {@code resolutionRequiredLiveEvent} unless one is already in flight.
     */
    public void updateTemporaryExposureKeys() {
        exposureNotificationClientWrapper
            .isEnabled()
            .continueWithTask(
                isEnabled -> {
                    // NOTE(review): getResult() rethrows if the isEnabled task
                    // failed; that flows into the failure listener below — confirm.
                    if (isEnabled.getResult()) {
                        return exposureNotificationClientWrapper.getTemporaryExposureKeyHistory();
                    } else {
                        apiDisabledLiveEvent.call();
                        return Tasks.forResult(new ArrayList<>());
                    }
                })
            .addOnSuccessListener(
                temporaryExposureKeys -> temporaryExposureKeysLiveData.setValue(temporaryExposureKeys))
            .addOnFailureListener(
                exception -> {
                    if (!(exception instanceof ApiException)) {
                        logger.e("Unknown error when attempting to start API", exception);
                        apiErrorLiveEvent.call();
                        return;
                    }
                    ApiException apiException = (ApiException) exception;
                    if (apiException.getStatusCode()
                        == ExposureNotificationStatusCodes.RESOLUTION_REQUIRED) {
                        if (inFlightResolutionLiveData.getValue().hasInFlightResolution()) {
                            logger.e("Error, has in flight resolution", exception);
                        } else {
                            // Record the in-flight resolution before asking the UI to resolve it.
                            inFlightResolutionLiveData.setValue(
                                new InFlightResolution(
                                    true, ResolutionType.GET_TEMPORARY_EXPOSURE_KEY_HISTORY));
                            resolutionRequiredLiveEvent.postValue(
                                new ResolutionRequiredEvent(
                                    apiException, ResolutionType.GET_TEMPORARY_EXPOSURE_KEY_HISTORY));
                        }
                    } else {
                        logger.w("No RESOLUTION_REQUIRED in result", apiException);
                        apiErrorLiveEvent.call();
                    }
                });
    }

    /**
     * Requests pre-authorization for later release of the temporary exposure key
     * history. Mirrors {@link #updateTemporaryExposureKeys()}'s handling of the
     * disabled/resolution-required/error cases.
     */
    public void requestPreAuthorizationOfTemporaryExposureKeyHistory() {
        exposureNotificationClientWrapper
            .isEnabled()
            .continueWithTask(
                isEnabled -> {
                    if (isEnabled.getResult()) {
                        return
                            exposureNotificationClientWrapper
                                .requestPreAuthorizedTemporaryExposureKeyHistory();
                    } else {
                        apiDisabledLiveEvent.call();
                        return Tasks.forResult(null);
                    }
                })
            .addOnSuccessListener(
                result -> inFlightResolutionLiveData.setValue(new InFlightResolution(false)))
            .addOnFailureListener(
                exception -> {
                    if (!(exception instanceof ApiException)) {
                        // NOTE(review): message appears copy-pasted from
                        // updateTemporaryExposureKeys ("start API") — confirm intent.
                        logger.e("Unknown error when attempting to start API", exception);
                        apiErrorLiveEvent.call();
                        return;
                    }
                    ApiException apiException = (ApiException) exception;
                    if (apiException.getStatusCode()
                        == ExposureNotificationStatusCodes.RESOLUTION_REQUIRED) {
                        if (inFlightResolutionLiveData.getValue().hasInFlightResolution()) {
                            logger.e("Error, has in flight resolution", exception);
                        } else {
                            inFlightResolutionLiveData.setValue(
                                new InFlightResolution(
                                    true,
                                    ResolutionType.PREAUTHORIZE_TEMPORARY_EXPOSURE_KEY_RELEASE));
                            resolutionRequiredLiveEvent.postValue(
                                new ResolutionRequiredEvent(
                                    apiException,
                                    ResolutionType.PREAUTHORIZE_TEMPORARY_EXPOSURE_KEY_RELEASE));
                        }
                    } else {
                        logger.w("No RESOLUTION_REQUIRED in result", apiException);
                        apiErrorLiveEvent.call();
                    }
                });
    }

    /**
     * Requests release of the previously pre-authorized temporary exposure keys.
     * On success the keys arrive asynchronously (see
     * {@link #getWaitForKeyBroadcastsEvent()}), so an in-flight resolution is
     * recorded until {@link #handleTemporaryExposureKeys(List)} clears it.
     */
    public void requestPreAuthorizedReleaseOfTemporaryExposureKeys() {
        exposureNotificationClientWrapper
            .isEnabled()
            .continueWithTask(
                isEnabled -> {
                    if (isEnabled.getResult()) {
                        return
                            exposureNotificationClientWrapper
                                .requestPreAuthorizedTemporaryExposureKeyRelease();
                    } else {
                        apiDisabledLiveEvent.call();
                        return Tasks.forResult(null);
                    }
                })
            .addOnSuccessListener(
                result -> {
                    waitForKeyBroadcastsEvent.call();
                    inFlightResolutionLiveData.setValue(
                        new InFlightResolution(
                            true,
                            ResolutionType.GET_PREAUTHORIZED_TEMPORARY_EXPOSURE_KEY_HISTORY));
                })
            .addOnFailureListener(
                exception -> {
                    // NOTE(review): unlike the two methods above, this treats every
                    // failure (including ApiException) as an unknown error — confirm.
                    logger.e("Unknown error when attempting to start API", exception);
                    apiErrorLiveEvent.call();
                });
    }

    /**
     * Publishes the given keys and clears any in-flight resolution.
     */
    public void handleTemporaryExposureKeys(List<TemporaryExposureKey> temporaryExposureKeys) {
        inFlightResolutionLiveData.setValue(new InFlightResolution(false));
        temporaryExposureKeysLiveData.setValue(temporaryExposureKeys);
    }

    /**
     * Handles {@value android.app.Activity#RESULT_OK} for a resolution. User chose to share keys.
     */
    public void startResolutionResultGetHistoryOk() {
        inFlightResolutionLiveData.setValue(new InFlightResolution(false));
        exposureNotificationClientWrapper
            .getTemporaryExposureKeyHistory()
            .addOnSuccessListener(this::handleTemporaryExposureKeys)
            .addOnFailureListener(
                exception -> {
                    logger.e("Error handling resolution", exception);
                    apiErrorLiveEvent.call();
                });
    }

    /**
     * Handles {@value android.app.Activity#RESULT_OK} for a pre-authorization
     * resolution: just clears the in-flight marker.
     */
    public void startResolutionResultPreauthorizationOk() {
        inFlightResolutionLiveData.setValue(new InFlightResolution(false));
    }

    /**
     * Handles not {@value android.app.Activity#RESULT_OK} for a resolution. User chose not to share
     * keys.
     */
    public void startResolutionResultNotOk() {
        inFlightResolutionLiveData.setValue(new InFlightResolution(false));
    }

    /** Which API call a resolution (user consent) belongs to. */
    public enum ResolutionType {
        UNKNOWN,
        GET_TEMPORARY_EXPOSURE_KEY_HISTORY,
        PREAUTHORIZE_TEMPORARY_EXPOSURE_KEY_RELEASE,
        GET_PREAUTHORIZED_TEMPORARY_EXPOSURE_KEY_HISTORY,
    }

    /** Immutable value describing whether a resolution is in flight and for which call. */
    public static class InFlightResolution {

        private final boolean hasInFlightResolution;
        private final ResolutionType resolutionType;

        private InFlightResolution(boolean hasInFlightResolution) {
            this(hasInFlightResolution, ResolutionType.UNKNOWN);
        }

        private InFlightResolution(
            boolean hasInFlightResolution, ResolutionType resolutionType) {
            this.hasInFlightResolution = hasInFlightResolution;
            this.resolutionType = resolutionType;
        }

        public boolean hasInFlightResolution() {
            return hasInFlightResolution;
        }

        public ResolutionType getResolutionType() {
            return resolutionType;
        }
    }

    /** Immutable value pairing an {@link ApiException} with the call that needs resolving. */
    public static class ResolutionRequiredEvent {

        private final ApiException exception;
        private final ResolutionType resolutionType;

        private ResolutionRequiredEvent(ApiException exception, ResolutionType resolutionType) {
            this.exception = exception;
            this.resolutionType = resolutionType;
        }

        public ApiException getException() {
            return exception;
        }

        public ResolutionType getResolutionType() {
            return resolutionType;
        }
    }
}
|
|
package liquibase;
import liquibase.changelog.ChangeLogIterator;
import liquibase.changelog.ChangeLogParameters;
import liquibase.changelog.DatabaseChangeLog;
import liquibase.changelog.filter.ContextChangeSetFilter;
import liquibase.changelog.filter.DbmsChangeSetFilter;
import liquibase.changelog.filter.IgnoreChangeSetFilter;
import liquibase.changelog.filter.LabelChangeSetFilter;
import liquibase.changelog.filter.ShouldRunChangeSetFilter;
import liquibase.database.Database;
import liquibase.database.DatabaseConnection;
import liquibase.database.DatabaseFactory;
import liquibase.database.ObjectQuotingStrategy;
import liquibase.exception.ChangeLogParseException;
import liquibase.exception.LiquibaseException;
import liquibase.exception.LockException;
import liquibase.lockservice.LockService;
import liquibase.lockservice.LockServiceFactory;
import liquibase.logging.LogService;
import liquibase.logging.Logger;
import liquibase.logging.LoggerContext;
import liquibase.logging.LoggerFactory;
import liquibase.logging.core.NoOpLoggerContext;
import liquibase.parser.ChangeLogParser;
import liquibase.parser.ChangeLogParserFactory;
import liquibase.resource.ResourceAccessor;
import liquibase.sdk.database.MockDatabase;
import liquibase.sdk.resource.MockResourceAccessor;
import liquibase.test.Assert;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import java.util.HashMap;
import java.util.Map;
import static liquibase.test.Assert.assertListsEqual;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link Liquibase}, using Mockito mocks for the database,
 * lock service and changelog parser collaborators.
 */
public class LiquibaseTest {

    private MockResourceAccessor mockResourceAccessor;
    private Database mockDatabase;
    private LockServiceFactory mockLockServiceFactory;
    private LockService mockLockService;

    private ChangeLogParserFactory mockChangeLogParserFactory;
    private ChangeLogParser mockChangeLogParser;
    private DatabaseChangeLog mockChangeLog;
    private ChangeLogIterator mockChangeLogIterator;

    private Logger mockLogger;

    @Before
    public void before() throws Exception {
        mockResourceAccessor = new MockResourceAccessor();
        mockDatabase = mock(Database.class);
        mockLockService = mock(LockService.class);
        mockLockServiceFactory = mock(LockServiceFactory.class);
        mockChangeLogParserFactory = mock(ChangeLogParserFactory.class);
        mockChangeLogParser = mock(ChangeLogParser.class);
        mockChangeLog = mock(DatabaseChangeLog.class);
        mockChangeLogIterator = mock(ChangeLogIterator.class);
        mockLogger = mock(Logger.class);

        // Route the global singletons through our mocks for the duration of each test.
        LockServiceFactory.setInstance(mockLockServiceFactory);
        when(mockLockServiceFactory.getLockService(any(Database.class))).thenReturn(mockLockService);

        ChangeLogParserFactory.setInstance(mockChangeLogParserFactory);
        when(mockChangeLogParserFactory.getParser(anyString(), Mockito.isA(ResourceAccessor.class))).thenReturn(mockChangeLogParser);
        when(mockChangeLogParser.parse(anyString(), any(ChangeLogParameters.class), Mockito.isA(ResourceAccessor.class))).thenReturn(mockChangeLog);

        // Silence logging by handing out a mock logger.
        LogService.setLoggerFactory(new LoggerFactory() {
            @Override
            public Logger getLog(Class clazz) {
                return mockLogger;
            }

            @Override
            public LoggerContext pushContext(String key, Object object) {
                return new NoOpLoggerContext();
            }

            @Override
            public void close() {
            }
        });
    }

    @After
    public void after() {
        verifyNoMoreInteractions(mockLockService, mockChangeLogParser, mockChangeLog, mockChangeLogIterator); //for no other interactions of normal use objects. Not automatically checking mockDatabase and the *Factory mocks
        Mockito.reset(mockDatabase, mockLockServiceFactory, mockLockService, mockChangeLogParserFactory, mockChangeLogParser, mockChangeLog, mockChangeLogIterator);
        // Restore the real singletons so other test classes are unaffected.
        LockServiceFactory.reset();
        ChangeLogParserFactory.reset();
    }

    @Test
    public void testConstructor() {
        MockResourceAccessor resourceAccessor = this.mockResourceAccessor;
        MockDatabase database = new MockDatabase();

        Liquibase liquibase = new Liquibase("com/example/test.xml", resourceAccessor, database);

        assertNotNull("change log object may not be null", liquibase.getLog());

        assertEquals("correct name of the change log file is returned",
            "com/example/test.xml", liquibase.getChangeLogFile());

        assertSame("ressourceAccessor property is set as requested",
            resourceAccessor, liquibase.getResourceAccessor());

        assertNotNull("parameters list for the change log is not null",
            liquibase.getChangeLogParameters());
        assertEquals("Standard database changelog parameters were not set",
            "DATABASECHANGELOGLOCK",
            liquibase.getChangeLogParameters().getValue("database.databaseChangeLogLockTableName", null)
        );

        assertSame("database object for the change log is set as requested",
            database, liquibase.getDatabase());
    }

    @Test
    public void testConstructorChangelogPathsStandardize() {
        Liquibase liquibase = new Liquibase("path\\with\\windows\\separators.xml", mockResourceAccessor, new MockDatabase());
        assertEquals("Windows path separators are translated correctly",
            "path/with/windows/separators.xml", liquibase.getChangeLogFile());

        liquibase = new Liquibase("path/with/unix/separators.xml", mockResourceAccessor, new MockDatabase());
        assertEquals("Unix path separators are left intact",
            "path/with/unix/separators.xml", liquibase.getChangeLogFile());

        liquibase = new Liquibase("/absolute/path/remains.xml", mockResourceAccessor, new MockDatabase());
        assertEquals("An absolute path is left intact",
            "/absolute/path/remains.xml", liquibase.getChangeLogFile());
    }

    @Test
    public void testConstructorCreateDatabaseInstanceFromConnection() throws LiquibaseException {
        DatabaseConnection databaseConnection = mock(DatabaseConnection.class);
        Database database = mockDatabase;

        try {
            DatabaseFactory.setInstance(mock(DatabaseFactory.class));
            when(DatabaseFactory.getInstance().findCorrectDatabaseImplementation(databaseConnection)).thenReturn(database);

            Liquibase liquibase = new Liquibase("com/example/test.xml", mockResourceAccessor, databaseConnection);
            assertSame("DB-Manul constructor passing connection did not find the correct database implementation",
                database, liquibase.getDatabase());
        } finally {
            DatabaseFactory.reset();
        }
    }

    @Test
    public void testGetResourceAccessor() {
        Liquibase liquibase = new Liquibase("com/example/test.xml", mockResourceAccessor, mockDatabase);
        // Fix: compare against the accessor handed to the constructor. The previous
        // assertion compared the getter's result with itself, which always passed
        // and verified nothing.
        assertSame("ressourceAccessor is set as requested",
            mockResourceAccessor, liquibase.getResourceAccessor());
    }

    @Test
    public void testSetCurrentDateTimeFunction() {
        Database database = mockDatabase;
        String testFunction = "GetMyTime";

        new Liquibase("com/example/test.xml", mockResourceAccessor, database)
            .getDatabase()
            .setCurrentDateTimeFunction(testFunction);
        verify(database).setCurrentDateTimeFunction(testFunction);
    }

    @Test
    public void testUpdatePassedStringContext() throws LiquibaseException {
        LiquibaseDelegate liquibase = new LiquibaseDelegate() {
            @Override
            public void update(Contexts contexts) {
                objectToVerify = contexts;
            }
        };

        liquibase.update("test");
        assertEquals("context is set correctly", "test", liquibase.objectToVerify.toString());
        liquibase.reset();

        liquibase.update("");
        assertEquals("context is set correctly", "", liquibase.objectToVerify.toString());
        liquibase.reset();

        liquibase.update((String) null);
        assertEquals("context is set correctly", "", liquibase.objectToVerify.toString());
        liquibase.reset();

        liquibase.update("test1, test2");
        assertEquals("context is set correctly", "test1,test2", liquibase.objectToVerify.toString());
        liquibase.reset();
    }

    @Test(expected = LockException.class)
    public void testUpdateExceptionGettingLock() throws LiquibaseException {
        doThrow(LockException.class).when(mockLockService).waitForLock();

        Liquibase liquibase = new Liquibase("com/example/test.xml", mockResourceAccessor, mockDatabase);
        try {
            liquibase.update((Contexts) null);
        } finally {
            verify(mockLockService).waitForLock();
            //should not call anything else, even releaseLock()
        }
    }

    @Test(expected = ChangeLogParseException.class)
    public void testUpdateExceptionDoingUpdate() throws LiquibaseException {
        Contexts contexts = new Contexts("a,b");

        Liquibase liquibase = new Liquibase("com/example/test.xml", mockResourceAccessor, mockDatabase);

        doThrow(ChangeLogParseException.class).when(mockChangeLogParser).parse("com/example/test.xml", liquibase.getChangeLogParameters(), mockResourceAccessor);

        try {
            liquibase.update(contexts);
        } finally {
            verify(mockLockService).waitForLock();
            verify(mockLockService).releaseLock(); //should still call
            verify(mockDatabase).setObjectQuotingStrategy(ObjectQuotingStrategy.LEGACY); //should still call
            verify(mockChangeLogParser).parse("com/example/test.xml", liquibase.getChangeLogParameters(), mockResourceAccessor);
        }
    }

    @Test
    /* False positive: We do have an assertion in this test. */
    @SuppressWarnings("squid:S2699")
    public void testGetStandardChangelogIterator() throws LiquibaseException {
        ChangeLogIterator iterator =
            new Liquibase(
                "com/example/changelog.xml",
                mockResourceAccessor,
                mockDatabase
            ).getStandardChangelogIterator(
                new Contexts("a", "b"),
                new LabelExpression("x", "y"),
                mockChangeLog
            );
        // The iterator must apply these filters, in this order.
        assertListsEqual(new Class[] {ShouldRunChangeSetFilter.class,
                ContextChangeSetFilter.class,
                LabelChangeSetFilter.class,
                DbmsChangeSetFilter.class,
                IgnoreChangeSetFilter.class},
            iterator.getChangeSetFilters(), new Assert.AssertFunction() {
                @Override
                public void check(String message, Object expected, Object actual) {
                    assertEquals(message, expected, actual.getClass());
                }
            });
    }

    /**
     * Convenience helper class for testing Liquibase methods that simply delegate to another.
     * To use, create a subclass that overrides the method delegated to with an implementation that stores whatever params are being passed.
     * After calling the delegating method in your test, assert against the objectToVerify
     */
    private static class LiquibaseDelegate extends Liquibase {

        /**
         * If using multiple parameters, store them here
         */
        protected final Map<String, Object> objectsToVerify = new HashMap<>();
        /**
         * If using a single parameter, store in here
         */
        protected Object objectToVerify;

        private LiquibaseDelegate() {
            super("com/example/test.xml", new MockResourceAccessor(), mock(Database.class));
        }

        /**
         * Resets the object(s)ToVerify so this delegate can be reused in a test.
         */
        public void reset() {
            objectToVerify = null;
            objectsToVerify.clear();
        }
    }
}
|
|
/*
***************************************************************************
* Mica - the Java(tm) Graphics Framework *
***************************************************************************
* NOTICE: Permission to use, copy, and modify this software and its *
* documentation is hereby granted provided that this notice appears in *
* all copies. *
* *
* Permission to distribute un-modified copies of this software and its *
* documentation is hereby granted provided that no fee is charged and *
* that this notice appears in all copies. *
* *
* SOFTWARE FARM MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE *
* SUITABILITY OF THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT *
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR *
* A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. SOFTWARE FARM SHALL NOT BE *
* LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR *
* CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE, MODIFICATION OR *
* DISTRIBUTION OF THIS SOFTWARE OR ITS DERIVATIVES. *
* *
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND *
* DISTRIBUTORS HAVE NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, *
* UPDATES, ENHANCEMENTS, OR MODIFICATIONS. *
* *
***************************************************************************
* Copyright (c) 1997-2004 Software Farm, Inc. All Rights Reserved. *
***************************************************************************
*/
package com.swfm.mica;
import java.util.Vector;
import java.io.*;
import com.swfm.mica.util.TextFile;
import com.swfm.mica.util.Utility;
// FIX: TO DO: break this up into 4 + 1 classes
/**
* @version %I% %G%
* @author Michael L. Davis
* @release 1.4.0(Beta)
* @module %M%
* @language Java (JDK 1.4)
*/
/**
 * Manages the four kinds of context-sensitive help that Mica supports
 * (tool hints, balloon help, status-bar help and dialog help): whether each
 * kind is enabled, which help file backs it, which lookup methodology each
 * kind uses, and the "no help available" message shown in debug modes.
 *
 * @version %I% %G%
 * @author Michael L. Davis
 * @release 1.4.0(Beta)
 * @module %M%
 * @language Java (JDK 1.4)
 */
public class MiHelpManager
{
    public static final int NO_HELP_AVAILABLE_ID = 0;
    private static final String NO_HELP_AVAILABLE_MESSAGE = "No Help Available";

    // Whether each kind of help is currently enabled.
    private boolean toolHintHelpEnabled = true;
    private boolean balloonHelpEnabled = true;
    private boolean statusHelpEnabled = true;
    private boolean dialogHelpEnabled = true;

    // Help file backing each kind of help; may be null if never assigned.
    private MiiHelpFile toolHintHelpFile;
    private MiiHelpFile balloonHelpFile;
    private MiiHelpFile statusHelpFile;
    private MiiHelpFile dialogHelpFile;

    private MiPart aboutDialog;
    private MiPart helpOnApplicationDialog;

    // Per-help-type message returned when no help is found (debug modes only);
    // indexed by the *_HELP_TYPE constants below.
    private String[] noHelpAvailableMessage = new String[4];

    // Help types; used as indices into noHelpAvailableMessage.
    public static final int TOOLTIP_HELP_TYPE = 0;
    public static final int BALLOON_HELP_TYPE = 1;
    public static final int STATUS_HELP_TYPE = 2;
    public static final int DIALOG_HELP_TYPE = 3;

    // Debug modes; the two flags may be added together to combine them.
    public static final int SHOW_HELP_NOT_FOUND_MESSAGES_DEBUG_MODE = 1;
    public static final int SHOW_NAME_OF_OBJECT_WITHOUT_HELP_DEBUG_MODE = 2;

    private int debugMode = MiiTypes.Mi_NONE;
    /*
    private int debugMode = SHOW_HELP_NOT_FOUND_MESSAGES_DEBUG_MODE
            + SHOW_NAME_OF_OBJECT_WITHOUT_HELP_DEBUG_MODE;
    */

    // Lookup methodology used for each kind of help.
    private int toolHintMethodology = METHODOLOGY_SHOWING_HELPTEXT_ASSIGNED_TO_OBJECT;
    private int balloonMethodology = METHODOLOGY_SHOWING_HELPTEXT_ASSIGNED_TO_OBJECT;
    private int statusMethodology = METHODOLOGY_SHOWING_HELPTEXT_ASSIGNED_TO_OBJECT;
    private int dialogMethodology = METHODOLOGY_SHOWING_HELPTEXT_ASSIGNED_TO_OBJECT;

    public static final int METHODOLOGY_SHOWING_HELPTEXT_ASSIGNED_TO_OBJECT = 0;
    public static final int METHODOLOGY_SHOWING_ONE_MESSAGE_USING_OBJECT_NAME_AS_KEY_IN_FILE = 1;
    public static final int METHODOLOGY_SCROLLING_TO_MESSAGE_USING_OBJECT_NAME_AS_KEY_IN_FILE = 2;
    public static final int METHODOLOGY_SHOWING_ONE_MESSAGE_USING_OBJECT_HELPTEXT_AS_KEY_IN_FILE = 3;
    public static final int METHODOLOGY_SCROLLING_TO_MESSAGE_USING_OBJECT_HELPTEXT_AS_KEY_IN_FILE = 4;
    public static final int METHODOLOGY_USING_HELPTEXT_AS_FILENAME = 5;

    /**
     * Creates a help manager with every help kind enabled and the default
     * "No Help Available" message installed for all four help types.
     */
    public MiHelpManager()
    {
        noHelpAvailableMessage[TOOLTIP_HELP_TYPE] = NO_HELP_AVAILABLE_MESSAGE;
        noHelpAvailableMessage[BALLOON_HELP_TYPE] = NO_HELP_AVAILABLE_MESSAGE;
        noHelpAvailableMessage[STATUS_HELP_TYPE] = NO_HELP_AVAILABLE_MESSAGE;
        noHelpAvailableMessage[DIALOG_HELP_TYPE] = NO_HELP_AVAILABLE_MESSAGE;
    }

    /** Enables or disables balloon help. */
    public void setBalloonHelpEnabled(boolean flag)
    {
        balloonHelpEnabled = flag;
    }
    /** Returns whether balloon help is enabled. */
    public boolean getBalloonHelpEnabled()
    {
        return(balloonHelpEnabled);
    }
    /** Enables or disables tool-hint (tooltip) help. */
    public void setToolHintHelpEnabled(boolean flag)
    {
        toolHintHelpEnabled = flag;
    }
    /** Returns whether tool-hint (tooltip) help is enabled. */
    public boolean getToolHintHelpEnabled()
    {
        return(toolHintHelpEnabled);
    }
    /** Enables or disables status-bar help. */
    public void setStatusHelpEnabled(boolean flag)
    {
        statusHelpEnabled = flag;
    }
    /** Returns whether status-bar help is enabled. */
    public boolean getStatusHelpEnabled()
    {
        return(statusHelpEnabled);
    }
    /** Enables or disables dialog help. */
    public void setDialogHelpEnabled(boolean flag)
    {
        dialogHelpEnabled = flag;
    }
    /** Returns whether dialog help is enabled. */
    public boolean getDialogHelpEnabled()
    {
        return(dialogHelpEnabled);
    }

    /** Sets the lookup methodology (one of the METHODOLOGY_* constants) for tool hints. */
    public void setToolHintMethodology(int methodology)
    {
        toolHintMethodology = methodology;
    }
    /** Returns the lookup methodology used for tool hints. */
    public int getToolHintMethodology()
    {
        return(toolHintMethodology);
    }
    /** Sets the lookup methodology (one of the METHODOLOGY_* constants) for status help. */
    public void setStatusHelpMethodology(int methodology)
    {
        statusMethodology = methodology;
    }
    /** Returns the lookup methodology used for status help. */
    public int getStatusHelpMethodology()
    {
        return(statusMethodology);
    }
    /** Sets the lookup methodology (one of the METHODOLOGY_* constants) for balloon help. */
    public void setBalloonHelpMethodology(int methodology)
    {
        balloonMethodology = methodology;
    }
    /** Returns the lookup methodology used for balloon help. */
    public int getBalloonHelpMethodology()
    {
        return(balloonMethodology);
    }
    /** Sets the lookup methodology (one of the METHODOLOGY_* constants) for dialog help. */
    public void setDialogHelpMethodology(int methodology)
    {
        dialogMethodology = methodology;
    }
    /** Returns the lookup methodology used for dialog help. */
    public int getDialogHelpMethodology()
    {
        return(dialogMethodology);
    }

    /** Loads the named dot-key file as the tool-hint help file. */
    public void setToolHintHelpFilename(String name)
    { toolHintHelpFile = new MiDotKeyHelpFile(name, true); }
    /** Loads the named dot-key file as the balloon help file. */
    public void setBalloonHelpFilename(String name)
    { balloonHelpFile = new MiDotKeyHelpFile(name, true); }
    /**
     * Loads the named dot-key file as the balloon help file.
     * @deprecated misspelled; use {@link #setBalloonHelpFilename(String)} instead.
     */
    public void setBallonHelpFilename(String name)
    { setBalloonHelpFilename(name); }
    /** Loads the named dot-key file as the status help file. */
    public void setStatusHelpFilename(String name)
    { statusHelpFile = new MiDotKeyHelpFile(name, true); }
    /** Loads the named dot-key file as the dialog help file. */
    public void setDialogHelpFilename(String name)
    { dialogHelpFile = new MiDotKeyHelpFile(name, true); }

    /** Assigns the help file used for tool hints. */
    public void setToolHintHelpFile(MiiHelpFile file)
    { toolHintHelpFile = file; }
    /** Assigns the help file used for balloon help. */
    public void setBalloonHelpFile(MiiHelpFile file)
    { balloonHelpFile = file; }
    /** Assigns the help file used for status help. */
    public void setStatusHelpFile(MiiHelpFile file)
    { statusHelpFile = file; }
    /** Assigns the help file used for dialog help. */
    public void setDialogHelpFile(MiiHelpFile file)
    { dialogHelpFile = file; }

    /** Sets the message shown when no tool-hint help is found (debug modes only). */
    public void setNoToolHintHelpAvailableMessage(String msg)
    { noHelpAvailableMessage[TOOLTIP_HELP_TYPE] = msg; }
    /** Sets the message shown when no balloon help is found (debug modes only). */
    public void setNoBalloonHelpAvailableMessage(String msg)
    { noHelpAvailableMessage[BALLOON_HELP_TYPE] = msg; }
    /** Sets the message shown when no status help is found (debug modes only). */
    public void setNoStatusHelpAvailableMessage(String msg)
    { noHelpAvailableMessage[STATUS_HELP_TYPE] = msg; }
    /** Sets the message shown when no dialog help is found (debug modes only). */
    public void setNoDialogHelpAvailableMessage(String msg)
    { noHelpAvailableMessage[DIALOG_HELP_TYPE] = msg; }

    /**
     * Returns the tool-hint text for the given object at the given point,
     * or null when tool-hint help is disabled or nothing is found.
     */
    public String getToolHintForObject(MiPart obj, MiPoint point)
    {
        if (!toolHintHelpEnabled)
            return(null);
        return(getHelpForObject(obj, TOOLTIP_HELP_TYPE, point));
    }
    /**
     * Returns the status-bar help text for the given object at the given point,
     * or null when status help is disabled or nothing is found.
     */
    public String getStatusHelpForObject(MiPart obj, MiPoint point)
    {
        if (!statusHelpEnabled)
            return(null);
        return(getHelpForObject(obj, STATUS_HELP_TYPE, point));
    }
    /**
     * Returns the balloon help text for the given object at the given point,
     * or null when balloon help is disabled or nothing is found.
     */
    public String getBalloonHelpForObject(MiPart obj, MiPoint point)
    {
        if (!balloonHelpEnabled)
            return(null);
        return(getHelpForObject(obj, BALLOON_HELP_TYPE, point));
    }
    /**
     * Returns the dialog help text for the given object at the given point,
     * or null when dialog help is disabled or nothing is found.
     */
    public String getDialogHelpForObject(MiPart obj, MiPoint point)
    {
        if (!dialogHelpEnabled)
            return(null);
        return(getHelpForObject(obj, DIALOG_HELP_TYPE, point));
    }

    /** Assigns the dialog displayed by {@link #displayHelpOnApplication}. */
    public void setHelpOnApplicationDialog(MiPart dialog)
    {
        helpOnApplicationDialog = dialog;
    }
    /** Returns the help-on-application dialog, or null if none is assigned. */
    public MiPart getHelpOnApplicationDialog()
    {
        return(helpOnApplicationDialog);
    }
    /** Shows the help-on-application dialog, if one has been assigned. */
    public void displayHelpOnApplication()
    {
        if (helpOnApplicationDialog != null)
            helpOnApplicationDialog.setVisible(true);
    }
    /** Assigns the dialog displayed by {@link #displayAboutDialog}. */
    public void setAboutDialog(MiPart aboutDialog)
    {
        this.aboutDialog = aboutDialog;
    }
    /** Returns the about dialog, or null if none is assigned. */
    public MiPart getAboutDialog()
    {
        return(aboutDialog);
    }
    /** Shows the about dialog, if one has been assigned. */
    public void displayAboutDialog()
    {
        if (aboutDialog != null)
            aboutDialog.setVisible(true);
    }

    /**
     * Returns true when the given methodology needs the help text assigned
     * directly to the object (i.e. any methodology other than the two that
     * key off the object's *name* in a file).
     */
    private boolean helpTextIsNeeded(int methodology)
    {
        if ((methodology != METHODOLOGY_SCROLLING_TO_MESSAGE_USING_OBJECT_NAME_AS_KEY_IN_FILE)
            && (methodology != METHODOLOGY_SHOWING_ONE_MESSAGE_USING_OBJECT_NAME_AS_KEY_IN_FILE))
        {
            return(true);
        }
        return(false);
    }

    /**
     * Looks up help for the given object using the help file and methodology
     * configured for the given help type.
     *
     * @param obj      the object the user wants help about
     * @param helpType one of the *_HELP_TYPE constants
     * @param point    where, within obj, the help was requested
     * @return the help text, a debug message (see the debug modes), or null
     * @throws IllegalArgumentException if helpType is not a known help type
     */
    private String getHelpForObject(MiPart obj, int helpType, MiPoint point)
    {
        boolean getHelpText = false;
        MiiHelpInfo helpInfo = null;
        MiiHelpFile helpFile;
        String helpText = "";
        int methodology;

        switch (helpType)
        {
            case TOOLTIP_HELP_TYPE:
                helpFile = toolHintHelpFile;
                methodology = toolHintMethodology;
                getHelpText = helpTextIsNeeded(methodology);
                if (getHelpText)
                {
                    helpInfo = obj.getToolHintHelp(point);
                    if ((helpInfo != null) && (helpInfo.isEnabled()))
                        helpText = helpInfo.getMessage();
                }
                break;
            case BALLOON_HELP_TYPE:
                helpFile = balloonHelpFile;
                methodology = balloonMethodology;
                getHelpText = helpTextIsNeeded(methodology);
                if (getHelpText)
                {
                    helpInfo = obj.getBalloonHelp(point);
                    if ((helpInfo != null) && (helpInfo.isEnabled()))
                        helpText = helpInfo.getMessage();
                }
                break;
            case STATUS_HELP_TYPE:
                helpFile = statusHelpFile;
                methodology = statusMethodology;
                getHelpText = helpTextIsNeeded(methodology);
                if (getHelpText)
                {
                    helpInfo = obj.getStatusHelp(point);
                    if ((helpInfo != null) && (helpInfo.isEnabled()))
                        helpText = helpInfo.getMessage();
                }
                break;
            case DIALOG_HELP_TYPE:
                helpFile = dialogHelpFile;
                methodology = dialogMethodology;
                getHelpText = helpTextIsNeeded(methodology);
                if (getHelpText)
                {
                    helpInfo = obj.getDialogHelp(point);
                    if ((helpInfo != null) && (helpInfo.isEnabled()))
                        helpText = helpInfo.getMessage();
                }
                break;
            default:
                throw new IllegalArgumentException(this + ": Unknown help type: " + helpType);
        }

        if (getHelpText && (helpInfo == null))
            return(getDebugMessage(obj, helpType));

        switch (methodology)
        {
            case METHODOLOGY_SHOWING_HELPTEXT_ASSIGNED_TO_OBJECT:
            {
                return((helpText.length() == 0) ? null : helpText);
            }
            case METHODOLOGY_SHOWING_ONE_MESSAGE_USING_OBJECT_NAME_AS_KEY_IN_FILE:
            {
                // Guard against a methodology configured without a help file.
                if (helpFile == null)
                    return(getDebugMessage(obj, helpType));
                String key = obj.getName();
                String s = helpFile.getMessageAssignedToKey(key);
                if (s != null)
                    return(s);
                return(getDebugMessage(obj, helpType));
            }
            case METHODOLOGY_SCROLLING_TO_MESSAGE_USING_OBJECT_NAME_AS_KEY_IN_FILE:
            {
                MiDebug.printlnError("Scrolling text file NOT IMPLEMENTED");
                /*
                if (!helpViewer.scrollToMessageAssignedToKey(filename, obj.getName()))
                    return(getDebugMessage(obj, helpType));
                return(null);
                */
                // BUGFIX: this case previously fell through into the next case
                // and performed an unrelated file lookup keyed by helpText.
                return(null);
            }
            case METHODOLOGY_SHOWING_ONE_MESSAGE_USING_OBJECT_HELPTEXT_AS_KEY_IN_FILE:
            {
                // Guard against a methodology configured without a help file.
                if (helpFile == null)
                    return(getDebugMessage(obj, helpType));
                String s = helpFile.getMessageAssignedToKey(helpText);
                return((s == null) ? getDebugMessage(obj, helpType) : s);
            }
            case METHODOLOGY_SCROLLING_TO_MESSAGE_USING_OBJECT_HELPTEXT_AS_KEY_IN_FILE:
            {
                MiDebug.printlnError("Scrolling text file NOT IMPLEMENTED");
                /*
                if (!helpViewer.scrollToMessageAssignedToKey(filename, helpText))
                    return(getDebugMessage(obj, helpType));
                return(null);
                */
                // BUGFIX: this case previously fell through into the next case
                // and tried to open helpText as a filename.
                return(null);
            }
            case METHODOLOGY_USING_HELPTEXT_AS_FILENAME:
            {
                String msg = loadHelpTextFile(helpText);
                return((msg == null) ? getDebugMessage(obj, helpType) : msg);
            }
        }
        return((helpText.length() == 0) ? null : helpText);
    }

    /**
     * Reads the whole of the named file and returns its contents as one
     * string (line separators are not preserved, matching the original
     * behavior), or null when the file cannot be opened.
     */
    private String loadHelpTextFile(String filename)
    {
        BufferedReader file = Utility.openInputFile(filename);
        if (file == null)
            return(null);
        // StringBuffer avoids the O(n^2) cost of String.concat in a loop.
        StringBuffer text = new StringBuffer();
        String line;
        try
        {
            while ((line = Utility.readLine(file)) != null)
            {
                text.append(line);
            }
        }
        finally
        {
            // BUGFIX: the reader was previously never closed.
            try
            {
                file.close();
            }
            catch (IOException ignored)
            {
                // Best-effort close; the contents were already read.
            }
        }
        return(text.toString());
    }

    /**
     * Returns the appropriate debug message for an object that has no help,
     * based on the current debug mode: the no-help message, the object's
     * name, both, or null when debugging is off.
     */
    private String getDebugMessage(MiPart obj, int helpType)
    {
        if (debugMode == SHOW_HELP_NOT_FOUND_MESSAGES_DEBUG_MODE)
            return(noHelpAvailableMessage[helpType]);
        if (debugMode == SHOW_NAME_OF_OBJECT_WITHOUT_HELP_DEBUG_MODE)
            return(obj.toString());
        if (debugMode == SHOW_NAME_OF_OBJECT_WITHOUT_HELP_DEBUG_MODE
            + SHOW_HELP_NOT_FOUND_MESSAGES_DEBUG_MODE)
            return(noHelpAvailableMessage[helpType] + " for: " + obj.toString());
        return(null);
    }
}
|
|
/*******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2015 Neustar Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*******************************************************************************/
package com.neulevel.epp.xri;
import java.util.*;
import org.w3c.dom.*;
import com.neulevel.epp.core.EppEntity;
import com.neulevel.epp.core.EppE164;
import com.neulevel.epp.core.EppContactData;
import com.neulevel.epp.core.EppUtil;
/**
* This <code>EppXriSocialData</code> class defines social
* information associated with XRI authority objects. It
* implements XRI socialDataType and chgSocialDataType defined
* in the XRI authority schema file.
*
* @author Ning Zhang [email protected]
* @version $Revision: 1.3 $ $Date: 2010/08/20 21:33:25 $
*/
/**
 * This <code>EppXriSocialData</code> class defines social
 * information associated with XRI authority objects. It
 * implements XRI socialDataType and chgSocialDataType defined
 * in the XRI authority schema file.
 *
 * @author Ning Zhang [email protected]
 * @version $Revision: 1.3 $ $Date: 2010/08/20 21:33:25 $
 */
public class EppXriSocialData extends EppEntity
{
    private EppContactData postalInfo;
    private EppE164 voicePrimary;
    private EppE164 voiceSecondary;
    // Nullified flags: when true, an empty element is emitted in toXML so an
    // EPP update command removes the corresponding value on the server.
    private boolean voiceNullified;
    private EppE164 fax;
    private boolean faxNullified;
    private EppE164 pager;
    private boolean pagerNullified;
    private String emailPrimary;
    private String emailSecondary;
    private boolean emailNullified;

    /**
     * Creates an <code>EppXriSocialData</code> object
     */
    public EppXriSocialData()
    {
        this.postalInfo = null;
        this.voicePrimary = null;
        this.voiceSecondary = null;
        this.fax = null;
        this.pager = null;
        this.emailPrimary = null;
        this.emailSecondary = null;
        this.faxNullified = false;
        this.pagerNullified = false;
        this.voiceNullified = false;
        this.emailNullified = false;
    }

    /**
     * Sets up postal information of the XRI authority
     */
    public void setPostalInfo( EppContactData postalInfo )
    {
        this.postalInfo = postalInfo;
    }

    /**
     * Gets the postal information of the XRI authority
     */
    public EppContactData getPostalInfo()
    {
        return this.postalInfo;
    }

    /**
     * Returns true if voice numbers are to be nullified via an EPP update command
     */
    public boolean isVoiceNullified()
    {
        return this.voiceNullified;
    }

    /**
     * Gets the primary voice phone number
     */
    public EppE164 getPrimaryVoice()
    {
        return this.voicePrimary;
    }

    /**
     * Sets the primary voice phone number
     */
    public void setPrimaryVoice( String voice )
    {
        this.voicePrimary = new EppE164(voice);
        this.voiceNullified = false;
    }

    /**
     * Sets the primary voice phone number and extension
     */
    public void setPrimaryVoice( String voice, String ext )
    {
        this.voicePrimary = new EppE164(voice, ext);
        this.voiceNullified = false;
    }

    /**
     * Sets the primary voice phone number
     */
    public void setPrimaryVoice( EppE164 voice )
    {
        this.voicePrimary = voice;
        this.voiceNullified = false;
    }

    /**
     * Gets the secondary voice phone number
     */
    public EppE164 getSecondaryVoice()
    {
        return this.voiceSecondary;
    }

    /**
     * Sets the secondary voice phone number
     */
    public void setSecondaryVoice( String voice )
    {
        this.voiceSecondary = new EppE164(voice);
        this.voiceNullified = false;
    }

    /**
     * Sets the secondary voice phone number and extension
     */
    public void setSecondaryVoice( String voice, String ext )
    {
        this.voiceSecondary = new EppE164(voice, ext);
        this.voiceNullified = false;
    }

    /**
     * Sets the secondary voice phone number
     */
    public void setSecondaryVoice( EppE164 voice )
    {
        this.voiceSecondary = voice;
        this.voiceNullified = false;
    }

    /**
     * Nullifies the voice number via an EPP update command
     */
    public void nullifyVoice()
    {
        this.voicePrimary = null;
        this.voiceSecondary = null;
        this.voiceNullified = true;
    }

    /**
     * Returns true if fax is to be nullified via an EPP update command
     */
    public boolean isFaxNullified()
    {
        return this.faxNullified;
    }

    /**
     * Gets the fax number
     */
    public EppE164 getFax()
    {
        return this.fax;
    }

    /**
     * Sets the fax number
     */
    public void setFax( String fax )
    {
        this.fax = new EppE164(fax);
        this.faxNullified = false;
    }

    /**
     * Sets the fax number and extension
     */
    public void setFax( String fax, String ext )
    {
        this.fax = new EppE164(fax, ext);
        this.faxNullified = false;
    }

    /**
     * Sets the fax number
     */
    public void setFax( EppE164 fax )
    {
        this.fax = fax;
        this.faxNullified = false;
    }

    /**
     * Nullifies the fax number via an EPP update command
     */
    public void nullifyFax()
    {
        this.fax = null;
        this.faxNullified = true;
    }

    /**
     * Returns true if pager is to be nullified via an EPP update command
     */
    public boolean isPagerNullified()
    {
        return this.pagerNullified;
    }

    /**
     * Gets the pager number
     */
    public EppE164 getPager()
    {
        return this.pager;
    }

    /**
     * Sets the pager number
     */
    public void setPager( String pager )
    {
        this.pager = new EppE164(pager);
        this.pagerNullified = false;
    }

    /**
     * Sets the pager number and extension
     */
    public void setPager( String pager, String ext )
    {
        this.pager = new EppE164(pager, ext);
        this.pagerNullified = false;
    }

    /**
     * Sets the pager number
     */
    public void setPager( EppE164 pager )
    {
        this.pager = pager;
        this.pagerNullified = false;
    }

    /**
     * Nullifies the pager number via an EPP update command
     */
    public void nullifyPager()
    {
        this.pager = null;
        this.pagerNullified = true;
    }

    /**
     * Returns true if email addresses are to be nullified via an EPP update command
     */
    public boolean isEmailNullified()
    {
        return this.emailNullified;
    }

    /**
     * Gets the primary email address
     */
    public String getPrimaryEmail()
    {
        return this.emailPrimary;
    }

    /**
     * Sets the primary email address
     */
    public void setPrimaryEmail( String email )
    {
        this.emailPrimary = email;
        this.emailNullified = false;
    }

    /**
     * Gets the secondary email address
     */
    public String getSecondaryEmail()
    {
        return this.emailSecondary;
    }

    /**
     * Sets the secondary email address
     */
    public void setSecondaryEmail( String email )
    {
        this.emailSecondary = email;
        this.emailNullified = false;
    }

    /**
     * Nullifies the email addresses via an EPP update command
     */
    public void nullifyEmail()
    {
        this.emailPrimary = null;
        this.emailSecondary = null;
        this.emailNullified = true;
    }

    /**
     * Converts the <code>EppXriSocialData</code> object into an XML element.
     * A nullified field is emitted as an empty element; otherwise each
     * non-null value is emitted as a child of the returned element.
     *
     * @param doc the XML <code>Document</code> object
     * @param tag the tag/element name for the <code>EppXriSocialData</code> object
     *
     * @return an <code>Element</code> object
     */
    public Element toXML( Document doc, String tag )
    {
        Element body = doc.createElement(tag);
        Element elm;
        if( this.postalInfo != null )
        {
            body.appendChild(this.postalInfo.toXML(doc, "postalInfo"));
        }
        if( this.voiceNullified == true )
        {
            elm = doc.createElement("voice");
            body.appendChild(elm);
        }
        else
        {
            if( this.voicePrimary != null )
            {
                body.appendChild(this.voicePrimary.toXML(doc, "voice"));
            }
            if( this.voiceSecondary != null )
            {
                body.appendChild(this.voiceSecondary.toXML(doc, "voice"));
            }
        }
        if( this.faxNullified == true )
        {
            elm = doc.createElement("fax");
            body.appendChild(elm);
        }
        else if( this.fax != null )
        {
            body.appendChild(this.fax.toXML(doc, "fax"));
        }
        if( this.pagerNullified == true )
        {
            elm = doc.createElement("pager");
            body.appendChild(elm);
        }
        else if( this.pager != null )
        {
            body.appendChild(this.pager.toXML(doc, "pager"));
        }
        if( this.emailNullified == true )
        {
            elm = doc.createElement("email");
            body.appendChild(elm);
        }
        else
        {
            if( this.emailPrimary != null )
            {
                elm = doc.createElement("email");
                elm.appendChild(doc.createTextNode(this.emailPrimary));
                body.appendChild(elm);
            }
            if( this.emailSecondary != null )
            {
                elm = doc.createElement("email");
                elm.appendChild(doc.createTextNode(this.emailSecondary));
                body.appendChild(elm);
            }
        }
        return body;
    }

    /**
     * Converts an XML element into an <code>EppXriSocialData</code> object.
     * The caller of this method must make sure that the root node is of
     * the EPP XRI socialDataType or chgSocialDataType.
     * Only the first two "voice" and first two "email" children are used.
     *
     * @param root root node for an <code>EppXriSocialData</code> object in
     *             XML format
     *
     * @return an <code>EppXriSocialData</code> object, or null if the node
     *         contains no recognized children
     */
    public static EppEntity fromXML( Node root )
    {
        EppXriSocialData data = null;
        int voiceCount = 0;
        int emailCount = 0;
        NodeList list = root.getChildNodes();
        for( int i = 0; i < list.getLength(); i++ )
        {
            Node node = list.item(i);
            String name = node.getLocalName();
            if( name == null )
            {
                continue;
            }
            if( name.equals("postalInfo") )
            {
                EppContactData postalInfo = (EppContactData) EppContactData.fromXML(node);
                if( postalInfo != null )
                {
                    if( data == null )
                    {
                        data = new EppXriSocialData();
                    }
                    data.setPostalInfo(postalInfo);
                }
            }
            else if( name.equals("voice") && (voiceCount < 2) )
            {
                EppE164 voice = (EppE164) EppE164.fromXML(node);
                if( voice != null )
                {
                    if( data == null )
                    {
                        data = new EppXriSocialData();
                    }
                    if( (voice.getNumber() != null) && (voice.getNumber().length() > 0) )
                    {
                        if( voiceCount == 0 )
                        {
                            data.setPrimaryVoice(voice);
                        }
                        else if( voiceCount == 1 )
                        {
                            data.setSecondaryVoice(voice);
                        }
                    }
                    voiceCount++;
                    if( ((voice.getExtension() == null) || (voice.getExtension().length() == 0))
                        && ((voice.getNumber() == null) || (voice.getNumber().length() == 0)) )
                    {
                        //data.nullifyVoice();
                    }
                }
            }
            else if( name.equals("fax") )
            {
                EppE164 fax = (EppE164) EppE164.fromXML(node);
                if( fax != null )
                {
                    if( data == null )
                    {
                        data = new EppXriSocialData();
                    }
                    // An empty fax element means "remove the fax number".
                    if( (fax.getNumber() == null) || (fax.getNumber().length() == 0) )
                    {
                        data.nullifyFax();
                    }
                    else
                    {
                        data.setFax(fax);
                    }
                }
            }
            else if( name.equals("pager") )
            {
                EppE164 pager = (EppE164) EppE164.fromXML(node);
                if( pager != null )
                {
                    if( data == null )
                    {
                        data = new EppXriSocialData();
                    }
                    // An empty pager element means "remove the pager number".
                    if( (pager.getNumber() == null) || (pager.getNumber().length() == 0) )
                    {
                        data.nullifyPager();
                    }
                    else
                    {
                        data.setPager(pager);
                    }
                }
            }
            else if( name.equals("email") && (emailCount < 2) )
            {
                String email = EppUtil.getText(node);
                if( email != null )
                {
                    if( data == null )
                    {
                        data = new EppXriSocialData();
                    }
                    if( emailCount == 0 )
                    {
                        data.setPrimaryEmail(email);
                    }
                    else if( emailCount == 1 )
                    {
                        data.setSecondaryEmail(email);
                    }
                    emailCount++;
                }
            }
        }
        // BUGFIX: guard against a node with no recognized children, in which
        // case data is still null and the calls below would throw an NPE.
        if( data != null )
        {
            if( (data.getPrimaryEmail() == null) && (data.getSecondaryEmail() == null) )
            {
                data.nullifyEmail();
            }
            if( (data.getPrimaryVoice() == null) && (data.getSecondaryVoice() == null) )
            {
                data.nullifyVoice();
            }
        }
        return data;
    }

    public String toString()
    {
        return toString("socialData");
    }
}
|
|
/*
* Copyright 2015 Goldman Sachs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gs.collections.impl.map.immutable;
import com.gs.collections.api.list.MutableList;
import com.gs.collections.api.map.ImmutableMap;
import com.gs.collections.api.tuple.Pair;
import com.gs.collections.impl.block.factory.Functions;
import com.gs.collections.impl.block.function.PassThruFunction0;
import com.gs.collections.impl.block.procedure.CollectionAddProcedure;
import com.gs.collections.impl.factory.Lists;
import com.gs.collections.impl.list.mutable.FastList;
import com.gs.collections.impl.test.Verify;
import com.gs.collections.impl.tuple.Tuples;
import org.junit.Assert;
import org.junit.Test;
/**
* JUnit test for {@link ImmutableQuadrupletonMap}.
*/
public class ImmutableQuadrupletonMapTest extends ImmutableMemoryEfficientMapTestCase
{
/** Returns the four-entry fixture exercised by every inherited test. */
@Override
protected ImmutableMap<Integer, String> classUnderTest()
{
    return new ImmutableQuadrupletonMap<>(1, "1", 2, "2", 3, "3", 4, "4");
}
/** Returns the number of entries in the fixture returned by classUnderTest(). */
@Override
protected int size()
{
    return 4;
}
@Override
@Test
public void equalsAndHashCode()
{
    super.equalsAndHashCode();
    // Two structurally equal quadrupleton maps must be equal and share a hash code.
    ImmutableMap<Integer, String> map1 = new ImmutableQuadrupletonMap<>(1, "One", 2, "Two", 3, "Three", 4, "Four");
    ImmutableMap<Integer, String> map2 = new ImmutableQuadrupletonMap<>(1, "One", 2, "Two", 3, "Three", 4, "Four");
    Verify.assertEqualsAndHashCode(map1, map2);
}
@Override
@Test
public void forEachValue()
{
    super.forEachValue();
    // Values must be visited in entry order: "1" through "4".
    MutableList<String> collection = Lists.mutable.of();
    this.classUnderTest().forEachValue(CollectionAddProcedure.on(collection));
    Assert.assertEquals(FastList.newListWith("1", "2", "3", "4"), collection);
}
@Override
@Test
public void forEachKey()
{
    super.forEachKey();
    // Keys must be visited in entry order: 1 through 4.
    MutableList<Integer> collection = Lists.mutable.of();
    this.classUnderTest().forEachKey(CollectionAddProcedure.on(collection));
    Assert.assertEquals(FastList.newListWith(1, 2, 3, 4), collection);
}
@Override
@Test
public void getIfAbsent_function()
{
    super.getIfAbsent_function();
    ImmutableMap<Integer, String> map = this.classUnderTest();
    Assert.assertNull(map.get(5));
    // The supplied function provides the value for a missing key without mutating the map.
    Assert.assertEquals("5", map.getIfAbsent(5, new PassThruFunction0<>("5")));
    Assert.assertNull(map.get(5));
}
@Override
@Test
public void getIfAbsent()
{
    super.getIfAbsent();
    ImmutableMap<Integer, String> map = this.classUnderTest();
    Assert.assertNull(map.get(5));
    // The supplied default value is returned for a missing key without mutating the map.
    Assert.assertEquals("5", map.getIfAbsentValue(5, "5"));
    Assert.assertNull(map.get(5));
}
@Override
@Test
public void ifPresentApply()
{
    super.ifPresentApply();
    // ifPresentApply returns null for absent keys and applies the function to present ones.
    ImmutableMap<Integer, String> map = this.classUnderTest();
    Assert.assertNull(map.ifPresentApply(5, Functions.<String>getPassThru()));
    Assert.assertEquals("1", map.ifPresentApply(1, Functions.<String>getPassThru()));
    Assert.assertEquals("2", map.ifPresentApply(2, Functions.<String>getPassThru()));
    Assert.assertEquals("3", map.ifPresentApply(3, Functions.<String>getPassThru()));
    Assert.assertEquals("4", map.ifPresentApply(4, Functions.<String>getPassThru()));
}
@Override
@Test
public void notEmpty()
{
    super.notEmpty();
    // A quadrupleton map is never empty.
    Assert.assertTrue(this.classUnderTest().notEmpty());
}
@Override
@Test
public void forEachWith()
{
    super.forEachWith();
    // The extra argument (10) is passed alongside each value, in entry order.
    MutableList<Integer> result = Lists.mutable.of();
    ImmutableMap<Integer, Integer> map = new ImmutableQuadrupletonMap<>(1, 1, 2, 2, 3, 3, 4, 4);
    map.forEachWith((argument1, argument2) -> result.add(argument1 + argument2), 10);
    Assert.assertEquals(FastList.newListWith(11, 12, 13, 14), result);
}
@Override
@Test
public void forEachWithIndex()
{
    super.forEachWithIndex();
    // Values are visited with their zero-based indices, in entry order.
    MutableList<String> result = Lists.mutable.of();
    ImmutableMap<Integer, String> map = new ImmutableQuadrupletonMap<>(1, "One", 2, "Two", 3, "Three", 4, "Four");
    map.forEachWithIndex((value, index) -> {
        result.add(value);
        result.add(String.valueOf(index));
    });
    Assert.assertEquals(FastList.newListWith("One", "0", "Two", "1", "Three", "2", "Four", "3"), result);
}
@Override
@Test
public void keyValuesView()
{
    super.keyValuesView();
    // keyValuesView yields key/value pairs in entry order.
    MutableList<String> result = Lists.mutable.of();
    ImmutableMap<Integer, String> map = new ImmutableQuadrupletonMap<>(1, "One", 2, "Two", 3, "Three", 4, "Four");
    for (Pair<Integer, String> keyValue : map.keyValuesView())
    {
        result.add(keyValue.getTwo());
    }
    Assert.assertEquals(FastList.newListWith("One", "Two", "Three", "Four"), result);
}
@Override
@Test
public void valuesView()
{
    super.valuesView();
    // valuesView yields the values in entry order.
    MutableList<String> result = Lists.mutable.of();
    ImmutableMap<Integer, String> map = new ImmutableQuadrupletonMap<>(1, "One", 2, "Two", 3, "Three", 4, "Four");
    for (String value : map.valuesView())
    {
        result.add(value);
    }
    Assert.assertEquals(FastList.newListWith("One", "Two", "Three", "Four"), result);
}
@Override
@Test
public void keysView()
{
    super.keysView();
    // keysView yields the keys in entry order.
    MutableList<Integer> result = Lists.mutable.of();
    ImmutableMap<Integer, String> map = new ImmutableQuadrupletonMap<>(1, "One", 2, "Two", 3, "Three", 4, "Four");
    for (Integer key : map.keysView())
    {
        result.add(key);
    }
    Assert.assertEquals(FastList.newListWith(1, 2, 3, 4), result);
}
@Override
@Test
public void testToString()
{
    // toString follows the java.util.Map convention, in entry order.
    ImmutableMap<Integer, String> map = new ImmutableQuadrupletonMap<>(1, "One", 2, "Two", 3, "Three", 4, "Four");
    Assert.assertEquals("{1=One, 2=Two, 3=Three, 4=Four}", map.toString());
}
/**
 * Verifies that select() returns the most memory-efficient map class for each
 * possible result size: empty, all four, every single entry, every pair, and
 * every triple of entries.
 */
@Override
public void select()
{
    ImmutableMap<Integer, String> map = this.classUnderTest();
    // Selecting nothing yields the empty-map singleton class.
    ImmutableMap<Integer, String> empty = map.select((ignored1, ignored2) -> false);
    Verify.assertInstanceOf(ImmutableEmptyMap.class, empty);
    // Selecting everything preserves the quadrupleton class and equality.
    ImmutableMap<Integer, String> full = map.select((ignored1, ignored2) -> true);
    Verify.assertInstanceOf(ImmutableQuadrupletonMap.class, full);
    Assert.assertEquals(map, full);
    // Each single-entry selection must produce an ImmutableSingletonMap.
    ImmutableMap<Integer, String> one = map.select((argument1, argument2) -> "1".equals(argument2));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, one);
    Assert.assertEquals(new ImmutableSingletonMap<>(1, "1"), one);
    ImmutableMap<Integer, String> two = map.select((argument1, argument2) -> "2".equals(argument2));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, two);
    Assert.assertEquals(new ImmutableSingletonMap<>(2, "2"), two);
    ImmutableMap<Integer, String> three = map.select((argument1, argument2) -> "3".equals(argument2));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, three);
    Assert.assertEquals(new ImmutableSingletonMap<>(3, "3"), three);
    ImmutableMap<Integer, String> four = map.select((argument1, argument2) -> "4".equals(argument2));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, four);
    Assert.assertEquals(new ImmutableSingletonMap<>(4, "4"), four);
    // Each two-entry selection must produce an ImmutableDoubletonMap.
    ImmutableMap<Integer, String> oneAndFour = map.select((argument1, argument2) -> "1".equals(argument2) || "4".equals(argument2));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, oneAndFour);
    Assert.assertEquals(new ImmutableDoubletonMap<>(1, "1", 4, "4"), oneAndFour);
    ImmutableMap<Integer, String> oneAndThree = map.select((argument1, argument2) -> "1".equals(argument2) || "3".equals(argument2));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, oneAndThree);
    Assert.assertEquals(new ImmutableDoubletonMap<>(1, "1", 3, "3"), oneAndThree);
    ImmutableMap<Integer, String> oneAndTwo = map.select((argument1, argument2) -> "1".equals(argument2) || "2".equals(argument2));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, oneAndTwo);
    Assert.assertEquals(new ImmutableDoubletonMap<>(1, "1", 2, "2"), oneAndTwo);
    ImmutableMap<Integer, String> twoAndFour = map.select((argument1, argument2) -> "2".equals(argument2) || "4".equals(argument2));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, twoAndFour);
    Assert.assertEquals(new ImmutableDoubletonMap<>(2, "2", 4, "4"), twoAndFour);
    ImmutableMap<Integer, String> twoAndThree = map.select((argument1, argument2) -> "2".equals(argument2) || "3".equals(argument2));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, twoAndThree);
    Assert.assertEquals(new ImmutableDoubletonMap<>(2, "2", 3, "3"), twoAndThree);
    ImmutableMap<Integer, String> threeAndFour = map.select((argument1, argument2) -> "3".equals(argument2) || "4".equals(argument2));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, threeAndFour);
    Assert.assertEquals(new ImmutableDoubletonMap<>(3, "3", 4, "4"), threeAndFour);
    // Each three-entry selection must produce an ImmutableTripletonMap.
    ImmutableMap<Integer, String> twoThreeFour = map.select((argument1, argument2) -> "2".equals(argument2) || "3".equals(argument2) || "4".equals(argument2));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, twoThreeFour);
    Assert.assertEquals(new ImmutableTripletonMap<>(2, "2", 3, "3", 4, "4"), twoThreeFour);
    ImmutableMap<Integer, String> oneThreeFour = map.select((argument1, argument2) -> "1".equals(argument2) || "3".equals(argument2) || "4".equals(argument2));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, oneThreeFour);
    Assert.assertEquals(new ImmutableTripletonMap<>(1, "1", 3, "3", 4, "4"), oneThreeFour);
    ImmutableMap<Integer, String> oneTwoFour = map.select((argument1, argument2) -> "1".equals(argument2) || "2".equals(argument2) || "4".equals(argument2));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, oneTwoFour);
    Assert.assertEquals(new ImmutableTripletonMap<>(1, "1", 2, "2", 4, "4"), oneTwoFour);
    ImmutableMap<Integer, String> oneTwoThree = map.select((argument1, argument2) -> "1".equals(argument2) || "2".equals(argument2) || "3".equals(argument2));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, oneTwoThree);
    Assert.assertEquals(new ImmutableTripletonMap<>(1, "1", 2, "2", 3, "3"), oneTwoThree);
}
@Override
public void reject()
{
    ImmutableMap<Integer, String> map = this.classUnderTest();

    // Rejecting every entry collapses the map to the empty-map implementation.
    ImmutableMap<Integer, String> none = map.reject((eachKey, eachValue) -> true);
    Verify.assertInstanceOf(ImmutableEmptyMap.class, none);

    // Rejecting nothing keeps all four entries and the quadrupleton type.
    ImmutableMap<Integer, String> all = map.reject((eachKey, eachValue) -> false);
    Verify.assertInstanceOf(ImmutableQuadrupletonMap.class, all);
    Assert.assertEquals(map, all);

    // Rejecting three of the four entries yields each possible singleton map.
    ImmutableMap<Integer, String> justOne = map.reject((eachKey, eachValue) -> "2".equals(eachValue) || "3".equals(eachValue) || "4".equals(eachValue));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, justOne);
    Assert.assertEquals(new ImmutableSingletonMap<>(1, "1"), justOne);
    ImmutableMap<Integer, String> justTwo = map.reject((eachKey, eachValue) -> "1".equals(eachValue) || "3".equals(eachValue) || "4".equals(eachValue));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, justTwo);
    Assert.assertEquals(new ImmutableSingletonMap<>(2, "2"), justTwo);
    ImmutableMap<Integer, String> justThree = map.reject((eachKey, eachValue) -> "1".equals(eachValue) || "2".equals(eachValue) || "4".equals(eachValue));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, justThree);
    Assert.assertEquals(new ImmutableSingletonMap<>(3, "3"), justThree);
    ImmutableMap<Integer, String> justFour = map.reject((eachKey, eachValue) -> "1".equals(eachValue) || "2".equals(eachValue) || "3".equals(eachValue));
    Verify.assertInstanceOf(ImmutableSingletonMap.class, justFour);
    Assert.assertEquals(new ImmutableSingletonMap<>(4, "4"), justFour);

    // Rejecting two of the four entries yields each possible doubleton map.
    ImmutableMap<Integer, String> oneFour = map.reject((eachKey, eachValue) -> "2".equals(eachValue) || "3".equals(eachValue));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, oneFour);
    Assert.assertEquals(new ImmutableDoubletonMap<>(1, "1", 4, "4"), oneFour);
    ImmutableMap<Integer, String> oneThree = map.reject((eachKey, eachValue) -> "2".equals(eachValue) || "4".equals(eachValue));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, oneThree);
    Assert.assertEquals(new ImmutableDoubletonMap<>(1, "1", 3, "3"), oneThree);
    ImmutableMap<Integer, String> oneTwo = map.reject((eachKey, eachValue) -> "3".equals(eachValue) || "4".equals(eachValue));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, oneTwo);
    Assert.assertEquals(new ImmutableDoubletonMap<>(1, "1", 2, "2"), oneTwo);
    ImmutableMap<Integer, String> twoFour = map.reject((eachKey, eachValue) -> "1".equals(eachValue) || "3".equals(eachValue));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, twoFour);
    Assert.assertEquals(new ImmutableDoubletonMap<>(2, "2", 4, "4"), twoFour);
    ImmutableMap<Integer, String> twoThree = map.reject((eachKey, eachValue) -> "1".equals(eachValue) || "4".equals(eachValue));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, twoThree);
    Assert.assertEquals(new ImmutableDoubletonMap<>(2, "2", 3, "3"), twoThree);
    ImmutableMap<Integer, String> threeFour = map.reject((eachKey, eachValue) -> "1".equals(eachValue) || "2".equals(eachValue));
    Verify.assertInstanceOf(ImmutableDoubletonMap.class, threeFour);
    Assert.assertEquals(new ImmutableDoubletonMap<>(3, "3", 4, "4"), threeFour);

    // Rejecting a single entry yields each possible tripleton map.
    ImmutableMap<Integer, String> withoutOne = map.reject((eachKey, eachValue) -> "1".equals(eachValue));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, withoutOne);
    Assert.assertEquals(new ImmutableTripletonMap<>(2, "2", 3, "3", 4, "4"), withoutOne);
    ImmutableMap<Integer, String> withoutTwo = map.reject((eachKey, eachValue) -> "2".equals(eachValue));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, withoutTwo);
    Assert.assertEquals(new ImmutableTripletonMap<>(1, "1", 3, "3", 4, "4"), withoutTwo);
    ImmutableMap<Integer, String> withoutThree = map.reject((eachKey, eachValue) -> "3".equals(eachValue));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, withoutThree);
    Assert.assertEquals(new ImmutableTripletonMap<>(1, "1", 2, "2", 4, "4"), withoutThree);
    ImmutableMap<Integer, String> withoutFour = map.reject((eachKey, eachValue) -> "4".equals(eachValue));
    Verify.assertInstanceOf(ImmutableTripletonMap.class, withoutFour);
    Assert.assertEquals(new ImmutableTripletonMap<>(1, "1", 2, "2", 3, "3"), withoutFour);
}
@Override
public void detect()
{
    ImmutableMap<Integer, String> map = this.classUnderTest();
    // An always-true predicate detects the first entry in iteration order.
    Assert.assertEquals(Tuples.pair(1, "1"), map.detect((eachKey, eachValue) -> true));
    // Each value-specific predicate detects exactly its own key/value pair.
    Assert.assertEquals(Tuples.pair(2, "2"), map.detect((eachKey, eachValue) -> "2".equals(eachValue)));
    Assert.assertEquals(Tuples.pair(3, "3"), map.detect((eachKey, eachValue) -> "3".equals(eachValue)));
    Assert.assertEquals(Tuples.pair(4, "4"), map.detect((eachKey, eachValue) -> "4".equals(eachValue)));
    // No match yields null rather than throwing.
    Assert.assertNull(map.detect((eachKey, eachValue) -> false));
}
/**
 * Factory used by the abstract test cases: builds the four-entry immutable
 * map implementation under test from the given key/value pairs.
 */
@Override
protected <K, V> ImmutableMap<K, V> newMapWithKeysValues(K key1, V value1, K key2, V value2, K key3, V value3, K key4, V value4)
{
return new ImmutableQuadrupletonMap<>(key1, value1, key2, value2, key3, value3, key4, value4);
}
}
|
|
package es.devera.maven.plugins.docker;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import com.kpelykh.docker.client.DockerClient;
import com.kpelykh.docker.client.DockerException;
import com.kpelykh.docker.client.model.ContainerConfig;
import com.kpelykh.docker.client.model.ContainerCreateResponse;
import com.sun.jersey.api.client.ClientResponse;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugins.annotations.Parameter;
/**
 * Base mojo holding the shared Docker configuration and the container
 * lifecycle operations (create/start/stop/kill/restart/remove/pull) used by
 * the concrete goals of this plugin.
 * <p>
 * Only works when the docker daemon is running on a tcp port (no unix socket
 * support at the moment).
 *
 * @author <a href="mailto:[email protected]">Eduardo de Vera</a>
 */
public abstract class DockerMojo extends AbstractMojo {

    //TODO find out the default port for docker when listening to network ports.
    private static final String DEFAULT_URL = "http://localhost:4243";

    // Shares the id of the container created in this thread between the
    // plugin's goals (create -> start -> stop -> remove).
    private static final ThreadLocal<String> tlContainerId = new ThreadLocal<String>();

    // Lazily built from the @Parameter fields; cached after first use.
    private ContainerConfig containerConfig;
    private DockerClient dockerClient;

    @Parameter
    private boolean attachedMode;
    @Parameter(defaultValue = DEFAULT_URL)
    private String url;
    @Parameter(required = true)
    private String containerImage;
    @Parameter
    private boolean stderr;
    @Parameter
    private boolean stdin;
    @Parameter
    private boolean stdout;
    @Parameter
    private int cpuShares;
    @Parameter
    private String[] dnss;
    @Parameter
    private String[] entryPoints;
    @Parameter
    private String[] envs;
    @Parameter
    private String hostname;
    @Parameter
    private long memoryLimit;
    @Parameter
    private long memorySwap;
    @Parameter
    private boolean networkDisabled;
    @Parameter
    private String[] onBuild;
    @Parameter
    private String[] portSpecs;
    @Parameter
    private boolean privileged;
    @Parameter
    private boolean stdInOnce;
    @Parameter
    private boolean stdInOpen;
    @Parameter
    private boolean tty;
    @Parameter
    private String user;
    @Parameter
    private Object volumes;
    @Parameter
    private String volumesFrom;
    @Parameter
    private String workingDir;
    @Parameter(required=true)
    private String[] cmds;
    @Parameter
    private int timeout;
    @Parameter
    private String containerId;

    /**
     * Creates a container in Docker and stores the container id in a ThreadLocal variable
     * so that it can be accessed by other goals of the plugin.
     *
     * @throws DockerException if the daemon rejects the creation request
     */
    void createContainer() throws DockerException {
        // Fix: no format arguments, so a plain string replaces String.format.
        getLog().debug("Creating new container");
        final ContainerCreateResponse response = getDockerClient().createContainer(getContainerConfig());
        final String containerId = response.getId();
        getLog().info(String.format("Created container with id %s", containerId));
        DockerMojo.tlContainerId.set(containerId);
    }

    /**
     * Starts the current container, optionally attaching to its log output,
     * and blocks until the container exits.
     *
     * @throws DockerException if the container cannot be started
     */
    void startContainer() throws DockerException {
        getLog().debug(String.format("Trying to start container %s", getContainerId()));
        validateContainerId();
        getDockerClient().startContainer(getContainerId());
        if (attachedMode) {
            attachContainer();
        }
        getDockerClient().waitContainer(getContainerId());
    }

    /**
     * Streams the container log to the Maven log on a background thread,
     * waiting up to one second for the stream to finish.
     *
     * @throws DockerException if the log stream cannot be obtained
     */
    private void attachContainer() throws DockerException {
        getLog().debug(String.format("Trying to attach container %s", getContainerId()));
        validateContainerId();
        final ClientResponse clientResponse = getDockerClient().logContainerStream(getContainerId());
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(new Runnable() {
            public void run() {
                InputStream is = clientResponse.getEntityInputStream();
                BufferedReader bufferedReader = null;
                try {
                    // Fix: wrap the stream exactly once and read it as UTF-8.
                    // The original created a UTF-8 reader it never used and
                    // actually read through a second, platform-charset reader.
                    bufferedReader = new BufferedReader(new InputStreamReader(is, "UTF-8"));
                    String line;
                    while ((line = bufferedReader.readLine()) != null) {
                        getLog().info(String.format("docker[%s]: %s", getContainerId(), line));
                    }
                } catch (IOException e) {
                    getLog().warn("Impossible to log the container");
                } finally {
                    // Best-effort cleanup; failures while closing are irrelevant here.
                    try { if (bufferedReader != null) bufferedReader.close(); } catch (IOException ignored) {}
                    try { if (is != null) is.close(); } catch (IOException ignored) {}
                }
            }
        });
        // Fix: request shutdown before waiting -- without it awaitTermination
        // can never observe termination and the worker thread lingers.
        executor.shutdown();
        try {
            executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // Fix: preserve the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Stops the current container.
     *
     * @throws DockerException if the container cannot be stopped
     */
    void stopContainer() throws DockerException {
        getLog().debug(String.format("Trying to stop container %s", getContainerId()));
        validateContainerId();
        getDockerClient().stopContainer(getContainerId());
    }

    /**
     * Removes the current container and clears the thread-local id so later
     * goals fall back to the configured {@code containerId} parameter.
     *
     * @throws DockerException if the container cannot be removed
     */
    void removeContainer() throws DockerException {
        getLog().debug(String.format("Trying to remove container %s", getContainerId()));
        validateContainerId();
        final String containerId = this.getContainerId();
        getDockerClient().removeContainer(containerId);
        // Fix: remove() instead of set(null) so the ThreadLocal entry itself
        // is released rather than kept around holding a null value.
        DockerMojo.tlContainerId.remove();
        getLog().info(String.format("Container %s has been removed", containerId));
    }

    /**
     * Kills the current container.
     *
     * @throws DockerException if the container cannot be killed
     */
    void killContainer() throws DockerException {
        getLog().debug(String.format("Trying to kill container %s", getContainerId()));
        validateContainerId();
        final String containerId = this.getContainerId();
        getDockerClient().kill(containerId);
        getLog().info(String.format("Container %s has been killed", getContainerId()));
    }

    /**
     * Restarts the current container, giving it {@link #timeout} before the restart.
     *
     * @throws DockerException if the container cannot be restarted
     */
    void restartContainer() throws DockerException {
        getLog().debug(String.format("Trying to restart container %s", getContainerId()));
        validateContainerId();
        final String containerId = this.getContainerId();
        getDockerClient().restart(containerId, timeout);
        getLog().info(String.format("Container %s has been restarted", getContainerId()));
    }

    /**
     * Pulls the configured image from the registry.
     *
     * @throws DockerException if the image cannot be pulled
     */
    void pullImage() throws DockerException {
        getLog().debug(String.format("Trying to pull %s image", getContainerImage()));
        getDockerClient().pull(getContainerImage());
        getLog().info(String.format("Image %s successfully pulled", getContainerImage()));
    }

    // Guard used by every container operation: fail fast when neither the
    // thread-local id nor the containerId parameter is set.
    private void validateContainerId() {
        if (getContainerId() == null) throw new IllegalStateException("There isn't any container id set.");
    }

    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Getter and Setters                                                                                             //
    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    /**
     * Builds (once) and returns the container configuration assembled from the
     * plugin parameters. Null array/string parameters are simply left unset.
     */
    public ContainerConfig getContainerConfig() {
        if (containerConfig == null) {
            containerConfig = new ContainerConfig();
            if (getCmds() != null) containerConfig.setCmd(getCmds());
            containerConfig.setAttachStdin(isStdin());
            containerConfig.setAttachStderr(isStderr());
            containerConfig.setCpuShares(getCpuShares());
            containerConfig.setAttachStdout(isStdout());
            if (getDnss() != null) containerConfig.setDns(getDnss());
            if (getEntryPoints() != null) containerConfig.setEntrypoint(getEntryPoints());
            if (getEnvs() != null) containerConfig.setEnv(getEnvs());
            if (getHostname() != null) containerConfig.setHostName(getHostname());
            if (getContainerImage() != null) containerConfig.setImage(getContainerImage());
            containerConfig.setMemoryLimit(getMemoryLimit());
            containerConfig.setMemorySwap(getMemorySwap());
            containerConfig.setNetworkDisabled(isNetworkDisabled());
            if (getOnBuild() != null) containerConfig.setOnBuild(getOnBuild());
            if (getPortSpecs() != null) containerConfig.setPortSpecs(getPortSpecs());
            containerConfig.setPrivileged(isPrivileged());
            containerConfig.setStdInOnce(isStdInOnce());
            containerConfig.setStdinOpen(isStdInOpen());
            containerConfig.setTty(isTty());
            if (getUser() != null) containerConfig.setUser(getUser());
            if (getVolumes() != null) containerConfig.setVolumes(getVolumes());
            if (getVolumesFrom() != null) containerConfig.setVolumesFrom(getVolumesFrom());
            if (getWorkingDir() != null) containerConfig.setWorkingDir(getWorkingDir());
            getLog().debug(
                    String.format("Container configuration: \n%s", containerConfig.toString()));
        }
        return containerConfig;
    }

    static String getThreadLocalContainerId() {
        return DockerMojo.tlContainerId.get();
    }

    // Prefers the id of a container created earlier in this thread over the
    // explicitly configured containerId parameter.
    String getContainerId() {
        return DockerMojo.tlContainerId.get() != null ? DockerMojo.tlContainerId.get() : containerId;
    }

    public String getContainerImage() {
        return containerImage;
    }

    private String getUrl() {
        if (url == null) url = DEFAULT_URL;
        return url;
    }

    DockerClient getDockerClient() {
        // Fix: go through getUrl() so the DEFAULT_URL fallback applies when no
        // url was configured (the original passed the possibly-null field).
        if (dockerClient == null) dockerClient = new DockerClient(getUrl());
        return dockerClient;
    }

    public int getTimeout() {
        return timeout;
    }

    public void setTimeout(int timeout) {
        this.timeout = timeout;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public void setContainerImage(String containerImage) {
        this.containerImage = containerImage;
    }

    public boolean isStderr() {
        return stderr;
    }

    public void setStderr(boolean stderr) {
        this.stderr = stderr;
    }

    public boolean isStdin() {
        return stdin;
    }

    public void setStdin(boolean stdin) {
        this.stdin = stdin;
    }

    public boolean isStdout() {
        return stdout;
    }

    public void setStdout(boolean stdout) {
        this.stdout = stdout;
    }

    public int getCpuShares() {
        return cpuShares;
    }

    public void setCpuShares(int cpuShares) {
        this.cpuShares = cpuShares;
    }

    public String[] getDnss() {
        return dnss;
    }

    public void setDnss(String[] dnss) {
        this.dnss = dnss;
    }

    public String[] getEntryPoints() {
        return entryPoints;
    }

    public void setEntryPoints(String[] entryPoints) {
        this.entryPoints = entryPoints;
    }

    public String[] getEnvs() {
        return envs;
    }

    public void setEnvs(String[] envs) {
        this.envs = envs;
    }

    public String getHostname() {
        return hostname;
    }

    public void setHostname(String hostname) {
        this.hostname = hostname;
    }

    public long getMemoryLimit() {
        return memoryLimit;
    }

    public void setMemoryLimit(long memoryLimit) {
        this.memoryLimit = memoryLimit;
    }

    public long getMemorySwap() {
        return memorySwap;
    }

    public void setMemorySwap(long memorySwap) {
        this.memorySwap = memorySwap;
    }

    public boolean isNetworkDisabled() {
        return networkDisabled;
    }

    public void setNetworkDisabled(boolean networkDisabled) {
        this.networkDisabled = networkDisabled;
    }

    public String[] getOnBuild() {
        return onBuild;
    }

    public void setOnBuild(String[] onBuild) {
        this.onBuild = onBuild;
    }

    public String[] getPortSpecs() {
        return portSpecs;
    }

    public void setPortSpecs(String[] portSpecs) {
        this.portSpecs = portSpecs;
    }

    public boolean isPrivileged() {
        return privileged;
    }

    public void setPrivileged(boolean privileged) {
        this.privileged = privileged;
    }

    public boolean isStdInOnce() {
        return stdInOnce;
    }

    public void setStdInOnce(boolean stdInOnce) {
        this.stdInOnce = stdInOnce;
    }

    public boolean isStdInOpen() {
        return stdInOpen;
    }

    public void setStdInOpen(boolean stdInOpen) {
        this.stdInOpen = stdInOpen;
    }

    public boolean isTty() {
        return tty;
    }

    public void setTty(boolean tty) {
        this.tty = tty;
    }

    public String getUser() {
        return user;
    }

    public void setUser(String user) {
        this.user = user;
    }

    public Object getVolumes() {
        return volumes;
    }

    public void setVolumes(Object volumes) {
        this.volumes = volumes;
    }

    public String getVolumesFrom() {
        return volumesFrom;
    }

    public void setVolumesFrom(String volumesFrom) {
        this.volumesFrom = volumesFrom;
    }

    public String getWorkingDir() {
        return workingDir;
    }

    public void setWorkingDir(String workingDir) {
        this.workingDir = workingDir;
    }

    public String[] getCmds() {
        return cmds;
    }

    public void setCmds(String[] cmds) {
        this.cmds = cmds;
    }

    public boolean isAttachedMode() {
        return attachedMode;
    }

    public void setAttachedMode(boolean attachedMode) {
        this.attachedMode = attachedMode;
    }

    ////////////////////////////////////////////////////////////////////////////////////////////////////
    // For testing purposes                                                                            //
    ////////////////////////////////////////////////////////////////////////////////////////////////////

    void setDockerClient(DockerClient dockerClient) {
        this.dockerClient = dockerClient;
    }

    void setContainerConfig(ContainerConfig containerConfig) {
        this.containerConfig = containerConfig;
    }
}
|
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.rollover;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.rest.action.cat.RestIndicesActionTests;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.mockito.ArgumentCaptor;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TransportRolloverActionTests extends ESTestCase {
/**
 * Rollover conditions must be evaluated against the primary-shard doc count,
 * not the total doc count across all shard copies.
 */
public void testDocStatsSelectionFromPrimariesOnly() {
    long primaryDocs = 100;
    long totalDocs = 200;
    final Condition condition = createTestCondition();
    String indexName = randomAlphaOfLengthBetween(5, 7);
    evaluateConditions(Sets.newHashSet(condition), createMetaData(indexName),
        createIndicesStatResponse(indexName, totalDocs, primaryDocs));
    // Capture the stats handed to the condition and verify which count was used.
    final ArgumentCaptor<Condition.Stats> statsCaptor = ArgumentCaptor.forClass(Condition.Stats.class);
    verify(condition).evaluate(statsCaptor.capture());
    assertEquals(primaryDocs, statsCaptor.getValue().numDocs);
}
/**
 * With doc stats available, an index old and large enough matches all three
 * rollover conditions, while one below the docs/size thresholds matches only
 * the age condition.
 */
public void testEvaluateConditions() {
MaxDocsCondition maxDocsCondition = new MaxDocsCondition(100L);
MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(2));
MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomIntBetween(10, 100), ByteSizeUnit.MB));
long matchMaxDocs = randomIntBetween(100, 1000);
long notMatchMaxDocs = randomIntBetween(0, 99);
ByteSizeValue notMatchMaxSize = new ByteSizeValue(randomIntBetween(0, 9), ByteSizeUnit.MB);
final Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
// Created 3 hours ago, i.e. older than the 2-hour max-age threshold above.
final IndexMetaData metaData = IndexMetaData.builder(randomAlphaOfLength(10))
.creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(3).getMillis())
.settings(settings)
.build();
final Set<Condition> conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition, maxSizeCondition);
// >= 100 docs and 120 MB: every condition should report a match.
Map<String, Boolean> results = evaluateConditions(conditions,
new DocsStats(matchMaxDocs, 0L, ByteSizeUnit.MB.toBytes(120)), metaData);
assertThat(results.size(), equalTo(3));
for (Boolean matched : results.values()) {
assertThat(matched, equalTo(true));
}
// Below both the docs and size thresholds: only the age condition matches.
results = evaluateConditions(conditions, new DocsStats(notMatchMaxDocs, 0, notMatchMaxSize.getBytes()), metaData);
assertThat(results.size(), equalTo(3));
for (Map.Entry<String, Boolean> entry : results.entrySet()) {
if (entry.getKey().equals(maxAgeCondition.toString())) {
assertThat(entry.getValue(), equalTo(true));
} else if (entry.getKey().equals(maxDocsCondition.toString())) {
assertThat(entry.getValue(), equalTo(false));
} else if (entry.getKey().equals(maxSizeCondition.toString())) {
assertThat(entry.getValue(), equalTo(false));
} else {
fail("unknown condition result found " + entry.getKey());
}
}
}
/**
 * Without doc stats (null), only the age condition -- which relies solely on
 * index metadata -- can still evaluate to true; docs and size report false.
 */
public void testEvaluateWithoutDocStats() {
MaxDocsCondition maxDocsCondition = new MaxDocsCondition(randomNonNegativeLong());
MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(randomIntBetween(1, 3)));
MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomNonNegativeLong()));
Set<Condition> conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition, maxSizeCondition);
final Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 1000))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(10))
.build();
// Creation date (5-10 hours ago) always exceeds the 1-3 hour age threshold.
final IndexMetaData metaData = IndexMetaData.builder(randomAlphaOfLength(10))
.creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(randomIntBetween(5, 10)).getMillis())
.settings(settings)
.build();
// Passing null doc stats simulates a rollover where stats were unavailable.
Map<String, Boolean> results = evaluateConditions(conditions, null, metaData);
assertThat(results.size(), equalTo(3));
for (Map.Entry<String, Boolean> entry : results.entrySet()) {
if (entry.getKey().equals(maxAgeCondition.toString())) {
assertThat(entry.getValue(), equalTo(true));
} else if (entry.getKey().equals(maxDocsCondition.toString())) {
assertThat(entry.getValue(), equalTo(false));
} else if (entry.getKey().equals(maxSizeCondition.toString())) {
assertThat(entry.getValue(), equalTo(false));
} else {
fail("unknown condition result found " + entry.getKey());
}
}
}
/**
 * When index metadata is missing, every condition must evaluate to false,
 * regardless of whether doc stats are supplied.
 */
public void testEvaluateWithoutMetaData() {
MaxDocsCondition maxDocsCondition = new MaxDocsCondition(100L);
MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(2));
MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomIntBetween(10, 100), ByteSizeUnit.MB));
long matchMaxDocs = randomIntBetween(100, 1000);
final Set<Condition> conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition, maxSizeCondition);
// Null metadata: all false even though the doc stats would match max-docs.
Map<String, Boolean> results = evaluateConditions(conditions,
new DocsStats(matchMaxDocs, 0L, ByteSizeUnit.MB.toBytes(120)), null);
assertThat(results.size(), equalTo(3));
results.forEach((k, v) -> assertFalse(v));
final Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 1000))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(10))
.build();
final IndexMetaData metaData = IndexMetaData.builder(randomAlphaOfLength(10))
.creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(randomIntBetween(5, 10)).getMillis())
.settings(settings)
.build();
// Null metadata argument on the stats-response overload: likewise all false.
IndicesStatsResponse indicesStats = RestIndicesActionTests.randomIndicesStatsResponse(new Index[]{metaData.getIndex()});
Map<String, Boolean> results2 = evaluateConditions(conditions, null, indicesStats);
assertThat(results2.size(), equalTo(3));
results2.forEach((k, v) -> assertFalse(v));
}
/**
 * Rolling over without an explicit write index must produce exactly two alias
 * actions: add the alias to the new index and remove it from the old one.
 */
public void testCreateUpdateAliasRequest() {
    String alias = randomAlphaOfLength(10);
    String oldIndex = randomAlphaOfLength(10);
    String newIndex = randomAlphaOfLength(10);
    final RolloverRequest request = new RolloverRequest(alias, newIndex);
    final IndicesAliasesClusterStateUpdateRequest updateRequest =
        TransportRolloverAction.prepareRolloverAliasesUpdateRequest(oldIndex, newIndex, request);
    List<AliasAction> actions = updateRequest.actions();
    assertThat(actions, hasSize(2));
    boolean sawAdd = false;
    boolean sawRemove = false;
    for (AliasAction action : actions) {
        if (action.getIndex().equals(newIndex)) {
            // The new index gains the alias.
            assertEquals(alias, ((AliasAction.Add) action).getAlias());
            sawAdd = true;
        } else if (action.getIndex().equals(oldIndex)) {
            // The old index loses the alias.
            assertEquals(alias, ((AliasAction.Remove) action).getAlias());
            sawRemove = true;
        } else {
            throw new AssertionError("Unknown index [" + action.getIndex() + "]");
        }
    }
    assertTrue(sawAdd);
    assertTrue(sawRemove);
}
/**
 * With an explicit write index, rollover keeps the alias on both indices and
 * only flips the write flag: the target becomes the write index, the source
 * stays aliased but non-writable.
 */
public void testCreateUpdateAliasRequestWithExplicitWriteIndex() {
String sourceAlias = randomAlphaOfLength(10);
String sourceIndex = randomAlphaOfLength(10);
String targetIndex = randomAlphaOfLength(10);
final RolloverRequest rolloverRequest = new RolloverRequest(sourceAlias, targetIndex);
final IndicesAliasesClusterStateUpdateRequest updateRequest =
TransportRolloverAction.prepareRolloverAliasesWriteIndexUpdateRequest(sourceIndex, targetIndex, rolloverRequest);
List<AliasAction> actions = updateRequest.actions();
assertThat(actions, hasSize(2));
boolean foundAddWrite = false;
boolean foundRemoveWrite = false;
for (AliasAction action : actions) {
// Cast before checking the index: this assumes the request only contains
// Add actions (a Remove here would fail with a ClassCastException).
AliasAction.Add addAction = (AliasAction.Add) action;
if (action.getIndex().equals(targetIndex)) {
assertEquals(sourceAlias, addAction.getAlias());
assertTrue(addAction.writeIndex());
foundAddWrite = true;
} else if (action.getIndex().equals(sourceIndex)) {
assertEquals(sourceAlias, addAction.getAlias());
assertFalse(addAction.writeIndex());
foundRemoveWrite = true;
} else {
throw new AssertionError("Unknown index [" + action.getIndex() + "]");
}
}
assertTrue(foundAddWrite);
assertTrue(foundRemoveWrite);
}
/**
 * Validation of the rollover source: an alias spanning indices without a
 * write index, a concrete index name, and a missing alias are all rejected;
 * an alias with a write index passes.
 */
public void testValidation() {
String index1 = randomAlphaOfLength(10);
String aliasWithWriteIndex = randomAlphaOfLength(10);
String index2 = randomAlphaOfLength(10);
String aliasWithNoWriteIndex = randomAlphaOfLength(10);
// false or null: either way index1 is not the write index for that alias.
Boolean firstIsWriteIndex = randomFrom(false, null);
final Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
MetaData.Builder metaDataBuilder = MetaData.builder()
.put(IndexMetaData.builder(index1)
.settings(settings)
.putAlias(AliasMetaData.builder(aliasWithWriteIndex))
.putAlias(AliasMetaData.builder(aliasWithNoWriteIndex).writeIndex(firstIsWriteIndex))
);
IndexMetaData.Builder indexTwoBuilder = IndexMetaData.builder(index2).settings(settings);
// When neither index declares writeIndex=false explicitly, spread the alias
// over a second index so that no write index can be inferred.
if (firstIsWriteIndex == null) {
indexTwoBuilder.putAlias(AliasMetaData.builder(aliasWithNoWriteIndex).writeIndex(randomFrom(false, null)));
}
metaDataBuilder.put(indexTwoBuilder);
MetaData metaData = metaDataBuilder.build();
// Case 1: alias exists but has no write index.
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
TransportRolloverAction.validate(metaData, new RolloverRequest(aliasWithNoWriteIndex,
randomAlphaOfLength(10))));
assertThat(exception.getMessage(), equalTo("source alias [" + aliasWithNoWriteIndex + "] does not point to a write index"));
// Case 2: the source is a concrete index, not an alias.
exception = expectThrows(IllegalArgumentException.class, () ->
TransportRolloverAction.validate(metaData, new RolloverRequest(randomFrom(index1, index2),
randomAlphaOfLength(10))));
assertThat(exception.getMessage(), equalTo("source alias is a concrete index"));
// Case 3: the alias does not exist at all.
exception = expectThrows(IllegalArgumentException.class, () ->
TransportRolloverAction.validate(metaData, new RolloverRequest(randomAlphaOfLength(5),
randomAlphaOfLength(10)))
);
assertThat(exception.getMessage(), equalTo("source alias does not exist"));
// Happy path: alias with a write index validates without throwing.
TransportRolloverAction.validate(metaData, new RolloverRequest(aliasWithWriteIndex, randomAlphaOfLength(10)));
}
/**
 * Generated rollover names increment the trailing counter and zero-pad it to
 * six digits; invalid index names are rejected and date-math markers survive.
 */
public void testGenerateRolloverIndexName() {
    // Uppercase characters are not valid in index names.
    String illegalIndexName = randomAlphaOfLength(10) + "A";
    IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);
    expectThrows(IllegalArgumentException.class, () ->
        TransportRolloverAction.generateRolloverIndexName(illegalIndexName, resolver));
    int counter = randomIntBetween(0, 100);
    final String prefix = randomAlphaOfLength(10);
    String nameEndingInNumber = prefix + "-" + counter;
    assertThat(TransportRolloverAction.generateRolloverIndexName(nameEndingInNumber, resolver),
        equalTo(prefix + "-" + String.format(Locale.ROOT, "%06d", counter + 1)));
    assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-1", resolver),
        equalTo("index-name-000002"));
    assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-2", resolver),
        equalTo("index-name-000003"));
    // Date-math expressions keep their braces; only the counter is bumped.
    assertEquals("<index-name-{now/d}-000002>",
        TransportRolloverAction.generateRolloverIndexName("<index-name-{now/d}-1>", resolver));
}
public void testCreateIndexRequest() {
    // Build a rollover request carrying explicit index settings plus a wait-for-active-shards value.
    final String alias = randomAlphaOfLength(10);
    final String rolloverIndex = randomAlphaOfLength(10);
    final RolloverRequest rolloverRequest = new RolloverRequest(alias, randomAlphaOfLength(10));
    final ActiveShardCount shardCount = randomBoolean() ? ActiveShardCount.ALL : ActiveShardCount.ONE;
    final Settings expectedSettings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .build();
    rolloverRequest.getCreateIndexRequest().waitForActiveShards(shardCount);
    rolloverRequest.getCreateIndexRequest().settings(expectedSettings);

    // The prepared cluster-state update must carry the settings, the target index name,
    // and the fixed "rollover_index" cause through unchanged.
    final CreateIndexClusterStateUpdateRequest createIndexRequest =
        TransportRolloverAction.prepareCreateIndexRequest(rolloverIndex, rolloverIndex, rolloverRequest);
    assertThat(createIndexRequest.settings(), equalTo(expectedSettings));
    assertThat(createIndexRequest.index(), equalTo(rolloverIndex));
    assertThat(createIndexRequest.cause(), equalTo("rollover_index"));
}
public void testRejectDuplicateAlias() {
    // A template whose patterns match the index and which already declares the target alias.
    final IndexTemplateMetaData template = IndexTemplateMetaData.builder("test-template")
        .patterns(Arrays.asList("foo-*", "bar-*"))
        .putAlias(AliasMetaData.builder("foo-write"))
        .putAlias(AliasMetaData.builder("bar-write").writeIndex(randomBoolean()))
        .build();
    final MetaData metaData = MetaData.builder()
        .put(createMetaData(randomAlphaOfLengthBetween(5, 7)), false)
        .put(template)
        .build();
    final String indexName = randomFrom("foo-123", "bar-xyz");
    final String aliasName = randomFrom("foo-write", "bar-write");

    // Rolling over onto an alias already defined by a matching template must fail,
    // and the error message must name the offending template.
    final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
        () -> TransportRolloverAction.checkNoDuplicatedAliasInIndexTemplate(metaData, indexName, aliasName));
    assertThat(ex.getMessage(), containsString("index template [test-template]"));
}
public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPrimariesFromWriteIndex() throws Exception {
// Mocked infrastructure so TransportRolloverAction.masterOperation can run without a real cluster.
final TransportService mockTransportService = mock(TransportService.class);
final ClusterService mockClusterService = mock(ClusterService.class);
final DiscoveryNode mockNode = mock(DiscoveryNode.class);
when(mockNode.getId()).thenReturn("mocknode");
when(mockClusterService.localNode()).thenReturn(mockNode);
final ThreadPool mockThreadPool = mock(ThreadPool.class);
final MetaDataCreateIndexService mockCreateIndexService = mock(MetaDataCreateIndexService.class);
final IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class);
when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).thenReturn("logs-index-000003");
final ActionFilters mockActionFilters = mock(ActionFilters.class);
final MetaDataIndexAliasesService mdIndexAliasesService = mock(MetaDataIndexAliasesService.class);
final Client mockClient = mock(Client.class);
// Two indices behind the same alias: 200 primary docs in -000001, 300 in -000002;
// `total` is the per-index total-doc count (primaries + replicas).
final Map<String, IndexStats> indexStats = new HashMap<>();
int total = randomIntBetween(500, 1000);
indexStats.put("logs-index-000001", createIndexStats(200L, total));
indexStats.put("logs-index-000002", createIndexStats(300L, total));
final IndicesStatsResponse statsResponse = createAliasToMultipleIndicesStatsResponse(indexStats);
// Any stats request issued by the action under test is answered with the canned response above.
doAnswer(invocation -> {
Object[] args = invocation.getArguments();
assert args.length == 3;
@SuppressWarnings("unchecked")
ActionListener<IndicesStatsResponse> listener = (ActionListener<IndicesStatsResponse>) args[2];
listener.onResponse(statsResponse);
return null;
}).when(mockClient).execute(any(IndicesStatsAction.class), any(IndicesStatsRequest.class), any(ActionListener.class));
// Sanity-check the fixture: 500 primary docs across the alias, totals summed over both indices.
assert statsResponse.getPrimaries().getDocs().getCount() == 500L;
assert statsResponse.getTotal().getDocs().getCount() == (total + total);
// logs-index-000002 is the write index; logs-index-000001 is read-only behind the alias.
final IndexMetaData.Builder indexMetaData = IndexMetaData.builder("logs-index-000001")
.putAlias(AliasMetaData.builder("logs-alias").writeIndex(false).build()).settings(settings(Version.CURRENT))
.numberOfShards(1).numberOfReplicas(1);
final IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("logs-index-000002")
.putAlias(AliasMetaData.builder("logs-alias").writeIndex(true).build()).settings(settings(Version.CURRENT))
.numberOfShards(1).numberOfReplicas(1);
final ClusterState stateBefore = ClusterState.builder(ClusterName.DEFAULT)
.metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build();
final TransportRolloverAction transportRolloverAction = new TransportRolloverAction(Settings.EMPTY, mockTransportService,
mockClusterService, mockThreadPool, mockCreateIndexService, mockActionFilters, mockIndexNameExpressionResolver,
mdIndexAliasesService, mockClient);
// For given alias, verify that condition evaluation fails when the condition doc count is greater than the primaries doc count
// (primaries from only write index is considered)
PlainActionFuture<RolloverResponse> future = new PlainActionFuture<>();
RolloverRequest rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003");
rolloverRequest.addMaxIndexDocsCondition(500L);
rolloverRequest.dryRun(true);
transportRolloverAction.masterOperation(mock(Task.class), rolloverRequest, stateBefore, future);
RolloverResponse response = future.actionGet();
// Write index has 300 primary docs, so max_docs:500 must not be met.
assertThat(response.getOldIndex(), equalTo("logs-index-000002"));
assertThat(response.getNewIndex(), equalTo("logs-index-000003"));
assertThat(response.isDryRun(), equalTo(true));
assertThat(response.isRolledOver(), equalTo(false));
assertThat(response.getConditionStatus().size(), equalTo(1));
assertThat(response.getConditionStatus().get("[max_docs: 500]"), is(false));
// For given alias, verify that the condition evaluation is successful when condition doc count is less than the primaries doc count
// (primaries from only write index is considered)
future = new PlainActionFuture<>();
rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003");
rolloverRequest.addMaxIndexDocsCondition(300L);
rolloverRequest.dryRun(true);
transportRolloverAction.masterOperation(mock(Task.class), rolloverRequest, stateBefore, future);
response = future.actionGet();
// Write index has exactly 300 primary docs, so max_docs:300 is met (dry run: still not rolled over).
assertThat(response.getOldIndex(), equalTo("logs-index-000002"));
assertThat(response.getNewIndex(), equalTo("logs-index-000003"));
assertThat(response.isDryRun(), equalTo(true));
assertThat(response.isRolledOver(), equalTo(false));
assertThat(response.getConditionStatus().size(), equalTo(1));
assertThat(response.getConditionStatus().get("[max_docs: 300]"), is(true));
}
/**
 * Builds a stubbed stats response for a single index whose primary and total doc
 * counts are fixed to the given values; {@code getIndex(indexName)} returns a
 * per-index stub backed by the same stats.
 */
private IndicesStatsResponse createIndicesStatResponse(String indexName, long totalDocs, long primariesDocs) {
    final CommonStats primaries = mock(CommonStats.class);
    final CommonStats totals = mock(CommonStats.class);
    when(primaries.getDocs()).thenReturn(new DocsStats(primariesDocs, 0, between(1, 10000)));
    when(totals.getDocs()).thenReturn(new DocsStats(totalDocs, 0, between(1, 10000)));

    final IndexStats perIndexStats = mock(IndexStats.class);
    when(perIndexStats.getPrimaries()).thenReturn(primaries);
    when(perIndexStats.getTotal()).thenReturn(totals);

    final IndicesStatsResponse response = mock(IndicesStatsResponse.class);
    when(response.getPrimaries()).thenReturn(primaries);
    when(response.getTotal()).thenReturn(totals);
    when(response.getIndex(indexName)).thenReturn(perIndexStats);
    return response;
}
/**
 * Builds a stubbed stats response covering several indices (e.g. all indices behind
 * one alias): per-index lookups answer with the given stats, while the response-level
 * primaries/totals are the sums across all of them.
 */
private IndicesStatsResponse createAliasToMultipleIndicesStatsResponse(Map<String, IndexStats> indexStats) {
    final IndicesStatsResponse response = mock(IndicesStatsResponse.class);
    final CommonStats aggregatedPrimaries = new CommonStats();
    final CommonStats aggregatedTotals = new CommonStats();
    for (Map.Entry<String, IndexStats> entry : indexStats.entrySet()) {
        final IndexStats stats = entry.getValue();
        when(response.getIndex(entry.getKey())).thenReturn(stats);
        aggregatedPrimaries.add(stats.getPrimaries());
        aggregatedTotals.add(stats.getTotal());
    }
    when(response.getPrimaries()).thenReturn(aggregatedPrimaries);
    when(response.getTotal()).thenReturn(aggregatedTotals);
    return response;
}
/** Stubs per-index stats with fixed primary ({@code primaries}) and total doc counts. */
private IndexStats createIndexStats(long primaries, long total) {
    final CommonStats primaryStats = mock(CommonStats.class);
    final CommonStats totalStats = mock(CommonStats.class);
    when(primaryStats.getDocs()).thenReturn(new DocsStats(primaries, 0, between(1, 10000)));
    when(totalStats.getDocs()).thenReturn(new DocsStats(total, 0, between(1, 10000)));

    final IndexStats stats = mock(IndexStats.class);
    when(stats.getPrimaries()).thenReturn(primaryStats);
    when(stats.getTotal()).thenReturn(totalStats);
    return stats;
}
/** Builds minimal single-shard index metadata with a creation date three hours in the past. */
private static IndexMetaData createMetaData(String indexName) {
    final long threeHoursAgo = System.currentTimeMillis() - TimeValue.timeValueHours(3).getMillis();
    final Settings indexSettings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .build();
    return IndexMetaData.builder(indexName)
        .creationDate(threeHoursAgo)
        .settings(indexSettings)
        .build();
}
/** Returns a condition stub that always evaluates as met. */
private static Condition createTestCondition() {
    final Condition alwaysMet = mock(Condition.class);
    when(alwaysMet.evaluate(any())).thenReturn(new Condition.Result(alwaysMet, true));
    return alwaysMet;
}
}
|
|
/**
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
package com.facebook.react.views.view;
import javax.annotation.Nullable;
import android.content.Context;
import android.graphics.Color;
import android.graphics.Rect;
import android.graphics.drawable.Drawable;
import android.graphics.drawable.LayerDrawable;
import android.view.animation.Animation;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import com.facebook.infer.annotation.Assertions;
import com.facebook.react.common.annotations.VisibleForTesting;
import com.facebook.react.touch.ReactInterceptingViewGroup;
import com.facebook.react.touch.OnInterceptTouchEventListener;
import com.facebook.react.uimanager.MeasureSpecAssertions;
import com.facebook.react.uimanager.PointerEvents;
import com.facebook.react.uimanager.ReactPointerEventsView;
/**
* Backing for a React View. Has support for borders, but since borders aren't common, lazy
* initializes most of the storage needed for them.
*/
public class ReactViewGroup extends ViewGroup implements
    ReactInterceptingViewGroup, ReactClippingViewGroup, ReactPointerEventsView {

  // Growth step for mAllChildren when it fills up; also its minimum initial capacity.
  private static final int ARRAY_CAPACITY_INCREMENT = 12;
  private static final int DEFAULT_BACKGROUND_COLOR = Color.TRANSPARENT;
  private static final LayoutParams sDefaultLayoutParam = new ViewGroup.LayoutParams(0, 0);
  /* should only be used in {@link #updateClippingToRect} */
  private static final Rect sHelperRect = new Rect();

  /**
   * This listener will be set for child views when removeClippedSubview property is enabled. When
   * children layout is updated, it will call {@link #updateSubviewClipStatus} to notify parent
   * view about that fact so that view can be attached/detached if necessary.
   *
   * TODO(7728005): Attach/detach views in batch - once per frame in case when multiple children
   * update their layout.
   */
  private static final class ChildrenLayoutChangeListener implements OnLayoutChangeListener {

    private final ReactViewGroup mParent;

    private ChildrenLayoutChangeListener(ReactViewGroup parent) {
      mParent = parent;
    }

    @Override
    public void onLayoutChange(
        View v,
        int left,
        int top,
        int right,
        int bottom,
        int oldLeft,
        int oldTop,
        int oldRight,
        int oldBottom) {
      if (mParent.getRemoveClippedSubviews()) {
        mParent.updateSubviewClipStatus(v);
      }
    }
  }

  // Following properties are here to support the option {@code removeClippedSubviews}. This is a
  // temporary optimization/hack that is mainly applicable to the large list of images. The way
  // it's implemented is that we store an additional array of children in view node. We selectively
  // remove some of the views (detach) from it while still storing them in that additional array.
  // We override all possible add methods for {@link ViewGroup} so that we can controll this process
  // whenever the option is set. We also override {@link ViewGroup#getChildAt} and
  // {@link ViewGroup#getChildCount} so those methods may return views that are not attached.
  // This is risky but allows us to perform a correct cleanup in {@link NativeViewHierarchyManager}.
  private boolean mRemoveClippedSubviews = false;
  private @Nullable View[] mAllChildren = null;
  private int mAllChildrenCount;
  private @Nullable Rect mClippingRect;
  private PointerEvents mPointerEvents = PointerEvents.AUTO;
  private @Nullable ChildrenLayoutChangeListener mChildrenLayoutChangeListener;
  private @Nullable ReactViewBackgroundDrawable mReactBackgroundDrawable;
  private @Nullable OnInterceptTouchEventListener mOnInterceptTouchEventListener;
  private boolean mNeedsOffscreenAlphaCompositing = false;

  public ReactViewGroup(Context context) {
    super(context);
  }

  @Override
  protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
    // Sizes are dictated by the shadow-node layout pass, so the specs must be exact.
    MeasureSpecAssertions.assertExplicitMeasureSpec(widthMeasureSpec, heightMeasureSpec);
    setMeasuredDimension(
        MeasureSpec.getSize(widthMeasureSpec),
        MeasureSpec.getSize(heightMeasureSpec));
  }

  @Override
  protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
    // No-op since UIManagerModule handles actually laying out children.
  }

  @Override
  public void requestLayout() {
    // No-op, terminate `requestLayout` here, UIManagerModule handles laying out children and
    // `layout` is called on all RN-managed views by `NativeViewHierarchyManager`
  }

  @Override
  public void setBackgroundColor(int color) {
    if (color == Color.TRANSPARENT && mReactBackgroundDrawable == null) {
      // don't do anything, no need to allocate ReactBackgroundDrawable for transparent background
    } else {
      getOrCreateReactViewBackground().setColor(color);
    }
  }

  @Override
  public void setBackground(Drawable drawable) {
    // The background is managed internally (see getOrCreateReactViewBackground /
    // setTranslucentBackgroundDrawable); external callers must not replace it.
    throw new UnsupportedOperationException(
        "This method is not supported for ReactViewGroup instances");
  }

  public void setTranslucentBackgroundDrawable(@Nullable Drawable background) {
    // it's required to call setBackground to null, as in some of the cases we may set new
    // background to be a layer drawable that contains a drawable that has been previously setup
    // as a background previously. This will not work correctly as the drawable callback logic is
    // messed up in AOSP
    super.setBackground(null);
    if (mReactBackgroundDrawable != null && background != null) {
      LayerDrawable layerDrawable =
          new LayerDrawable(new Drawable[] {mReactBackgroundDrawable, background});
      super.setBackground(layerDrawable);
    } else if (background != null) {
      super.setBackground(background);
    }
  }

  @Override
  public void setOnInterceptTouchEventListener(OnInterceptTouchEventListener listener) {
    mOnInterceptTouchEventListener = listener;
  }

  @Override
  public boolean onInterceptTouchEvent(MotionEvent ev) {
    if (mOnInterceptTouchEventListener != null &&
        mOnInterceptTouchEventListener.onInterceptTouchEvent(this, ev)) {
      return true;
    }
    // We intercept the touch event if the children are not supposed to receive it.
    if (mPointerEvents == PointerEvents.NONE || mPointerEvents == PointerEvents.BOX_ONLY) {
      return true;
    }
    return super.onInterceptTouchEvent(ev);
  }

  @Override
  public boolean onTouchEvent(MotionEvent ev) {
    // We do not accept the touch event if this view is not supposed to receive it.
    if (mPointerEvents == PointerEvents.NONE || mPointerEvents == PointerEvents.BOX_NONE) {
      return false;
    }
    // The root view always assumes any view that was tapped wants the touch
    // and sends the event to JS as such.
    // We don't need to do bubbling in native (it's already happening in JS).
    // For an explanation of bubbling and capturing, see
    // http://javascript.info/tutorial/bubbling-and-capturing#capturing
    return true;
  }

  /**
   * We override this to allow developers to determine whether they need offscreen alpha compositing
   * or not. See the documentation of needsOffscreenAlphaCompositing in View.js.
   */
  @Override
  public boolean hasOverlappingRendering() {
    return mNeedsOffscreenAlphaCompositing;
  }

  /**
   * See the documentation of needsOffscreenAlphaCompositing in View.js.
   */
  public void setNeedsOffscreenAlphaCompositing(boolean needsOffscreenAlphaCompositing) {
    mNeedsOffscreenAlphaCompositing = needsOffscreenAlphaCompositing;
  }

  public void setBorderWidth(int position, float width) {
    getOrCreateReactViewBackground().setBorderWidth(position, width);
  }

  public void setBorderColor(int position, float color) {
    getOrCreateReactViewBackground().setBorderColor(position, color);
  }

  public void setBorderRadius(float borderRadius) {
    getOrCreateReactViewBackground().setRadius(borderRadius);
  }

  public void setBorderStyle(@Nullable String style) {
    getOrCreateReactViewBackground().setBorderStyle(style);
  }

  @Override
  public void setRemoveClippedSubviews(boolean removeClippedSubviews) {
    if (removeClippedSubviews == mRemoveClippedSubviews) {
      return;
    }
    mRemoveClippedSubviews = removeClippedSubviews;
    if (removeClippedSubviews) {
      mClippingRect = new Rect();
      ReactClippingViewGroupHelper.calculateClippingRect(this, mClippingRect);
      mAllChildrenCount = getChildCount();
      // Use the shared capacity constant instead of a magic number so the initial size
      // and the growth step in addInArray stay in sync.
      int initialSize = Math.max(ARRAY_CAPACITY_INCREMENT, mAllChildrenCount);
      mAllChildren = new View[initialSize];
      mChildrenLayoutChangeListener = new ChildrenLayoutChangeListener(this);
      for (int i = 0; i < mAllChildrenCount; i++) {
        View child = getChildAt(i);
        mAllChildren[i] = child;
        child.addOnLayoutChangeListener(mChildrenLayoutChangeListener);
      }
      updateClippingRect();
    } else {
      // Add all clipped views back, deallocate additional arrays, remove layoutChangeListener
      Assertions.assertNotNull(mClippingRect);
      Assertions.assertNotNull(mAllChildren);
      Assertions.assertNotNull(mChildrenLayoutChangeListener);
      for (int i = 0; i < mAllChildrenCount; i++) {
        mAllChildren[i].removeOnLayoutChangeListener(mChildrenLayoutChangeListener);
      }
      getDrawingRect(mClippingRect);
      updateClippingToRect(mClippingRect);
      mAllChildren = null;
      mClippingRect = null;
      mAllChildrenCount = 0;
      mChildrenLayoutChangeListener = null;
    }
  }

  @Override
  public boolean getRemoveClippedSubviews() {
    return mRemoveClippedSubviews;
  }

  @Override
  public void getClippingRect(Rect outClippingRect) {
    outClippingRect.set(mClippingRect);
  }

  @Override
  public void updateClippingRect() {
    if (!mRemoveClippedSubviews) {
      return;
    }
    Assertions.assertNotNull(mClippingRect);
    Assertions.assertNotNull(mAllChildren);
    ReactClippingViewGroupHelper.calculateClippingRect(this, mClippingRect);
    updateClippingToRect(mClippingRect);
  }

  private void updateClippingToRect(Rect clippingRect) {
    Assertions.assertNotNull(mAllChildren);
    // clippedSoFar tracks how many earlier children are currently detached, so the
    // logical index into mAllChildren can be mapped to the attached-children index.
    int clippedSoFar = 0;
    for (int i = 0; i < mAllChildrenCount; i++) {
      updateSubviewClipStatus(clippingRect, i, clippedSoFar);
      if (mAllChildren[i].getParent() == null) {
        clippedSoFar++;
      }
    }
  }

  private void updateSubviewClipStatus(Rect clippingRect, int idx, int clippedSoFar) {
    View child = Assertions.assertNotNull(mAllChildren)[idx];
    sHelperRect.set(child.getLeft(), child.getTop(), child.getRight(), child.getBottom());
    boolean intersects = clippingRect
        .intersects(sHelperRect.left, sHelperRect.top, sHelperRect.right, sHelperRect.bottom);
    boolean needUpdateClippingRecursive = false;
    // We never want to clip children that are being animated, as this can easily break layout :
    // when layout animation changes size and/or position of views contained inside a listview that
    // clips offscreen children, we need to ensure that, when view exits the viewport, final size
    // and position is set prior to removing the view from its listview parent.
    // Otherwise, when view gets re-attached again, i.e when it re-enters the viewport after scroll,
    // it won't be size and located properly.
    Animation animation = child.getAnimation();
    boolean isAnimating = animation != null && !animation.hasEnded();
    if (!intersects && child.getParent() != null && !isAnimating) {
      // We can try saving on invalidate call here as the view that we remove is out of visible area
      // therefore invalidation is not necessary.
      super.removeViewsInLayout(idx - clippedSoFar, 1);
      needUpdateClippingRecursive = true;
    } else if (intersects && child.getParent() == null) {
      super.addViewInLayout(child, idx - clippedSoFar, sDefaultLayoutParam, true);
      invalidate();
      needUpdateClippingRecursive = true;
    } else if (intersects) {
      // If there is any intersection we need to inform the child to update its clipping rect
      needUpdateClippingRecursive = true;
    }
    if (needUpdateClippingRecursive) {
      if (child instanceof ReactClippingViewGroup) {
        // we don't use {@link sHelperRect} until the end of this loop, therefore it's safe
        // to call this method that may write to the same {@link sHelperRect} object.
        ReactClippingViewGroup clippingChild = (ReactClippingViewGroup) child;
        if (clippingChild.getRemoveClippedSubviews()) {
          clippingChild.updateClippingRect();
        }
      }
    }
  }

  private void updateSubviewClipStatus(View subview) {
    if (!mRemoveClippedSubviews || getParent() == null) {
      return;
    }
    Assertions.assertNotNull(mClippingRect);
    Assertions.assertNotNull(mAllChildren);
    // do fast check whether intersect state changed
    sHelperRect.set(subview.getLeft(), subview.getTop(), subview.getRight(), subview.getBottom());
    boolean intersects = mClippingRect
        .intersects(sHelperRect.left, sHelperRect.top, sHelperRect.right, sHelperRect.bottom);
    // If it was intersecting before, should be attached to the parent
    boolean oldIntersects = (subview.getParent() != null);
    if (intersects != oldIntersects) {
      int clippedSoFar = 0;
      for (int i = 0; i < mAllChildrenCount; i++) {
        if (mAllChildren[i] == subview) {
          updateSubviewClipStatus(mClippingRect, i, clippedSoFar);
          break;
        }
        if (mAllChildren[i].getParent() == null) {
          clippedSoFar++;
        }
      }
    }
  }

  @Override
  protected void onSizeChanged(int w, int h, int oldw, int oldh) {
    super.onSizeChanged(w, h, oldw, oldh);
    if (mRemoveClippedSubviews) {
      updateClippingRect();
    }
  }

  @Override
  protected void onAttachedToWindow() {
    super.onAttachedToWindow();
    if (mRemoveClippedSubviews) {
      updateClippingRect();
    }
  }

  @Override
  public PointerEvents getPointerEvents() {
    return mPointerEvents;
  }

  /*package*/ void setPointerEvents(PointerEvents pointerEvents) {
    mPointerEvents = pointerEvents;
  }

  /*package*/ int getAllChildrenCount() {
    return mAllChildrenCount;
  }

  /*package*/ View getChildAtWithSubviewClippingEnabled(int index) {
    return Assertions.assertNotNull(mAllChildren)[index];
  }

  /*package*/ void addViewWithSubviewClippingEnabled(View child, int index) {
    addViewWithSubviewClippingEnabled(child, index, sDefaultLayoutParam);
  }

  /*package*/ void addViewWithSubviewClippingEnabled(View child, int index, LayoutParams params) {
    Assertions.assertCondition(mRemoveClippedSubviews);
    Assertions.assertNotNull(mClippingRect);
    Assertions.assertNotNull(mAllChildren);
    addInArray(child, index);
    // we add view as "clipped" and then run {@link #updateSubviewClipStatus} to conditionally
    // attach it
    int clippedSoFar = 0;
    for (int i = 0; i < index; i++) {
      if (mAllChildren[i].getParent() == null) {
        clippedSoFar++;
      }
    }
    updateSubviewClipStatus(mClippingRect, index, clippedSoFar);
    child.addOnLayoutChangeListener(mChildrenLayoutChangeListener);
  }

  /*package*/ void removeViewWithSubviewClippingEnabled(View view) {
    Assertions.assertCondition(mRemoveClippedSubviews);
    Assertions.assertNotNull(mClippingRect);
    Assertions.assertNotNull(mAllChildren);
    view.removeOnLayoutChangeListener(mChildrenLayoutChangeListener);
    // NOTE(review): indexOfChildInAllChildren returns -1 when the view is not tracked,
    // which would make the array access below throw — confirm callers only pass tracked views.
    int index = indexOfChildInAllChildren(view);
    if (mAllChildren[index].getParent() != null) {
      int clippedSoFar = 0;
      for (int i = 0; i < index; i++) {
        if (mAllChildren[i].getParent() == null) {
          clippedSoFar++;
        }
      }
      super.removeViewsInLayout(index - clippedSoFar, 1);
    }
    removeFromArray(index);
  }

  /*package*/ void removeAllViewsWithSubviewClippingEnabled() {
    Assertions.assertCondition(mRemoveClippedSubviews);
    Assertions.assertNotNull(mAllChildren);
    for (int i = 0; i < mAllChildrenCount; i++) {
      mAllChildren[i].removeOnLayoutChangeListener(mChildrenLayoutChangeListener);
    }
    removeAllViewsInLayout();
    mAllChildrenCount = 0;
  }

  /** Returns the index of {@code child} in the full (clipped + attached) child array, or -1. */
  private int indexOfChildInAllChildren(View child) {
    final int count = mAllChildrenCount;
    final View[] children = Assertions.assertNotNull(mAllChildren);
    for (int i = 0; i < count; i++) {
      if (children[i] == child) {
        return i;
      }
    }
    return -1;
  }

  /** Inserts {@code child} at {@code index} in mAllChildren, growing the array when full. */
  private void addInArray(View child, int index) {
    View[] children = Assertions.assertNotNull(mAllChildren);
    final int count = mAllChildrenCount;
    final int size = children.length;
    if (index == count) {
      if (size == count) {
        mAllChildren = new View[size + ARRAY_CAPACITY_INCREMENT];
        System.arraycopy(children, 0, mAllChildren, 0, size);
        children = mAllChildren;
      }
      children[mAllChildrenCount++] = child;
    } else if (index < count) {
      if (size == count) {
        mAllChildren = new View[size + ARRAY_CAPACITY_INCREMENT];
        System.arraycopy(children, 0, mAllChildren, 0, index);
        System.arraycopy(children, index, mAllChildren, index + 1, count - index);
        children = mAllChildren;
      } else {
        System.arraycopy(children, index, children, index + 1, count - index);
      }
      children[index] = child;
      mAllChildrenCount++;
    } else {
      throw new IndexOutOfBoundsException("index=" + index + " count=" + count);
    }
  }

  // This method also sets the child's mParent to null
  private void removeFromArray(int index) {
    final View[] children = Assertions.assertNotNull(mAllChildren);
    final int count = mAllChildrenCount;
    if (index == count - 1) {
      children[--mAllChildrenCount] = null;
    } else if (index >= 0 && index < count) {
      System.arraycopy(children, index + 1, children, index, count - index - 1);
      children[--mAllChildrenCount] = null;
    } else {
      throw new IndexOutOfBoundsException();
    }
  }

  @VisibleForTesting
  public int getBackgroundColor() {
    // NOTE(review): if a translucent background drawable was combined into a LayerDrawable
    // (see setTranslucentBackgroundDrawable), this cast would fail — confirm tests only call
    // this when the background is the plain ReactViewBackgroundDrawable.
    if (getBackground() != null) {
      return ((ReactViewBackgroundDrawable) getBackground()).getColor();
    }
    return DEFAULT_BACKGROUND_COLOR;
  }

  /** Lazily creates the React-managed background drawable, preserving any existing background. */
  private ReactViewBackgroundDrawable getOrCreateReactViewBackground() {
    if (mReactBackgroundDrawable == null) {
      mReactBackgroundDrawable = new ReactViewBackgroundDrawable();
      Drawable backgroundDrawable = getBackground();
      super.setBackground(null); // required so that drawable callback is cleared before we add the
      // drawable back as a part of LayerDrawable
      if (backgroundDrawable == null) {
        super.setBackground(mReactBackgroundDrawable);
      } else {
        LayerDrawable layerDrawable =
            new LayerDrawable(new Drawable[] {mReactBackgroundDrawable, backgroundDrawable});
        super.setBackground(layerDrawable);
      }
    }
    return mReactBackgroundDrawable;
  }
}
|
|
package seedu.task.testutil;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import org.loadui.testfx.GuiTest;
import org.ocpsoft.prettytime.shade.edu.emory.mathcs.backport.java.util.Collections;
import org.testfx.api.FxToolkit;
import com.google.common.io.Files;
import guitests.guihandles.TaskCardHandle;
import javafx.geometry.Bounds;
import javafx.geometry.Point2D;
import javafx.scene.Node;
import javafx.scene.Scene;
import javafx.scene.input.KeyCode;
import javafx.scene.input.KeyCodeCombination;
import javafx.scene.input.KeyCombination;
import junit.framework.AssertionFailedError;
import seedu.task.TestApp;
import seedu.task.commons.exceptions.IllegalValueException;
import seedu.task.commons.util.FileUtil;
import seedu.task.commons.util.XmlUtil;
import seedu.task.model.TaskManager;
import seedu.task.model.tag.Tag;
import seedu.task.model.tag.UniqueTagList;
import seedu.task.model.task.Date;
import seedu.task.model.task.Location;
import seedu.task.model.task.Name;
import seedu.task.model.task.ReadOnlyTask;
import seedu.task.model.task.Remark;
import seedu.task.model.task.Task;
import seedu.task.storage.XmlSerializableTaskManager;
/**
* A utility class for test cases.
*/
public class TestUtil {
// Platform-specific line separator, used when composing expected multi-line strings.
public static final String LS = System.lineSeparator();
/**
 * Folder used for temp files created during testing. Ignored by Git.
 */
public static final String SANDBOX_FOLDER = FileUtil.getPath("./src/test/data/sandbox/");
// Shared fixtures reused across GUI tests: nine sample tasks and two sample tags.
public static final Task[] SAMPLE_TASK_DATA = getSampleTaskData();
public static final Tag[] SAMPLE_TAG_DATA = getSampleTagData();
/**
 * Runs {@code executable} and fails unless it throws {@code expected} or a subclass of it.
 * Fails with an AssertionFailedError if nothing is thrown, or if a non-matching throwable
 * is thrown.
 */
public static void assertThrows(Class<? extends Throwable> expected, Runnable executable) {
    try {
        executable.run();
    } catch (Throwable actualException) {
        // Bug fix: the subtype check was inverted. The correct direction is
        // expected.isAssignableFrom(actual), which accepts the expected type and its
        // subclasses; the old actual.isAssignableFrom(expected) instead accepted
        // *superclasses* of the expected exception and rejected legitimate subclasses.
        if (expected.isAssignableFrom(actualException.getClass())) {
            return;
        }
        String message = String.format("Expected thrown: %s, actual: %s", expected.getName(),
                actualException.getClass().getName());
        throw new AssertionFailedError(message);
    }
    throw new AssertionFailedError(
            String.format("Expected %s to be thrown, but nothing was thrown.", expected.getName()));
}
// Builds the fixed nine-task fixture.
// NOTE(review): constructor args appear to be (name, startDate, endDate, remark, location,
// tags, isDone, <last String arg>) — the meaning of the trailing "" is not visible here;
// confirm against the Task constructor.
private static Task[] getSampleTaskData() {
try {
// CHECKSTYLE.OFF: LineLength
return new Task[] {
new Task(new Name("Ali Muster"), new Date("1-02-24"), new Date("1-03-2424"),
new Remark("[email protected]"), new Location("4th street"), new UniqueTagList(), false, ""),
new Task(new Name("Boris Mueller"), new Date("1-02-24"), new Date("1-09-2245"),
new Remark("[email protected]"), new Location("81th street"), new UniqueTagList(), false, ""),
new Task(new Name("Carl Kurz"), new Date("1-04-1963"), new Date("1-13-1963"),
new Remark("[email protected]"), new Location("wall street"), new UniqueTagList(), false, ""),
new Task(new Name("Daniel Meier"), new Date("2-12-2000"), new Date("2-17-2003"),
new Remark("[email protected]"), new Location("10th street"), new UniqueTagList(), false,
""),
new Task(new Name("Elle Meyer"), new Date("1-01-2000"), new Date("2-12-2004"),
new Remark("[email protected]"), new Location("michegan ave"), new UniqueTagList(), false,
""),
new Task(new Name("Fiona Kunz"), new Date("2-05-2024"), new Date("04-03-2027"),
new Remark("[email protected]"), new Location("little tokyo"), new UniqueTagList(), false,
""),
new Task(new Name("George Best"), new Date("4-09-14"), new Date("1-02-2042"),
new Remark("[email protected]"), new Location("4th street"), new UniqueTagList(), false, ""),
new Task(new Name("Hoon Meier"), new Date("1-08-2024"), new Date("04-12-2024"),
new Remark("[email protected]"), new Location("little india"), new UniqueTagList(), false,
""),
new Task(new Name("Ida Mueller"), new Date("2-05-30"), new Date("04-12-2031"),
new Remark("[email protected]"), new Location("chicago ave"), new UniqueTagList(), false,
"") };
// CHECKSTYLE.ON: LineLength
} catch (IllegalValueException e) {
assert false;
// not possible
return null;
}
}
/** Builds the fixed two-tag fixture used across GUI tests. */
private static Tag[] getSampleTagData() {
    try {
        Tag relatives = new Tag("relatives");
        Tag friends = new Tag("friends");
        return new Tag[] { relatives, friends };
    } catch (IllegalValueException e) {
        assert false; // unreachable: both literals are valid tag names
        return null;
    }
}
/** Returns the sample tasks as a fixed-size list view over {@link #SAMPLE_TASK_DATA}. */
public static List<Task> generateSampleTaskData() {
    final Task[] samples = SAMPLE_TASK_DATA;
    return Arrays.asList(samples);
}
/**
* Appends the file name to the sandbox folder path. Creates the sandbox folder if it doesn't exist.
*
* @param fileName
* @return
*/
public static String getFilePathInSandboxFolder(String fileName) {
    // Make sure the sandbox directory exists before handing back a path inside it;
    // directory-creation failure is fatal for the test run, so rethrow unchecked.
    final File sandboxDir = new File(SANDBOX_FOLDER);
    try {
        FileUtil.createDirs(sandboxDir);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return SANDBOX_FOLDER + fileName;
}
/** Writes the default sample storage snapshot to {@code filePath}. */
public static void createDataFileWithSampleData(String filePath) {
    XmlSerializableTaskManager sampleStorage = generateSampleStorageTaskManager();
    createDataFileWithData(sampleStorage, filePath);
}
/** Creates {@code filePath} if missing, then serializes {@code data} into it as XML. */
public static <T> void createDataFileWithData(T data, String filePath) {
    final File target = new File(filePath);
    try {
        FileUtil.createIfMissing(target);
        XmlUtil.saveDataToFile(target, data);
    } catch (Exception e) {
        // Test fixtures must exist for the suite to run; surface any failure immediately.
        throw new RuntimeException(e);
    }
}
// Manual entry point: regenerates the sample data file used by GUI tests.
public static void main(String... s) {
createDataFileWithSampleData(TestApp.SAVE_LOCATION_FOR_TESTING);
}
// Wraps an empty task manager in its XML-serializable storage form.
public static XmlSerializableTaskManager generateSampleStorageTaskManager() {
return new XmlSerializableTaskManager(new TaskManager());
}
/**
 * Expands a {@code KeyCodeCombination} into the ordered array of key codes that
 * must be pressed to trigger it: active modifiers (ALT, SHIFT, META, CONTROL,
 * in that order) followed by the main key code.
 */
public static KeyCode[] scrub(KeyCodeCombination keyCodeCombination) {
    List<KeyCode> resolved = new ArrayList<>();
    if (keyCodeCombination.getAlt() == KeyCombination.ModifierValue.DOWN) {
        resolved.add(KeyCode.ALT);
    }
    if (keyCodeCombination.getShift() == KeyCombination.ModifierValue.DOWN) {
        resolved.add(KeyCode.SHIFT);
    }
    if (keyCodeCombination.getMeta() == KeyCombination.ModifierValue.DOWN) {
        resolved.add(KeyCode.META);
    }
    if (keyCodeCombination.getControl() == KeyCombination.ModifierValue.DOWN) {
        resolved.add(KeyCode.CONTROL);
    }
    resolved.add(keyCodeCombination.getCode());
    return resolved.toArray(new KeyCode[] {});
}
/**
 * Returns true when the JVM was started with {@code -Dtestfx.headless=true}
 * (exact, case-sensitive match), indicating GUI tests should run headless.
 */
public static boolean isHeadlessEnvironment() {
    // "true".equals(...) is null-safe: an unset property yields false.
    return "true".equals(System.getProperty("testfx.headless"));
}
/**
 * Captures a screenshot via {@code GuiTest} and copies it to {@code fileName}.png.
 * Best-effort: an I/O failure is printed to stderr and otherwise ignored, so a
 * failed screenshot never fails the test run.
 *
 * @param fileName destination file name, without the ".png" extension
 */
public static void captureScreenShot(String fileName) {
    File file = GuiTest.captureScreenshot();
    try {
        Files.copy(file, new File(fileName + ".png"));
    } catch (IOException e) {
        e.printStackTrace();
    }
}
/**
 * Builds a failure description listing each compared object's string form,
 * one per line, under a "Comparison failed" header.
 */
public static String descOnFail(Object... comparedObjects) {
    String joined = Arrays.stream(comparedObjects)
            .map(Object::toString)
            .collect(Collectors.joining("\n"));
    return "Comparison failed \n" + joined;
}
/**
 * Overwrites the value of a {@code static final} field via reflection, for tests
 * that need to swap out a constant.
 *
 * NOTE(review): reflective access to {@code Field.modifiers} is blocked on
 * JDK 12 and later ({@code NoSuchFieldException}); this helper only works on
 * older JREs — confirm the test JDK version.
 *
 * @param field the static field whose value is replaced
 * @param newValue the value to write into the field
 * @throws NoSuchFieldException if the internal {@code modifiers} field is unavailable
 * @throws IllegalAccessException if the field cannot be made accessible
 */
public static void setFinalStatic(Field field, Object newValue)
throws NoSuchFieldException, IllegalAccessException {
    field.setAccessible(true);
    // remove final modifier from field
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    // ~Modifier.FINAL is used to remove the final modifier from field so
    // that its value is no longer
    // final and can be changed
    modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
    field.set(null, newValue);
}
/**
 * Starts the JavaFX toolkit for tests: registers the primary stage and then
 * hides it so no window is shown.
 *
 * @throws TimeoutException if the toolkit does not start within the TestFX timeout
 */
public static void initRuntime() throws TimeoutException {
    FxToolkit.registerPrimaryStage();
    FxToolkit.hideStage();
}
/**
 * Cleans up all stages created during a test run. Counterpart to {@code initRuntime()}.
 */
public static void tearDownRuntime() throws Exception {
    FxToolkit.cleanupStages();
}
/**
 * Looks up a no-argument method declared directly on {@code objectClass} (any
 * visibility, including private) and makes it invocable. Invoke the result with
 * {@code method.invoke(instance)}.
 *
 * Caveat: {@code getDeclaredMethod} does not search supertypes, so inherited
 * methods are not found.
 *
 * @throws NoSuchMethodException if no such method is declared on the class itself
 */
public static Method getPrivateMethod(Class<?> objectClass, String methodName) throws NoSuchMethodException {
    Method target = objectClass.getDeclaredMethod(methodName);
    target.setAccessible(true);
    return target;
}
/**
 * Copies {@code file} to {@code newFileName}.
 *
 * NOTE(review): despite its name, this COPIES rather than renames — the original
 * file is left in place — and an {@code IOException} is merely printed and
 * swallowed. Confirm whether a true rename (and error propagation) was intended.
 *
 * @param file source file
 * @param newFileName destination path
 */
public static void renameFile(File file, String newFileName) {
    try {
        Files.copy(file, new File(newFileName));
    } catch (IOException e1) {
        e1.printStackTrace();
    }
}
/**
 * Returns the midpoint of {@code node} in screen coordinates: the node's
 * on-screen origin offset by half its layout width and height.
 *
 * @param node node to measure
 * @return midpoint in screen coordinates
 */
public static Point2D getScreenMidPoint(Node node) {
    Bounds screenBounds = getScreenPos(node);
    double midX = screenBounds.getMinX() + node.getLayoutBounds().getWidth() / 2;
    double midY = screenBounds.getMinY() + node.getLayoutBounds().getHeight() / 2;
    return new Point2D(midX, midY);
}
/**
 * Returns the midpoint of {@code node} in scene coordinates: the node's
 * in-scene origin offset by half its layout width and height.
 *
 * @param node node to measure
 * @return midpoint in scene coordinates
 */
public static Point2D getSceneMidPoint(Node node) {
    Bounds sceneBounds = getScenePos(node);
    double midX = sceneBounds.getMinX() + node.getLayoutBounds().getWidth() / 2;
    double midY = sceneBounds.getMinY() + node.getLayoutBounds().getHeight() / 2;
    return new Point2D(midX, midY);
}
/**
 * Gets the bounds of the node transformed into its scene's coordinate space.
 *
 * @param node node to measure
 * @return the node's bounds in scene coordinates
 */
public static Bounds getScenePos(Node node) {
    return node.localToScene(node.getBoundsInLocal());
}
/**
 * Gets the bounds of the node transformed into screen coordinates.
 *
 * @param node node to measure
 * @return the node's bounds in screen coordinates
 */
public static Bounds getScreenPos(Node node) {
    return node.localToScreen(node.getBoundsInLocal());
}
/**
 * @param scene scene to measure
 * @return the x-coordinate of the scene's right edge (scene x-position plus width)
 */
public static double getSceneMaxX(Scene scene) {
    return scene.getX() + scene.getWidth();
}
/**
 * @param scene scene to measure
 * @return the y-coordinate of the scene's bottom edge (scene y-position plus height)
 */
public static double getSceneMaxY(Scene scene) {
    // Bug fix: previously returned getX() + getHeight(), which is wrong whenever
    // the scene's x and y positions differ (cf. getSceneMaxX above).
    return scene.getY() + scene.getHeight();
}
/**
 * Returns the final element of {@code list}.
 * Precondition: the list is non-empty (otherwise IndexOutOfBoundsException).
 */
public static Object getLastElement(List<?> list) {
    int lastIndex = list.size() - 1;
    return list.get(lastIndex);
}
/**
 * Removes a subset from the list of tasks.
 *
 * @param tasks the original tasks (left unmodified)
 * @param tasksToRemove the tasks to exclude
 * @return a new array containing the tasks minus the removed subset
 */
public static TestTask[] removeTasksFromList(final TestTask[] tasks, TestTask... tasksToRemove) {
    List<TestTask> remaining = asList(tasks);
    remaining.removeAll(asList(tasksToRemove));
    return remaining.toArray(new TestTask[remaining.size()]);
}
/**
 * Returns a copy of the list with the task at specified index removed.
 * The input array is left unmodified.
 *
 * @param list
 *            original list to copy from
 * @param targetIndexInOneIndexedFormat
 *            e.g. index 1 if the first element is to be removed (one-based)
 */
public static TestTask[] removeTaskFromList(final TestTask[] list, int targetIndexInOneIndexedFormat) {
    return removeTasksFromList(list, list[targetIndexInOneIndexedFormat - 1]);
}
/**
 * Replaces tasks[index] with a task.
 *
 * NOTE(review): unlike removeTaskFromList (one-based), this index is ZERO-based,
 * and the input array is mutated in place and also returned — confirm callers
 * expect both behaviors.
 *
 * @param tasks
 *            The array of tasks (modified in place).
 * @param task
 *            The replacement task
 * @param index
 *            The zero-based index of the task to be replaced.
 * @return the same (mutated) array, for chaining
 */
public static TestTask[] replaceTaskFromList(TestTask[] tasks, TestTask task, int index) {
    tasks[index] = task;
    return tasks;
}
/**
 * Prepends tasks to the array of tasks, so the most recently added task ends up
 * at index 0. The input array is left unmodified.
 *
 * @param tasks the original tasks
 * @param tasksToAdd tasks to insert at the front, in the order given
 * @return a new array with the additions at the front
 */
public static TestTask[] addTasksToList(final TestTask[] tasks, TestTask... tasksToAdd) {
    List<TestTask> combined = asList(tasks);
    // Insert each new task at the head, so the last one added appears first.
    for (TestTask toAdd : tasksToAdd) {
        combined.add(0, toAdd);
    }
    return combined.toArray(new TestTask[combined.size()]);
}
/**
 * Copies an array into a fresh, mutable {@code ArrayList}
 * (unlike {@code Arrays.asList}, which returns a fixed-size view).
 */
private static <T> List<T> asList(T[] objs) {
    List<T> copy = new ArrayList<>();
    Collections.addAll(copy, objs);
    return copy;
}
/**
 * @return true if the GUI card displays the same task as {@code task}
 */
public static boolean compareCardAndTask(TaskCardHandle card, ReadOnlyTask task) {
    return card.isSameTask(task);
}
/**
 * Parses a ", "-separated string of tag descriptions (each optionally prefixed
 * with "Tag: ") into an array of tags. An empty string yields an empty array.
 */
public static Tag[] getTagList(String tags) {
    if ("".equals(tags)) {
        return new Tag[] {};
    }
    final String[] descriptions = tags.split(", ");
    final List<Tag> parsed = new ArrayList<>();
    for (String description : descriptions) {
        try {
            parsed.add(new Tag(description.replaceFirst("Tag: ", "")));
        } catch (IllegalValueException e1) {
            // Tag descriptions produced by the test data are always valid.
            assert false;
            parsed.add(null);
        }
    }
    return parsed.toArray(new Tag[descriptions.length]);
}
/**
 * Returns a new array with the tasks in natural (Comparable) order.
 * The input array is left unmodified.
 */
public static TestTask[] giveSortedList(final TestTask[] tasks) {
    List<TestTask> sorted = asList(tasks);
    Collections.sort(sorted);
    return sorted.toArray(new TestTask[sorted.size()]);
}
/**
 * Varargs overload: returns a new array with the given tasks in natural order.
 */
public static ReadOnlyTask[] giveSortedList(ReadOnlyTask... tasks) {
    List<ReadOnlyTask> sorted = asList(tasks);
    Collections.sort(sorted);
    return sorted.toArray(new ReadOnlyTask[sorted.size()]);
}
/**
 * Returns only the tasks marked done, preserving their original order.
 */
public static TestTask[] filterDoneTasks(final TestTask[] tasks) {
    List<TestTask> done = new ArrayList<>();
    for (TestTask candidate : tasks) {
        if (candidate.isDone()) {
            done.add(candidate);
        }
    }
    return done.toArray(new TestTask[done.size()]);
}
/**
 * Returns only the tasks NOT marked done, preserving their original order.
 */
public static TestTask[] filterUndoneTasks(final TestTask[] tasks) {
    List<TestTask> pending = new ArrayList<>();
    for (TestTask candidate : tasks) {
        if (!candidate.isDone()) {
            pending.add(candidate);
        }
    }
    return pending.toArray(new TestTask[pending.size()]);
}
/**
 * Returns only the "floating" tasks — those with neither a start date nor an
 * end date — preserving their original order.
 */
public static TestTask[] filterFloatTasks(final TestTask[] tasks) {
    List<TestTask> floating = new ArrayList<>();
    for (TestTask candidate : tasks) {
        if (candidate.getStartDate().isNull() && candidate.getEndDate().isNull()) {
            floating.add(candidate);
        }
    }
    return floating.toArray(new TestTask[floating.size()]);
}
}
|
|
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.machinelearning.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
 * <p>
 * The datasource details that are specific to Amazon RDS.
 * </p>
 * <p>
 * Generated model POJO: accessors follow the set/get/with pattern, where each
 * {@code withXxx} setter returns {@code this} for chaining.
 * </p>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class RDSMetadata implements Serializable, Cloneable, StructuredPojo {
    /**
     * <p>
     * The database details required to connect to an Amazon RDS.
     * </p>
     */
    private RDSDatabase database;
    /** The database user name. (No documentation was emitted by the service model for this member.) */
    private String databaseUserName;
    /**
     * <p>
     * The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if <code>Verbose</code> is
     * true in <code>GetDataSourceInput</code>.
     * </p>
     */
    private String selectSqlQuery;
    /**
     * <p>
     * The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from
     * Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     */
    private String resourceRole;
    /**
     * <p>
     * The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task
     * from Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     */
    private String serviceRole;
    /**
     * <p>
     * The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You can use
     * the ID to find details about the instance in the Data Pipeline console.
     * </p>
     */
    private String dataPipelineId;
    /**
     * <p>
     * The database details required to connect to an Amazon RDS.
     * </p>
     *
     * @param database
     *        The database details required to connect to an Amazon RDS.
     */
    public void setDatabase(RDSDatabase database) {
        this.database = database;
    }
    /**
     * <p>
     * The database details required to connect to an Amazon RDS.
     * </p>
     *
     * @return The database details required to connect to an Amazon RDS.
     */
    public RDSDatabase getDatabase() {
        return this.database;
    }
    /**
     * <p>
     * The database details required to connect to an Amazon RDS.
     * </p>
     *
     * @param database
     *        The database details required to connect to an Amazon RDS.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RDSMetadata withDatabase(RDSDatabase database) {
        setDatabase(database);
        return this;
    }
    /**
     * Sets the database user name.
     *
     * @param databaseUserName
     *        the database user name
     */
    public void setDatabaseUserName(String databaseUserName) {
        this.databaseUserName = databaseUserName;
    }
    /**
     * @return the database user name
     */
    public String getDatabaseUserName() {
        return this.databaseUserName;
    }
    /**
     * @param databaseUserName
     *        the database user name
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RDSMetadata withDatabaseUserName(String databaseUserName) {
        setDatabaseUserName(databaseUserName);
        return this;
    }
    /**
     * <p>
     * The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if <code>Verbose</code> is
     * true in <code>GetDataSourceInput</code>.
     * </p>
     *
     * @param selectSqlQuery
     *        The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if <code>Verbose</code>
     *        is true in <code>GetDataSourceInput</code>.
     */
    public void setSelectSqlQuery(String selectSqlQuery) {
        this.selectSqlQuery = selectSqlQuery;
    }
    /**
     * <p>
     * The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if <code>Verbose</code> is
     * true in <code>GetDataSourceInput</code>.
     * </p>
     *
     * @return The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if
     *         <code>Verbose</code> is true in <code>GetDataSourceInput</code>.
     */
    public String getSelectSqlQuery() {
        return this.selectSqlQuery;
    }
    /**
     * <p>
     * The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if <code>Verbose</code> is
     * true in <code>GetDataSourceInput</code>.
     * </p>
     *
     * @param selectSqlQuery
     *        The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if <code>Verbose</code>
     *        is true in <code>GetDataSourceInput</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RDSMetadata withSelectSqlQuery(String selectSqlQuery) {
        setSelectSqlQuery(selectSqlQuery);
        return this;
    }
    /**
     * <p>
     * The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from
     * Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     *
     * @param resourceRole
     *        The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task
     *        from Amazon RDS to Amazon S3. For more information, see <a
     *        href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a>
     *        for data pipelines.
     */
    public void setResourceRole(String resourceRole) {
        this.resourceRole = resourceRole;
    }
    /**
     * <p>
     * The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from
     * Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     *
     * @return The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task
     *         from Amazon RDS to Amazon S3. For more information, see <a
     *         href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a>
     *         for data pipelines.
     */
    public String getResourceRole() {
        return this.resourceRole;
    }
    /**
     * <p>
     * The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from
     * Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     *
     * @param resourceRole
     *        The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task
     *        from Amazon RDS to Amazon S3. For more information, see <a
     *        href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a>
     *        for data pipelines.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RDSMetadata withResourceRole(String resourceRole) {
        setResourceRole(resourceRole);
        return this;
    }
    /**
     * <p>
     * The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task
     * from Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     *
     * @param serviceRole
     *        The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the
     *        copy task from Amazon RDS to Amazon S3. For more information, see <a
     *        href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a>
     *        for data pipelines.
     */
    public void setServiceRole(String serviceRole) {
        this.serviceRole = serviceRole;
    }
    /**
     * <p>
     * The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task
     * from Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     *
     * @return The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the
     *         copy task from Amazon RDS to Amazon S3. For more information, see <a
     *         href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a>
     *         for data pipelines.
     */
    public String getServiceRole() {
        return this.serviceRole;
    }
    /**
     * <p>
     * The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task
     * from Amazon RDS to Amazon S3. For more information, see <a
     * href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for
     * data pipelines.
     * </p>
     *
     * @param serviceRole
     *        The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the
     *        copy task from Amazon RDS to Amazon S3. For more information, see <a
     *        href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a>
     *        for data pipelines.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RDSMetadata withServiceRole(String serviceRole) {
        setServiceRole(serviceRole);
        return this;
    }
    /**
     * <p>
     * The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You can use
     * the ID to find details about the instance in the Data Pipeline console.
     * </p>
     *
     * @param dataPipelineId
     *        The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You
     *        can use the ID to find details about the instance in the Data Pipeline console.
     */
    public void setDataPipelineId(String dataPipelineId) {
        this.dataPipelineId = dataPipelineId;
    }
    /**
     * <p>
     * The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You can use
     * the ID to find details about the instance in the Data Pipeline console.
     * </p>
     *
     * @return The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You
     *         can use the ID to find details about the instance in the Data Pipeline console.
     */
    public String getDataPipelineId() {
        return this.dataPipelineId;
    }
    /**
     * <p>
     * The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You can use
     * the ID to find details about the instance in the Data Pipeline console.
     * </p>
     *
     * @param dataPipelineId
     *        The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You
     *        can use the ID to find details about the instance in the Data Pipeline console.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RDSMetadata withDataPipelineId(String dataPipelineId) {
        setDataPipelineId(dataPipelineId);
        return this;
    }
    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getDatabase() != null)
            sb.append("Database: ").append(getDatabase()).append(",");
        if (getDatabaseUserName() != null)
            sb.append("DatabaseUserName: ").append(getDatabaseUserName()).append(",");
        if (getSelectSqlQuery() != null)
            sb.append("SelectSqlQuery: ").append(getSelectSqlQuery()).append(",");
        if (getResourceRole() != null)
            sb.append("ResourceRole: ").append(getResourceRole()).append(",");
        if (getServiceRole() != null)
            sb.append("ServiceRole: ").append(getServiceRole()).append(",");
        if (getDataPipelineId() != null)
            sb.append("DataPipelineId: ").append(getDataPipelineId());
        sb.append("}");
        return sb.toString();
    }
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof RDSMetadata == false)
            return false;
        RDSMetadata other = (RDSMetadata) obj;
        // For each member, "^" (XOR) is true when exactly one side is null -> not equal.
        if (other.getDatabase() == null ^ this.getDatabase() == null)
            return false;
        if (other.getDatabase() != null && other.getDatabase().equals(this.getDatabase()) == false)
            return false;
        if (other.getDatabaseUserName() == null ^ this.getDatabaseUserName() == null)
            return false;
        if (other.getDatabaseUserName() != null && other.getDatabaseUserName().equals(this.getDatabaseUserName()) == false)
            return false;
        if (other.getSelectSqlQuery() == null ^ this.getSelectSqlQuery() == null)
            return false;
        if (other.getSelectSqlQuery() != null && other.getSelectSqlQuery().equals(this.getSelectSqlQuery()) == false)
            return false;
        if (other.getResourceRole() == null ^ this.getResourceRole() == null)
            return false;
        if (other.getResourceRole() != null && other.getResourceRole().equals(this.getResourceRole()) == false)
            return false;
        if (other.getServiceRole() == null ^ this.getServiceRole() == null)
            return false;
        if (other.getServiceRole() != null && other.getServiceRole().equals(this.getServiceRole()) == false)
            return false;
        if (other.getDataPipelineId() == null ^ this.getDataPipelineId() == null)
            return false;
        if (other.getDataPipelineId() != null && other.getDataPipelineId().equals(this.getDataPipelineId()) == false)
            return false;
        return true;
    }
    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getDatabase() == null) ? 0 : getDatabase().hashCode());
        hashCode = prime * hashCode + ((getDatabaseUserName() == null) ? 0 : getDatabaseUserName().hashCode());
        hashCode = prime * hashCode + ((getSelectSqlQuery() == null) ? 0 : getSelectSqlQuery().hashCode());
        hashCode = prime * hashCode + ((getResourceRole() == null) ? 0 : getResourceRole().hashCode());
        hashCode = prime * hashCode + ((getServiceRole() == null) ? 0 : getServiceRole().hashCode());
        hashCode = prime * hashCode + ((getDataPipelineId() == null) ? 0 : getDataPipelineId().hashCode());
        return hashCode;
    }
    @Override
    public RDSMetadata clone() {
        // Shallow copy: field references (including 'database') are shared with the original.
        try {
            return (RDSMetadata) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }
    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.machinelearning.model.transform.RDSMetadataMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import junit.framework.Assert;
import org.apache.hadoop.util.FindClass;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Test the find class logic. Each test drives the {@code FindClass} tool through
 * {@code ToolRunner} and asserts on its exit code; the nested classes at the
 * bottom are fixtures engineered to fail in specific ways (static init, constructor,
 * visibility, toString).
 */
public class TestFindClass extends Assert {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestFindClass.class);
  public static final String LOG4J_PROPERTIES = "log4j.properties";
  /**
   * Run the tool runner instance
   * @param expected expected return code
   * @param args a list of arguments
   * @throws Exception on any failure that is not handled earlier
   */
  private void run(int expected, String... args) throws Exception {
    int result = ToolRunner.run(new FindClass(), args);
    assertEquals(expected, result);
  }
  @Test
  public void testUsage() throws Throwable {
    // No action argument -> usage error.
    run(FindClass.E_USAGE, "org.apache.hadoop.util.TestFindClass");
  }
  @Test
  public void testFindsResource() throws Throwable {
    run(FindClass.SUCCESS,
        FindClass.A_RESOURCE, "org/apache/hadoop/util/TestFindClass.class");
  }
  @Test
  public void testFailsNoSuchResource() throws Throwable {
    run(FindClass.E_NOT_FOUND,
        FindClass.A_RESOURCE,
        "org/apache/hadoop/util/ThereIsNoSuchClass.class");
  }
  @Test
  public void testLoadFindsSelf() throws Throwable {
    run(FindClass.SUCCESS,
        FindClass.A_LOAD, "org.apache.hadoop.util.TestFindClass");
  }
  @Test
  public void testLoadFailsNoSuchClass() throws Throwable {
    run(FindClass.E_NOT_FOUND,
        FindClass.A_LOAD, "org.apache.hadoop.util.ThereIsNoSuchClass");
  }
  @Test
  public void testLoadWithErrorInStaticInit() throws Throwable {
    run(FindClass.E_LOAD_FAILED,
        FindClass.A_LOAD,
        "org.apache.hadoop.util.TestFindClass$FailInStaticInit");
  }
  @Test
  public void testCreateHandlesBadToString() throws Throwable {
    // Creation succeeds even though toString() throws; logging must cope.
    run(FindClass.SUCCESS,
        FindClass.A_CREATE,
        "org.apache.hadoop.util.TestFindClass$BadToStringClass");
  }
  @Test
  public void testCreatesClass() throws Throwable {
    run(FindClass.SUCCESS,
        FindClass.A_CREATE, "org.apache.hadoop.util.TestFindClass");
  }
  @Test
  public void testCreateFailsInStaticInit() throws Throwable {
    run(FindClass.E_LOAD_FAILED,
        FindClass.A_CREATE,
        "org.apache.hadoop.util.TestFindClass$FailInStaticInit");
  }
  @Test
  public void testCreateFailsInConstructor() throws Throwable {
    run(FindClass.E_CREATE_FAILED,
        FindClass.A_CREATE,
        "org.apache.hadoop.util.TestFindClass$FailInConstructor");
  }
  @Test
  public void testCreateFailsNoEmptyConstructor() throws Throwable {
    run(FindClass.E_CREATE_FAILED,
        FindClass.A_CREATE,
        "org.apache.hadoop.util.TestFindClass$NoEmptyConstructor");
  }
  @Test
  public void testLoadPrivateClass() throws Throwable {
    // Loading a private class is fine; only instantiation is restricted.
    run(FindClass.SUCCESS,
        FindClass.A_LOAD, "org.apache.hadoop.util.TestFindClass$PrivateClass");
  }
  @Test
  public void testCreateFailsPrivateClass() throws Throwable {
    run(FindClass.E_CREATE_FAILED,
        FindClass.A_CREATE,
        "org.apache.hadoop.util.TestFindClass$PrivateClass");
  }
  @Test
  public void testCreateFailsInPrivateConstructor() throws Throwable {
    run(FindClass.E_CREATE_FAILED,
        FindClass.A_CREATE,
        "org.apache.hadoop.util.TestFindClass$PrivateConstructor");
  }
  @Test
  public void testLoadFindsLog4J() throws Throwable {
    run(FindClass.SUCCESS, FindClass.A_RESOURCE, LOG4J_PROPERTIES);
  }
  @SuppressWarnings("UseOfSystemOutOrSystemErr")
  @Test
  public void testPrintLog4J() throws Throwable {
    // Redirect stdout into a buffer so the printed resource can be inspected.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(baos);
    FindClass.setOutputStreams(out, System.err);
    run(FindClass.SUCCESS, FindClass.A_PRINTRESOURCE, LOG4J_PROPERTIES);
    //here the content should be done
    out.flush();
    String body = baos.toString("UTF8");
    LOG.info(LOG4J_PROPERTIES + " =\n" + body);
    assertTrue(body.contains("Apache"));
  }
  /**
   * trigger a divide by zero fault in the static init
   */
  public static class FailInStaticInit {
    static {
      int x = 0;
      int y = 1 / x;
    }
  }
  /**
   * trigger a divide by zero fault in the constructor
   */
  public static class FailInConstructor {
    public FailInConstructor() {
      int x = 0;
      int y = 1 / x;
    }
  }
  /**
   * A class with no parameterless constructor -expect creation to fail
   */
  public static class NoEmptyConstructor {
    public NoEmptyConstructor(String text) {
    }
  }
  /**
   * This triggers an NPE in the toString() method; checks the logging
   * code handles this.
   */
  public static class BadToStringClass {
    public BadToStringClass() {
    }
    @Override
    public String toString() {
      throw new NullPointerException("oops");
    }
  }
  /**
   * This is a private class
   * -creating it will trigger an IllegalAccessException
   */
  public static class PrivateClass {
    private PrivateClass() {
    }
  }
  /**
   * This has a private constructor
   * -creating it will trigger an IllegalAccessException
   */
  public static class PrivateConstructor {
    private PrivateConstructor() {
    }
  }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.local;
import java.io.Externalizable;
import java.util.Collection;
import java.util.concurrent.Callable;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.CacheEntryPredicate;
import org.apache.ignite.internal.processors.cache.CacheObject;
import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
import org.apache.ignite.internal.processors.cache.GridCacheFuture;
import org.apache.ignite.internal.processors.cache.GridCacheMapEntry;
import org.apache.ignite.internal.processors.cache.GridCacheMapEntryFactory;
import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
import org.apache.ignite.internal.processors.cache.GridCachePreloader;
import org.apache.ignite.internal.processors.cache.GridCachePreloaderAdapter;
import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.transactions.TransactionIsolation;
import org.jetbrains.annotations.Nullable;
/**
* Local cache implementation.
*/
public class GridLocalCache<K, V> extends GridCacheAdapter<K, V> {
/** */
private static final long serialVersionUID = 0L;
/** */
private GridCachePreloader preldr;
/**
 * Empty constructor required by {@link Externalizable}: the contract demands a
 * public no-arg constructor so instances can be created before readExternal().
 */
public GridLocalCache() {
    // No-op.
}
/**
 * @param ctx Cache registry.
 */
public GridLocalCache(GridCacheContext<K, V> ctx) {
    super(ctx, ctx.config().getStartSize());
    // A local cache has no remote nodes to rebalance from, so the plain
    // preloader adapter is sufficient.
    preldr = new GridCachePreloaderAdapter(ctx);
}
/** {@inheritDoc} */
// Always true: this cache adapter only ever serves the local node.
@Override public boolean isLocal() {
    return true;
}
/** {@inheritDoc} */
// Returns the adapter created in the constructor (null if the
// Externalizable no-arg constructor was used and state was never set).
@Override public GridCachePreloader preloader() {
    return preldr;
}
/** {@inheritDoc} */
@Override protected void init() {
    // Install an entry factory so the underlying map materializes
    // GridLocalCacheEntry instances on demand.
    map.setEntryFactory(new GridCacheMapEntryFactory() {
        /** {@inheritDoc} */
        @Override public GridCacheMapEntry create(
            GridCacheContext ctx,
            AffinityTopologyVersion topVer,
            KeyCacheObject key,
            int hash,
            CacheObject val,
            GridCacheMapEntry next,
            int hdrId
        ) {
            // topVer is ignored: local entries are not affinity-versioned.
            return new GridLocalCacheEntry(ctx, key, hash, val, next, hdrId);
        }
    });
}
/**
 * Typed variant of {@code peekEx}: looks up an existing entry without creating one.
 *
 * @param key Key of entry.
 * @return Cache entry, or {@code null} if absent.
 */
@Nullable GridLocalCacheEntry peekExx(KeyCacheObject key) {
    return (GridLocalCacheEntry)peekEx(key);
}
/**
 * Typed variant of {@code entryEx}: returns the entry for the key
 * (the cast is safe because init() installs a GridLocalCacheEntry factory).
 *
 * @param key Key of entry.
 * @return Cache entry.
 */
GridLocalCacheEntry entryExx(KeyCacheObject key) {
    return (GridLocalCacheEntry)entryEx(key);
}
/** {@inheritDoc} */
// Delegates to lockAllAsync with an empty filter. Note that isRead, retval,
// isolation, invalidate and accessTtl are all ignored for the local cache.
@Override public IgniteInternalFuture<Boolean> txLockAsync(Collection<KeyCacheObject> keys,
    long timeout,
    IgniteTxLocalEx tx,
    boolean isRead,
    boolean retval,
    TransactionIsolation isolation,
    boolean invalidate,
    long accessTtl) {
    return lockAllAsync(keys, timeout, tx, CU.empty0());
}
/** {@inheritDoc} */
// Public API overload: picks up the thread-local transaction (may be null)
// and converts user keys to internal cache key objects before locking.
@Override public IgniteInternalFuture<Boolean> lockAllAsync(Collection<? extends K> keys, long timeout) {
    IgniteTxLocalEx tx = ctx.tm().localTx();
    return lockAllAsync(ctx.cacheKeysView(keys), timeout, tx, CU.empty0());
}
/**
 * Acquires locks on all given keys, returning a future that completes when every
 * lock is held (or the attempt fails/times out).
 *
 * @param keys Keys (an empty/null collection immediately yields a finished {@code true} future).
 * @param timeout Timeout.
 * @param tx Transaction, or {@code null} for an explicit (non-transactional) lock.
 * @param filter Filter; if any entry fails the filter the whole future is failed.
 * @return Future.
 */
public IgniteInternalFuture<Boolean> lockAllAsync(Collection<KeyCacheObject> keys,
    long timeout,
    @Nullable IgniteTxLocalEx tx,
    CacheEntryPredicate[] filter) {
    if (F.isEmpty(keys))
        return new GridFinishedFuture<>(true);
    GridLocalLockFuture<K, V> fut = new GridLocalLockFuture<>(ctx, keys, tx, this, timeout, filter);
    try {
        for (KeyCacheObject key : keys) {
            // Retry loop: the entry may be concurrently removed between
            // lookup and use, signalled by GridCacheEntryRemovedException.
            while (true) {
                GridLocalCacheEntry entry = null;
                try {
                    entry = entryExx(key);
                    // Load the entry's value back from swap storage if needed
                    // before evaluating the filter (presumably; see unswap impl).
                    entry.unswap(false);
                    if (!ctx.isAll(entry, filter)) {
                        fut.onFailed();
                        return fut;
                    }
                    // Removed exception may be thrown here.
                    GridCacheMvccCandidate cand = fut.addEntry(entry);
                    // A null candidate with a completed future means the lock
                    // attempt already resolved (e.g. failed or timed out).
                    if (cand == null && fut.isDone())
                        return fut;
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log().isDebugEnabled())
                        log().debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                }
            }
        }
        if (!ctx.mvcc().addFuture(fut))
            fut.onError(new IgniteCheckedException("Duplicate future ID (internal error): " + fut));
        // Must have future added prior to checking locks.
        fut.checkLocks();
        return fut;
    }
    catch (IgniteCheckedException e) {
        fut.onError(e);
        return fut;
    }
}
/** {@inheritDoc} */
@Override public void unlockAll(
Collection<? extends K> keys
) throws IgniteCheckedException {
AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
for (K key : keys) {
GridLocalCacheEntry entry = peekExx(ctx.toCacheKeyObject(key));
if (entry != null && ctx.isAll(entry, CU.empty0())) {
entry.releaseLocal();
ctx.evicts().touch(entry, topVer);
}
}
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> removeAllAsync() {
return ctx.closures().callLocalSafe(new Callable<Void>() {
@Override public Void call() throws Exception {
removeAll();
return null;
}
});
}
/** {@inheritDoc} */
@Override public void onDeferredDelete(GridCacheEntryEx entry, GridCacheVersion ver) {
assert false : "Should not be called";
}
/**
* @param fut Clears future from cache.
*/
void onFutureDone(GridCacheFuture<?> fut) {
if (ctx.mvcc().removeFuture(fut)) {
if (log().isDebugEnabled())
log().debug("Explicitly removed future from map of futures: " + fut);
}
}
}
|
|
package YouTubedl2HTML;
import java.io.File;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Observable;
/*
* http://www.java-tips.org/java-se-tips/javax.swing/how-to-create-a-download-manager-in-java.html
*/
//This class downloads a file from a URL.
// This class downloads a file from a URL on a background thread and reports
// progress to registered observers.
class Download extends Observable implements Runnable {
    // Max size of download buffer.
    private static final int MAX_BUFFER_SIZE = 1024;

    // These are the status names, indexed by the status codes below.
    public static final String[] STATUSES = { "Downloading", "Paused",
            "Complete", "Cancelled", "Error" };

    // These are the status codes.
    public static final int DOWNLOADING = 0;
    public static final int PAUSED = 1;
    public static final int COMPLETE = 2;
    public static final int CANCELLED = 3;
    public static final int ERROR = 4;

    private URL url; // download URL
    private int size; // size of download in bytes (-1 until the server reports it)
    private int downloaded; // number of bytes downloaded so far
    private int status; // current status of download

    // Constructor for Download; begins downloading immediately.
    public Download(URL url) {
        this.url = url;
        size = -1;
        downloaded = 0;
        status = DOWNLOADING;

        // Begin the download.
        download();
    }

    // Get this download's URL.
    public String getUrl() {
        return url.toString();
    }

    // Get this download's size (-1 if not yet known).
    public int getSize() {
        return size;
    }

    // Get this download's progress as a percentage (negative until size is known).
    public float getProgress() {
        return ((float) downloaded / size) * 100;
    }

    // Get this download's status.
    public int getStatus() {
        return status;
    }

    // Pause this download.
    public void pause() {
        status = PAUSED;
        stateChanged();
    }

    // Resume this download from where it left off (via an HTTP Range request).
    public void resume() {
        status = DOWNLOADING;
        stateChanged();
        download();
    }

    // Cancel this download.
    public void cancel() {
        status = CANCELLED;
        stateChanged();
    }

    // Mark this download as having an error.
    private void error() {
        status = ERROR;
        stateChanged();
    }

    // Start or resume downloading on a new background thread.
    private void download() {
        Thread thread = new Thread(this);
        thread.start();
    }

    // Get file name portion of URL.
    private String getFileName(URL url) {
        String fileName = url.getFile();
        return fileName.substring(fileName.lastIndexOf('/') + 1);
    }

    // Download file.
    public void run() {
        RandomAccessFile file = null;
        InputStream stream = null;

        try {
            // Open connection to URL.
            HttpURLConnection connection = (HttpURLConnection) url
                    .openConnection();

            // Specify what portion of file to download.
            connection.setRequestProperty("Range", "bytes=" + downloaded + "-");

            // Connect to server.
            connection.connect();

            // Make sure response code is in the 200 range.
            // BUG FIX: previously execution fell through and kept downloading
            // after flagging the error; stop immediately instead.
            if (connection.getResponseCode() / 100 != 2) {
                error();
                return;
            }

            // Check for valid content length (also stop immediately on failure).
            int contentLength = connection.getContentLength();
            if (contentLength < 1) {
                error();
                return;
            }

            /*
             * Set the size for this download if it hasn't been already set.
             */
            if (size == -1) {
                size = contentLength;
                stateChanged();
            }

            // Open file and seek to the end of it.
            file = new RandomAccessFile(getFileName(url), "rw");
            file.seek(downloaded);

            stream = connection.getInputStream();
            while (status == DOWNLOADING) {
                /*
                 * Size buffer according to how much of the file is left to
                 * download.
                 */
                int remaining = size - downloaded;
                if (remaining <= 0) {
                    // Guard against a zero-length buffer: read() would return 0
                    // forever and spin this loop.
                    break;
                }
                byte[] buffer = new byte[Math.min(MAX_BUFFER_SIZE, remaining)];

                // Read from server into buffer.
                int read = stream.read(buffer);
                if (read == -1)
                    break;

                // Write buffer to file.
                file.write(buffer, 0, read);
                downloaded += read;
                stateChanged();
            }

            /*
             * Change status to complete if this point was reached because
             * downloading has finished.
             */
            if (status == DOWNLOADING) {
                status = COMPLETE;
                stateChanged();
            }
        } catch (Exception e) {
            error();
        } finally {
            // Close file.
            if (file != null) {
                try {
                    file.close();
                } catch (Exception ignored) {
                    // Best effort - nothing useful can be done if close fails.
                }
            }

            // Remove the partial file if the download was cancelled or errored.
            if (status > 2) {
                new File(getFileName(url)).delete();
            }

            // Close connection to server.
            if (stream != null) {
                try {
                    stream.close();
                } catch (Exception ignored) {
                    // Best effort - nothing useful can be done if close fails.
                }
            }
        }
    }

    // Notify observers that this download's status has changed.
    private void stateChanged() {
        setChanged();
        notifyObservers();
    }
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package datafu.hourglass.demo;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import datafu.hourglass.fs.PathUtils;
import datafu.hourglass.test.Schemas;
import datafu.hourglass.test.PartitionCollapsingTests;
import datafu.hourglass.test.TestBase;
import datafu.hourglass.test.util.DailyTrackingWriter;
@Test(groups="pcl")
public class Examples extends TestBase
{
  // NOTE(review): this was Logger.getLogger(PartitionCollapsingTests.class), an
  // apparent copy/paste slip that logged under another test's category.
  private final Logger _log = Logger.getLogger(Examples.class);

  // Schema for the test "Event" records: a single long "id" field.
  private static final Schema EVENT_SCHEMA;

  // Reusable record instance, re-populated for each stored id.
  private GenericRecord _record;

  // Writes daily-partitioned event data under data/event.
  private DailyTrackingWriter _eventWriter;

  static
  {
    EVENT_SCHEMA = Schemas.createRecordSchema(Examples.class, "Event",
                                              new Field("id", Schema.create(Type.LONG), null, null));
    System.out.println("Event schema: " + EVENT_SCHEMA.toString(true));
  }

  public Examples() throws IOException
  {
    super();
  }

  @BeforeClass
  public void beforeClass() throws Exception
  {
    super.beforeClass();
  }

  @AfterClass
  public void afterClass() throws Exception
  {
    super.afterClass();
  }

  /**
   * Clears the input and output paths and recreates the record/writer pair
   * before each test so tests do not see each other's data.
   */
  @BeforeMethod
  public void beforeMethod(Method method) throws IOException
  {
    _log.info("*** Running " + method.getName());

    _log.info("*** Cleaning input and output paths");
    getFileSystem().delete(new Path(getDataPath(), "data"), true);
    getFileSystem().delete(new Path(getDataPath(), "output"), true);
    getFileSystem().mkdirs(new Path(getDataPath(), "data"));
    getFileSystem().mkdirs(new Path(getDataPath(), "output"));

    _record = new GenericData.Record(EVENT_SCHEMA);

    _eventWriter = new DailyTrackingWriter(new Path(getDataPath(), "data/event"), EVENT_SCHEMA, getFileSystem());
  }

  @Test
  public void countByMember() throws IOException, InterruptedException, ClassNotFoundException
  {
    // setup
    openDayForEvent(2013, 3, 15);

    storeIds(1,1,1);
    storeIds(2);
    storeIds(3,3);

    closeDayForEvent();

    openDayForEvent(2013, 3, 16);

    storeIds(1,1);
    storeIds(2,2);
    storeIds(3);

    closeDayForEvent();

    // run
    new CountById().run(createJobConf(), getDataPath() + "/data/event", getDataPath() + "/output");

    // verify
    checkOutputFolderCount(new Path(getDataPath(), "output"), 1);

    HashMap<Long,Integer> counts = loadOutputCounts(new Path(getDataPath(), "output"), "20130316");
    checkSize(counts, 3);
    checkIdCount(counts, 1, 5);
    checkIdCount(counts, 2, 3);
    checkIdCount(counts, 3, 3);

    // more data
    openDayForEvent(2013, 3, 17);

    storeIds(1,1);
    storeIds(2,2,2);
    storeIds(3,3);

    closeDayForEvent();

    // run again - counts accumulate across the new day
    new CountById().run(createJobConf(), getDataPath() + "/data/event", getDataPath() + "/output");

    counts = loadOutputCounts(new Path(getDataPath(), "output"), "20130317");
    checkSize(counts, 3);
    checkIdCount(counts, 1, 7);
    checkIdCount(counts, 2, 6);
    checkIdCount(counts, 3, 5);
  }

  @Test
  public void estimateNumMembers() throws IOException, InterruptedException, ClassNotFoundException
  {
    openDayForEvent(2013, 3, 1);

    // lots of members logged in this day
    for (int i=1; i<=10000; i++)
    {
      storeIds(i);
    }

    closeDayForEvent();

    // but only a handful logged in the remaining days
    for (int i=2; i<=30; i++)
    {
      openDayForEvent(2013, 3, i);
      storeIds(1,2,3,4,5);
      closeDayForEvent();
    }

    // run
    new EstimateCardinality().run(createJobConf(), getDataPath() + "/data/event", getDataPath() + "/output/daily", getDataPath() + "/output/summary", 30);

    // verify the estimate is within 0.5% of the true cardinality (10000)
    checkIntermediateFolderCount(new Path(getDataPath(), "output/daily"), 30);
    checkOutputFolderCount(new Path(getDataPath(), "output/summary"), 1);
    Assert.assertTrue(Math.abs(10000L - loadMemberCount(new Path(getDataPath(), "output/summary"), "20130330").longValue())/10000.0 < 0.005);

    // more data
    openDayForEvent(2013, 3, 31);
    storeIds(6,7,8,9,10);
    closeDayForEvent();

    // run
    new EstimateCardinality().run(createJobConf(), getDataPath() + "/data/event", getDataPath() + "/output/daily", getDataPath() + "/output/summary", 30);

    // verify: with the 30-day window day 1 has aged out, leaving exactly 10 members
    checkIntermediateFolderCount(new Path(getDataPath(), "output/daily"), 31);
    checkOutputFolderCount(new Path(getDataPath(), "output/summary"), 1);
    Assert.assertEquals(loadMemberCount(new Path(getDataPath(), "output/summary"), "20130331").longValue(), 10L);
  }

  // Opens the event writer for the given day.
  private void openDayForEvent(int year, int month, int day) throws IOException
  {
    System.out.println(String.format("start day: %04d %02d %02d", year, month, day));
    _eventWriter.open(year, month, day);
  }

  private void closeDayForEvent() throws IOException
  {
    _eventWriter.close();
  }

  // Appends one event per id to the currently open day.
  private void storeIds(long... ids) throws IOException
  {
    for (long id : ids)
    {
      storeId(id);
    }
  }

  private void storeId(long id) throws IOException
  {
    _record.put("id", id);
    System.out.println("record: " + _record.toString());
    _eventWriter.append(_record);
  }

  // Asserts that the given id is present with the expected count.
  private void checkIdCount(HashMap<Long,Integer> counts, long id, long count)
  {
    Assert.assertTrue(counts.containsKey(id));
    Assert.assertEquals(counts.get(id).intValue(), count);
  }

  // Asserts the map size, dumping the sorted contents on mismatch to ease debugging.
  private void checkSize(HashMap<Long,Integer> counts, int expectedSize)
  {
    if (counts.size() != expectedSize)
    {
      StringBuilder sb = new StringBuilder("Expected count " + expectedSize + " does not match actual " + counts.size() + ", contents:\n");
      List<Long> keys = new ArrayList<Long>(counts.keySet());
      Collections.sort(keys);
      for (Long k : keys)
      {
        sb.append(k.toString() + " => " + counts.get(k).toString() + "\n");
      }
      Assert.fail(sb.toString());
    }
  }

  private void checkOutputFolderCount(Path path, int expectedCount) throws IOException
  {
    Assert.assertEquals(countOutputFolders(path), expectedCount, "Found: " + listOutputFolders(path));
  }

  private void checkIntermediateFolderCount(Path path, int expectedCount) throws IOException
  {
    Assert.assertEquals(countIntermediateFolders(path), expectedCount, "Found: " + listIntermediateFolders(path));
  }

  // Intermediate (daily) folders are nested three levels deep: year/month/day.
  private int countIntermediateFolders(Path path) throws IOException
  {
    FileSystem fs = getFileSystem();
    return fs.globStatus(new Path(path, "*/*/*"), PathUtils.nonHiddenPathFilter).length;
  }

  private int countOutputFolders(Path path) throws IOException
  {
    FileSystem fs = getFileSystem();
    return fs.listStatus(path, PathUtils.nonHiddenPathFilter).length;
  }

  private String listOutputFolders(Path path) throws IOException
  {
    StringBuilder sb = new StringBuilder();
    for (FileStatus stat : getFileSystem().listStatus(path, PathUtils.nonHiddenPathFilter))
    {
      sb.append(stat.getPath().getName());
      sb.append(",");
    }
    return sb.toString();
  }

  private String listIntermediateFolders(Path path) throws IOException
  {
    StringBuilder sb = new StringBuilder();
    for (FileStatus stat : getFileSystem().globStatus(new Path(path, "*/*/*"), PathUtils.nonHiddenPathFilter))
    {
      sb.append(stat.getPath().getName());
      sb.append(",");
    }
    return sb.toString();
  }

  // Reads the single "count" value from the first summary avro file for the timestamp.
  private Long loadMemberCount(Path path, String timestamp) throws IOException
  {
    FileSystem fs = getFileSystem();
    Assert.assertTrue(fs.exists(new Path(path, timestamp)));
    for (FileStatus stat : fs.globStatus(new Path(path, timestamp + "/*.avro")))
    {
      _log.info(String.format("found: %s (%d bytes)", stat.getPath(), stat.getLen()));

      FSDataInputStream is = fs.open(stat.getPath());
      DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
      DataFileStream<GenericRecord> dataFileStream = new DataFileStream<GenericRecord>(is, reader);

      try
      {
        GenericRecord r = dataFileStream.next();
        Long count = (Long)((GenericRecord)r.get("value")).get("count");
        Assert.assertNotNull(count);
        System.out.println("found count: " + count);
        return count;
      }
      finally
      {
        // Closing the data file stream also closes the underlying input stream.
        dataFileStream.close();
      }
    }
    throw new RuntimeException("found no data");
  }

  // Loads member_id -> count pairs from all avro files for the timestamp.
  private HashMap<Long,Integer> loadOutputCounts(Path path, String timestamp) throws IOException
  {
    HashMap<Long,Integer> counts = new HashMap<Long,Integer>();
    FileSystem fs = getFileSystem();
    Assert.assertTrue(fs.exists(new Path(path, timestamp)));
    for (FileStatus stat : fs.globStatus(new Path(path, timestamp + "/*.avro")))
    {
      _log.info(String.format("found: %s (%d bytes)", stat.getPath(), stat.getLen()));

      FSDataInputStream is = fs.open(stat.getPath());
      DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
      DataFileStream<GenericRecord> dataFileStream = new DataFileStream<GenericRecord>(is, reader);

      try
      {
        while (dataFileStream.hasNext())
        {
          GenericRecord r = dataFileStream.next();
          _log.info("found: " + r.toString());
          Long memberId = (Long)((GenericRecord)r.get("key")).get("member_id");
          Assert.assertNotNull(memberId);
          Integer count = (Integer)((GenericRecord)r.get("value")).get("count");
          Assert.assertNotNull(count);
          // Each member must appear at most once per output partition.
          Assert.assertFalse(counts.containsKey(memberId));
          counts.put(memberId, count);
        }
      }
      finally
      {
        dataFileStream.close();
      }
    }
    return counts;
  }
}
|
|
/*******************************************************************************
* Copyright (c) 2015 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package com.ibm.ws.massive.esa;
import java.io.File;
import java.io.IOException;
import java.security.InvalidParameterException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.ZipException;
import com.ibm.ws.massive.esa.internal.EsaManifest;
import com.ibm.ws.massive.upload.RepositoryArchiveEntryNotFoundException;
import com.ibm.ws.massive.upload.RepositoryArchiveIOException;
import com.ibm.ws.massive.upload.RepositoryArchiveInvalidEntryException;
import com.ibm.ws.massive.upload.RepositoryUploader;
import com.ibm.ws.massive.upload.internal.MassiveUploader;
import com.ibm.ws.repository.common.enums.AttachmentType;
import com.ibm.ws.repository.common.enums.DisplayPolicy;
import com.ibm.ws.repository.common.enums.InstallPolicy;
import com.ibm.ws.repository.common.enums.LicenseType;
import com.ibm.ws.repository.common.enums.Visibility;
import com.ibm.ws.repository.connections.RepositoryConnection;
import com.ibm.ws.repository.connections.RepositoryConnectionList;
import com.ibm.ws.repository.exceptions.RepositoryBackendException;
import com.ibm.ws.repository.exceptions.RepositoryException;
import com.ibm.ws.repository.exceptions.RepositoryResourceCreationException;
import com.ibm.ws.repository.exceptions.RepositoryResourceException;
import com.ibm.ws.repository.exceptions.RepositoryResourceUpdateException;
import com.ibm.ws.repository.resources.EsaResource;
import com.ibm.ws.repository.resources.internal.AppliesToProcessor;
import com.ibm.ws.repository.resources.writeable.AttachmentResourceWritable;
import com.ibm.ws.repository.resources.writeable.EsaResourceWritable;
import com.ibm.ws.repository.resources.writeable.WritableResourceFactory;
import com.ibm.ws.repository.strategies.writeable.UploadStrategy;
import com.ibm.ws.repository.transport.model.Asset;
/**
* <p>
* This class contains methods for working with ESAs inside MaaSive.
* </p>
*/
public class MassiveEsa extends MassiveUploader implements RepositoryUploader<EsaResourceWritable> {
/** Map of symbolic name to asset to make finding other features easy */
private final Map<EsaIdentifier, EsaResource> allFeatures;
private final Logger logger = Logger.getLogger(MassiveEsa.class.getName());
/**
 * Construct a new instance and load all of the existing features inside MaaSive.
 *
 * @param repoConnection The connection to the repository to load the existing features from
 * @throws RepositoryException If the existing features cannot be read from the repository
 */
public MassiveEsa(RepositoryConnection repoConnection)
    throws RepositoryException {
    super(repoConnection);

    /*
     * Cache every feature already present in the repository so that enablement
     * information can be resolved against them later.
     */
    this.allFeatures = new HashMap<>();

    for (EsaResource existing : new RepositoryConnectionList(repoConnection).getAllFeatures()) {
        // All features must provide a single symbolic name so no null/size check.
        logger.log(Level.FINE, "Resource features " + existing.getProvideFeature());

        EsaIdentifier id = new EsaIdentifier(existing.getProvideFeature(), existing.getVersion(), existing.getAppliesTo());

        allFeatures.put(id, existing);
    }
}
/**
* This method will add a collection of ESAs into MaaSive
*
* @param esas The ESAs to add
 * @return the new {@link EsaResource}s added to massive (will not include any resources that
* were modified as a result of this operation)
* @throws ZipException
* @throws RepositoryResourceCreationException
* @throws RepositoryResourceUpdateException
*/
public Collection<EsaResource> addEsasToMassive(Collection<File> esas, UploadStrategy strategy) throws RepositoryException {
    Collection<EsaResource> uploaded = new HashSet<EsaResource>();

    // Upload each archive individually; the strategy decides how conflicts are handled.
    for (File esaFile : esas) {
        uploaded.add(uploadFile(esaFile, strategy, null));
    }

    return uploaded;
}
/*
* (non-Javadoc)
*
* @see com.ibm.ws.massive.upload.RepositoryUploader#canUploadFile(java.io.File)
*/
@Override
public boolean canUploadFile(File assetFile) {
    // This uploader only handles ESA archives.
    String fileName = assetFile.getName();

    return fileName.endsWith(".esa");
}
/*
* (non-Javadoc)
*
* @see com.ibm.ws.massive.upload.RepositoryUploader#uploadFile(java.io.File,
* com.ibm.ws.massive.resources.UploadStrategy)
*/
@Override
public EsaResourceWritable uploadFile(File esa, UploadStrategy strategy, String contentUrl) throws RepositoryException {

    ArtifactMetadata artifactMetadata = explodeArtifact(esa);

    // Read the meta data from the esa
    EsaManifest feature;
    try {
        feature = EsaManifest
                        .constructInstance(esa);
    } catch (IOException e) {
        throw new RepositoryArchiveIOException(e.getMessage(), esa, e);
    }

    /*
     * First see if we already have this feature in MaaSive, note this means we can only have
     * one version of the asset in MaaSive at a time
     */
    EsaResourceWritable resource = WritableResourceFactory.createEsa(repoConnection);
    String symbolicName = feature.getSymbolicName();
    String version = feature.getVersion().toString();
    String appliesTo = feature.getHeader("IBM-AppliesTo");
    EsaIdentifier identifier = new EsaIdentifier(symbolicName, version, appliesTo);

    // Massive assets are always English, find the best name
    String subsystemName = feature.getHeader("Subsystem-Name",
                                             Locale.ENGLISH);
    String shortName = feature.getIbmShortName();
    String metadataName = artifactMetadata != null ? artifactMetadata.getName() : null;
    final String name;

    /*
     * We want to be able to override the name in the built ESA with a value supplied in the
     * metadata so use this in preference of what is in the ESA so that we can correct any typos
     * post-GM
     */
    if (metadataName != null && !metadataName.isEmpty()) {
        name = metadataName;
    } else if (subsystemName != null && !subsystemName.isEmpty()) {
        name = subsystemName;
    } else if (shortName != null && !shortName.isEmpty()) {
        name = shortName;
    } else {
        // symbolic name is always set
        name = symbolicName;
    }
    resource.setName(name);

    // Prefer the short description from the supplied metadata, then the manifest.
    String shortDescription = null;
    if (artifactMetadata != null) {
        shortDescription = artifactMetadata.getShortDescription();
        resource.setDescription(artifactMetadata.getLongDescription());
    }
    if (shortDescription == null) {
        shortDescription = feature.getHeader("Subsystem-Description", Locale.ENGLISH);
    }
    resource.setShortDescription(shortDescription);
    resource.setVersion(version);

    // Add icon files
    processIcons(esa, feature, resource);

    String provider = feature.getHeader("Subsystem-Vendor");
    if (provider != null && !provider.isEmpty()) {
        resource.setProviderName(provider);
        if ("IBM".equals(provider)) {
            resource.setProviderUrl("http://www.ibm.com");
        }
    } else {
        // Massive breaks completely if the provider is not filled in so
        // make sure it is!
        throw new InvalidParameterException("Subsystem-Vendor must be set in the manifest headers");
    }

    // Add custom attributes for WLP
    resource.setProvideFeature(symbolicName);
    // Reuse the "IBM-AppliesTo" header value already read above.
    resource.setAppliesTo(appliesTo);
    Visibility visibility = feature.getVisibility();
    resource.setVisibility(visibility);

    /*
     * Two things affect the display policy - the visibility and the install policy. If a
     * private auto feature is set to manual install we need to make it visible so people know
     * that it exists and can be installed
     */
    DisplayPolicy displayPolicy;
    DisplayPolicy webDisplayPolicy;

    if (visibility == Visibility.PUBLIC) {
        displayPolicy = DisplayPolicy.VISIBLE;
        webDisplayPolicy = DisplayPolicy.VISIBLE;
    } else {
        displayPolicy = DisplayPolicy.HIDDEN;
        webDisplayPolicy = DisplayPolicy.HIDDEN;
    }

    if (feature.isAutoFeature()) {
        resource.setProvisionCapability(feature.getHeader("IBM-Provision-Capability"));
        String ibmInstallPolicy = feature.getHeader("IBM-Install-Policy");

        // Default InstallPolicy is set to MANUAL
        InstallPolicy installPolicy;
        if (ibmInstallPolicy != null && ("when-satisfied".equals(ibmInstallPolicy))) {
            installPolicy = InstallPolicy.WHEN_SATISFIED;
        } else {
            installPolicy = InstallPolicy.MANUAL;
            // As discussed above set the display policy to visible for any manual auto features
            displayPolicy = DisplayPolicy.VISIBLE;
            webDisplayPolicy = DisplayPolicy.VISIBLE;
        }
        resource.setInstallPolicy(installPolicy);
    }

    // if we are dealing with a beta feature hide it otherwise apply the
    // display policies from above
    if (isBeta(resource.getAppliesTo())) {
        resource.setWebDisplayPolicy(DisplayPolicy.HIDDEN);
    } else {
        resource.setWebDisplayPolicy(webDisplayPolicy);
    }

    // Always set displayPolicy
    resource.setDisplayPolicy(displayPolicy);

    // handle required iFixes
    String requiredFixes = feature.getHeader("IBM-Require-Fix");
    if (requiredFixes != null && !requiredFixes.isEmpty()) {
        String[] fixes = requiredFixes.split(",");
        for (String fix : fixes) {
            fix = fix.trim();
            if (!fix.isEmpty()) {
                resource.addRequireFix(fix);
            }
        }
    }

    resource.setShortName(shortName);

    // Calculate which features this relies on
    for (String requiredFeature : feature.getRequiredFeatures()) {
        resource.addRequireFeature(requiredFeature);
    }

    // feature.supersededBy is a comma-separated list of shortNames. Add
    // each of the elements to either supersededBy or supersededByOptional.
    String supersededBy = feature.getSupersededBy();
    if (supersededBy != null && !supersededBy.trim().isEmpty()) {
        String[] supersededByArray = supersededBy.split(",");
        for (String f : supersededByArray) {
            // If one of the elements is surrounded by [square brackets] then we
            // strip the brackets off and treat it as optional
            if (f.startsWith("[")) {
                f = f.substring(1, f.length() - 1);
                resource.addSupersededByOptional(f);
            } else {
                resource.addSupersededBy(f);
            }
        }
    }

    String attachmentName = symbolicName + ".esa";
    addContent(resource, esa, attachmentName, artifactMetadata, contentUrl);

    // Set the license type if we're using the feature terms agreement so that we know later
    // that there won't be a license information file.
    String subsystemLicense = feature.getHeader("Subsystem-License");
    if (subsystemLicense != null && subsystemLicense.equals("http://www.ibm.com/licenses/wlp-featureterms-v1")) {
        resource.setLicenseType(LicenseType.UNSPECIFIED);
    }

    if (artifactMetadata != null) {
        attachLicenseData(artifactMetadata, resource);
    }

    // Now look for LI, LA files inside the .esa
    // We expect to find them in wlp/lafiles/LI_{Locale} or /LA_{Locale}
    try {
        processLAandLI(esa, resource, feature);
    } catch (IOException e) {
        throw new RepositoryArchiveIOException(e.getMessage(), esa, e);
    }
    // Reuse the "Subsystem-License" header value already read above.
    resource.setLicenseId(subsystemLicense);

    // Publish to massive. (A previous catch-and-rethrow of RepositoryException
    // here was a no-op and has been removed.)
    resource.uploadToMassive(strategy);

    this.allFeatures.put(identifier, resource);

    return resource;
}
protected static boolean isBeta(String appliesTo) {
    /*
     * The appliesTo string determines whether a feature is a Beta or a regular feature.
     * Beta features look like:
     * "com.ibm.websphere.appserver; productVersion=2014.8.0.0; productInstallType=Archive"
     */
    if (appliesTo == null) {
        return false;
    }

    return appliesTo.matches(".*productVersion=" + AppliesToProcessor.BETA_REGEX);
}
/**
 * Extracts the icons named in the "Subsystem-Icon" header from the ESA and attaches
 * them to the resource as thumbnails, recording image dimensions when a
 * {@code size=} attribute is present.
 *
 * @throws RepositoryArchiveEntryNotFoundException if a named icon is missing from the archive
 */
private void processIcons(File esa, EsaManifest feature, EsaResourceWritable resource) throws RepositoryException {
    String subsystemIcon = feature.getHeader("Subsystem-Icon");

    if (subsystemIcon != null) {
        subsystemIcon = subsystemIcon.replaceAll("\\s", "");

        StringTokenizer s = new StringTokenizer(subsystemIcon, ",");
        while (s.hasMoreTokens()) {
            // BUG FIX: reset per icon entry. These were previously declared outside
            // the loop, so an icon without a size= attribute silently inherited the
            // size of the preceding icon.
            int size = 0;
            String iconName = "";

            String current = s.nextToken();

            if (current.contains(";")) { // if the icon has an associated size
                StringTokenizer t = new StringTokenizer(current, ";");
                while (t.hasMoreTokens()) {
                    String sizeString = t.nextToken();

                    if (sizeString.contains("size=")) {
                        String sizes[] = sizeString.split("size=");
                        size = Integer.parseInt(sizes[sizes.length - 1]);
                    } else {
                        iconName = sizeString;
                    }
                }
            } else {
                iconName = current;
            }

            File icon = this.extractFileFromArchive(esa.getAbsolutePath(), iconName).getExtractedFile();
            if (icon.exists()) {
                AttachmentResourceWritable at = resource.addAttachment(icon, AttachmentType.THUMBNAIL);
                if (size != 0) {
                    at.setImageDimensions(size, size);
                }
            } else {
                throw new RepositoryArchiveEntryNotFoundException("Icon does not exist", esa, iconName);
            }
        }
    }
}
/**
* Utility method to delete all the features in MaaSive.
*
* @throws RepositoryBackendException
*
* @throws IOException
*/
public void deleteAllFeatures() throws RepositoryResourceException, RepositoryBackendException {
    // Iterate via the iterator so entries can be removed from the backing map as we go.
    Iterator<EsaResource> features = this.allFeatures.values().iterator();

    while (features.hasNext()) {
        EsaResourceWritable feature = (EsaResourceWritable) features.next();

        logger.log(Level.INFO, "Deleting " + feature.getId());

        feature.delete();
        features.remove();
    }
}
/**
* Utility method to delete certain features in MaaSive. All versions of the feature will be
* deleted.
*
* @param featureNames The feature symbolic names or IBM short names to delete
* @throws RepositoryResourceCreationException
* @throws RepositoryResourceUpdateException
*/
public void deleteFeatures(Collection<String> featureNames)
    throws IOException, RepositoryException {
    /*
     * When we delete a feature we would also need to remove any enabling information that
     * references it; that bookkeeping is currently disabled.
     */
    //Collection<EsaResource> assetsForUpdating = new HashSet<EsaResource>();

    Iterator<EsaResource> features = this.allFeatures.values().iterator();

    while (features.hasNext()) {
        EsaResourceWritable feature = (EsaResourceWritable) features.next();

        // A feature may be named by either its symbolic name or its IBM short name.
        boolean requested = featureNames.contains(feature.getProvideFeature())
                            || featureNames.contains(feature.getShortName());

        if (requested) {
            // removeEnablingInformationForFeature(feature, assetsForUpdating);
            feature.delete();
            features.remove();
        }
    }

    /*
     * There is an edge case where the removeEnablingInformationForFeature method is over
     * zealous at removing enabling information, so the add would need to be re-run just in
     * case anything needs adding back in (also currently disabled). First make sure we
     * haven't deleted the assets we thought needed updating!
     */
    //assetsForUpdating.retainAll(this.allFeatures.values());
    // addEnablingInformation(assetsForUpdating);
}
/**
* Returns the first feature with the supplied symbolic name. Unlike {@link #getAllFeatures()}
* this returns the complete feature with attachments.
*
* @param symbolicName The symbolic name of the feature to get
* @return The Asset representing the feature in MaaSive or <code>null</code> if one doesn't
* exist
* @throws IOException
* @throws RepositoryException
*/
public EsaResource getFeature(String symbolicName) throws IOException, RepositoryException {
    EsaResource summary = findFeature(symbolicName);

    if (summary == null) {
        return null;
    }

    // The cached summary lacks attachments, so re-fetch the complete resource by ID.
    return (EsaResource) repoConnection.getResource(summary.getId());
}
/**
* Returns the feature with the supplied symbolic name and version. Unlike
* {@link #getAllFeatures()} this returns the complete feature with attachments.
*
* @param symbolicName The symbolic name of the feature to get
* @param version The version of the feature to get
* @return The EsaResource representing the feature in Massive or <code>null</code> if one
* doesn't exist
* @throws IOException
* @throws RepositoryException
*/
public EsaResource getFeature(String symbolicName, String version, String appliesTo) throws IOException, RepositoryException {
EsaResource summaryResource = this.allFeatures.get(new EsaIdentifier(symbolicName, version, appliesTo));
if (summaryResource != null) {
return (EsaResource) repoConnection.getResource(summaryResource.getId());
} else {
return null;
}
}
/**
* Returns all features inside Massive. Note that these are just the summary of features
* returned from Massive's GET all request. Most of the fields will be filled in but the
* attachments will not be. In order to get the attachments as well call
* {@link #getFeature(String)}.
*
* @return A collection of {@link Asset}s representing the features in Massive or an empty
* collection if none exists
*/
public Collection<EsaResource> getAllFeatures() {
/*
* A map's values() collection is backed by the map so if someone clears it we'll be in
* trouble... stop them!
*/
return Collections.unmodifiableCollection(this.allFeatures.values());
}
/**
* Finds a feature with the provided symbolic name
*
* @param symbolicName The symbolic name to look for, must not be <code>null</code>
* @return the first EsaResource with the provided symbolicName that is found or
* <code>null</code> if none is found
*/
private EsaResource findFeature(String symbolicName) {
for (EsaResource esa : this.allFeatures.values()) {
if (symbolicName.equals(esa.getProvideFeature())) {
return esa;
}
}
return null;
}
    /**
     * Verifies that the artifact metadata for an ESA carries the mandatory
     * description property.
     *
     * @param artifact the metadata to validate
     * @throws RepositoryArchiveInvalidEntryException if the description property is not set
     */
    @Override
    protected void checkRequiredProperties(ArtifactMetadata artifact) throws RepositoryArchiveInvalidEntryException {
        checkPropertySet(PROP_DESCRIPTION, artifact);
    }
/**
* This class holds identification information about an ESA to uniquely identify it and
* implements equals and hash code so it can be used as a lookup key.
*/
private static class EsaIdentifier {
private final String symbolicName;
private final String version;
private final String appliesTo;
/**
* @param symbolicName The symbolic name of the ESA
* @param version The version of the ESA
*/
public EsaIdentifier(String symbolicName, String version, String appliesTo) {
this.symbolicName = symbolicName;
this.version = version;
this.appliesTo = appliesTo;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((symbolicName == null) ? 0 : symbolicName.hashCode());
result = prime * result + ((version == null) ? 0 : version.hashCode());
result = prime * result + ((appliesTo == null) ? 0 : appliesTo.hashCode());
return result;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EsaIdentifier other = (EsaIdentifier) obj;
if (symbolicName == null) {
if (other.symbolicName != null)
return false;
} else if (!symbolicName.equals(other.symbolicName))
return false;
if (version == null) {
if (other.version != null)
return false;
} else if (!version.equals(other.version))
return false;
if (appliesTo == null) {
if (other.appliesTo != null)
return false;
} else if (!appliesTo.equals(other.appliesTo))
return false;
return true;
}
}
}
|
|
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.jet.impl.processor;
import com.hazelcast.function.ConsumerEx;
import com.hazelcast.function.FunctionEx;
import com.hazelcast.jet.config.ProcessingGuarantee;
import com.hazelcast.jet.core.BroadcastKey;
import com.hazelcast.jet.core.Outbox;
import com.hazelcast.jet.core.Processor.Context;
import com.hazelcast.jet.function.RunnableEx;
import com.hazelcast.jet.impl.processor.TwoPhaseSnapshotCommitUtility.TransactionId;
import com.hazelcast.jet.impl.processor.TwoPhaseSnapshotCommitUtility.TransactionalResource;
import javax.annotation.Nonnull;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.function.Supplier;
import static com.hazelcast.jet.core.BroadcastKey.broadcastKey;
import static com.hazelcast.jet.impl.util.ExceptionUtil.sneakyThrow;
/**
* Utility to handle transactions in a processor that is able to have unbounded
* number of open transactions and is able to enumerate those pertaining to a
* job.
*
* @param <RES> the transaction type
*/
public class UnboundedTransactionsProcessorUtility<TXN_ID extends TransactionId, RES extends TransactionalResource<TXN_ID>>
        extends TwoPhaseSnapshotCommitUtility<TXN_ID, RES> {
    // Supplies a fresh transaction id each time a new transaction is started.
    private final Supplier<TXN_ID> createTxnIdFn;
    // Aborts transactions left behind in the external system by a previous
    // execution; invoked lazily before the first transaction is created.
    private final RunnableEx abortUnfinishedTransactionsAction;
    // The transaction currently handed out by activeTransaction(), or null.
    private LoggingNonThrowingResource<TXN_ID, RES> activeTransaction;
    // Transactions that have been prepared but not yet committed. Null when
    // the external guarantee doesn't use the transaction lifecycle.
    private final List<LoggingNonThrowingResource<TXN_ID, RES>> pendingTransactions;
    // Transaction ids that still have to be offered to the current snapshot.
    private final Queue<TXN_ID> snapshotQueue = new ArrayDeque<>();
    // True once leftover transactions from previous executions were aborted.
    private boolean initialized;
    // True between a successful snapshotCommitPrepare() and the matching
    // snapshotCommitFinish().
    private boolean snapshotInProgress;
    /**
     * @param abortUnfinishedTransactionsAction when called, it should abort
     *      all unfinished transactions found in the external system that
     *      pertain to the processor
     */
    public UnboundedTransactionsProcessorUtility(
            @Nonnull Outbox outbox,
            @Nonnull Context procContext,
            @Nonnull ProcessingGuarantee externalGuarantee,
            @Nonnull Supplier<TXN_ID> createTxnIdFn,
            @Nonnull FunctionEx<TXN_ID, RES> createTxnFn,
            @Nonnull ConsumerEx<TXN_ID> recoverAndCommitFn,
            @Nonnull RunnableEx abortUnfinishedTransactionsAction
    ) {
        // The abort-by-id function is unsupported: this utility never aborts
        // individual transactions by id; see close() for the rollback path.
        super(outbox, procContext, false, externalGuarantee, createTxnFn, recoverAndCommitFn,
                txnId -> {
                    throw new UnsupportedOperationException();
                });
        this.createTxnIdFn = createTxnIdFn;
        this.abortUnfinishedTransactionsAction = abortUnfinishedTransactionsAction;
        pendingTransactions = usesTransactionLifecycle() ? new ArrayList<>() : null;
    }
    @Nonnull @Override
    public RES activeTransaction() {
        if (activeTransaction == null) {
            if (!initialized) {
                // First transaction of this execution: clean up whatever a
                // previous execution may have left unfinished in the external
                // system before starting new work.
                if (usesTransactionLifecycle()) {
                    try {
                        procContext().logger().fine("aborting unfinished transactions");
                        abortUnfinishedTransactionsAction.run();
                    } catch (Exception e) {
                        throw sneakyThrow(e);
                    }
                }
                initialized = true;
            }
            activeTransaction = createTxnFn().apply(createTxnIdFn.get());
            if (usesTransactionLifecycle()) {
                activeTransaction.begin();
            }
        }
        return activeTransaction.wrapped();
    }
    /**
     * Force a new transaction outside of the snapshot cycle. The next call to
     * {@link #activeTransaction()} will return a new transaction.
     */
    public void finishActiveTransaction() {
        if (activeTransaction == null) {
            return;
        }
        if (usesTransactionLifecycle()) {
            // Prepare the transaction now; it is committed only after the next
            // successful snapshot (see snapshotCommitFinish).
            pendingTransactions.add(activeTransaction);
            activeTransaction.endAndPrepare();
        } else {
            activeTransaction.release();
        }
        activeTransaction = null;
    }
    @Override
    public void afterCompleted() {
        if (activeTransaction == null) {
            return;
        }
        if (usesTransactionLifecycle()) {
            pendingTransactions.add(activeTransaction);
            // If a snapshot is in flight the commit is deferred to
            // snapshotCommitFinish(true); otherwise commit right away.
            if (!snapshotInProgress) {
                commitPendingTransactions();
            }
        } else {
            activeTransaction.release();
        }
        activeTransaction = null;
    }
    @Override
    public boolean snapshotCommitPrepare() {
        if (usesTransactionLifecycle()) {
            // Fill the queue only on the first attempt; when the outbox
            // rejected an item earlier we are called again and keep draining
            // the existing queue.
            if (snapshotQueue.isEmpty()) {
                finishActiveTransaction();
                for (LoggingNonThrowingResource<TXN_ID, RES> txn : pendingTransactions) {
                    snapshotQueue.add(txn.id());
                }
            }
        } else {
            if (activeTransaction != null) {
                activeTransaction.flush();
            }
        }
        // Offer the prepared transaction ids to the snapshot; return false if
        // the outbox can't accept an item right now - we'll be called again.
        for (TXN_ID txnId; (txnId = snapshotQueue.peek()) != null; ) {
            if (!getOutbox().offerToSnapshot(broadcastKey(txnId), false)) {
                return false;
            }
            snapshotQueue.remove();
        }
        snapshotInProgress = true;
        return true;
    }
    @Override
    public boolean snapshotCommitFinish(boolean success) {
        assert snapshotInProgress : "no snapshot in progress";
        snapshotInProgress = false;
        // Phase two: the snapshot is durable, so the prepared transactions can
        // now be committed.
        if (usesTransactionLifecycle() && success) {
            commitPendingTransactions();
        }
        return true;
    }
    // Commits, releases and forgets every prepared transaction.
    private void commitPendingTransactions() {
        for (LoggingNonThrowingResource<TXN_ID, RES> txn : pendingTransactions) {
            txn.commit();
            txn.release();
        }
        pendingTransactions.clear();
    }
    @Override
    public void restoreFromSnapshot(@Nonnull Object key, @Nonnull Object value) {
        @SuppressWarnings("unchecked")
        TXN_ID txnId = ((BroadcastKey<TXN_ID>) key).key();
        // Ids are broadcast to all processors; only the processor owning the
        // id (by index modulo total parallelism) recovers and commits it.
        if (txnId.index() % procContext().totalParallelism() == procContext().globalProcessorIndex()) {
            recoverAndCommitFn().accept(txnId);
        }
    }
    @Override
    public void close() {
        // Roll back the open transaction and release everything else. Prepared
        // transactions are only released, not committed here - they are
        // recovered and committed through restoreFromSnapshot() if the job is
        // restored from a snapshot.
        if (activeTransaction != null) {
            activeTransaction.rollback();
            activeTransaction.release();
            activeTransaction = null;
        }
        if (pendingTransactions != null) {
            for (LoggingNonThrowingResource<TXN_ID, RES> txn : pendingTransactions) {
                txn.release();
            }
            pendingTransactions.clear();
        }
    }
}
|
|
/**
*/
package CIM15.IEC61970.LoadModel;
import java.util.Collection;
import org.eclipse.emf.common.notify.NotificationChain;
import org.eclipse.emf.common.util.EList;
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.InternalEObject;
import org.eclipse.emf.ecore.util.BasicInternalEList;
import org.eclipse.emf.ecore.util.InternalEList;
/**
* <!-- begin-user-doc -->
* A representation of the model object '<em><b>Conform Load Group</b></em>'.
* <!-- end-user-doc -->
*
* <p>
* The following features are supported:
* <ul>
* <li>{@link CIM15.IEC61970.LoadModel.ConformLoadGroup#getConformLoadSchedules <em>Conform Load Schedules</em>}</li>
* <li>{@link CIM15.IEC61970.LoadModel.ConformLoadGroup#getEnergyConsumers <em>Energy Consumers</em>}</li>
* </ul>
* </p>
*
* @generated
*/
public class ConformLoadGroup extends LoadGroup {
	/**
	 * The cached value of the '{@link #getConformLoadSchedules() <em>Conform Load Schedules</em>}' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getConformLoadSchedules()
	 * @generated
	 * @ordered
	 */
	protected EList<ConformLoadSchedule> conformLoadSchedules;
	/**
	 * The cached value of the '{@link #getEnergyConsumers() <em>Energy Consumers</em>}' reference list.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getEnergyConsumers()
	 * @generated
	 * @ordered
	 */
	protected EList<ConformLoad> energyConsumers;
	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected ConformLoadGroup() {
		super();
	}
	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EClass eStaticClass() {
		return LoadModelPackage.Literals.CONFORM_LOAD_GROUP;
	}
	/**
	 * Returns the value of the '<em><b>Conform Load Schedules</b></em>' reference list.
	 * The list contents are of type {@link CIM15.IEC61970.LoadModel.ConformLoadSchedule}.
	 * It is bidirectional and its opposite is '{@link CIM15.IEC61970.LoadModel.ConformLoadSchedule#getConformLoadGroup <em>Conform Load Group</em>}'.
	 * <!-- begin-user-doc -->
	 * <p>
	 * The load schedules associated with this conform load group. The backing
	 * list is created lazily on first access and is never <code>null</code>.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Conform Load Schedules</em>' reference list.
	 * @see CIM15.IEC61970.LoadModel.ConformLoadSchedule#getConformLoadGroup
	 * @generated
	 */
	public EList<ConformLoadSchedule> getConformLoadSchedules() {
		if (conformLoadSchedules == null) {
			conformLoadSchedules = new BasicInternalEList<ConformLoadSchedule>(ConformLoadSchedule.class);
		}
		return conformLoadSchedules;
	}
	/**
	 * Returns the value of the '<em><b>Energy Consumers</b></em>' reference list.
	 * The list contents are of type {@link CIM15.IEC61970.LoadModel.ConformLoad}.
	 * It is bidirectional and its opposite is '{@link CIM15.IEC61970.LoadModel.ConformLoad#getLoadGroup <em>Load Group</em>}'.
	 * <!-- begin-user-doc -->
	 * <p>
	 * The conform loads (energy consumers) belonging to this group. The
	 * backing list is created lazily on first access and is never
	 * <code>null</code>.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Energy Consumers</em>' reference list.
	 * @see CIM15.IEC61970.LoadModel.ConformLoad#getLoadGroup
	 * @generated
	 */
	public EList<ConformLoad> getEnergyConsumers() {
		if (energyConsumers == null) {
			energyConsumers = new BasicInternalEList<ConformLoad>(ConformLoad.class);
		}
		return energyConsumers;
	}
	/**
	 * <!-- begin-user-doc -->
	 * Reflective EMF hook: maintains the inverse end of the bidirectional
	 * references when the opposite side adds this object.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@SuppressWarnings("unchecked")
	@Override
	public NotificationChain eInverseAdd(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
		switch (featureID) {
			case LoadModelPackage.CONFORM_LOAD_GROUP__CONFORM_LOAD_SCHEDULES:
				return ((InternalEList<InternalEObject>)(InternalEList<?>)getConformLoadSchedules()).basicAdd(otherEnd, msgs);
			case LoadModelPackage.CONFORM_LOAD_GROUP__ENERGY_CONSUMERS:
				return ((InternalEList<InternalEObject>)(InternalEList<?>)getEnergyConsumers()).basicAdd(otherEnd, msgs);
		}
		return super.eInverseAdd(otherEnd, featureID, msgs);
	}
	/**
	 * <!-- begin-user-doc -->
	 * Reflective EMF hook: maintains the inverse end of the bidirectional
	 * references when the opposite side removes this object.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
		switch (featureID) {
			case LoadModelPackage.CONFORM_LOAD_GROUP__CONFORM_LOAD_SCHEDULES:
				return ((InternalEList<?>)getConformLoadSchedules()).basicRemove(otherEnd, msgs);
			case LoadModelPackage.CONFORM_LOAD_GROUP__ENERGY_CONSUMERS:
				return ((InternalEList<?>)getEnergyConsumers()).basicRemove(otherEnd, msgs);
		}
		return super.eInverseRemove(otherEnd, featureID, msgs);
	}
	/**
	 * <!-- begin-user-doc -->
	 * Reflective feature accessor used by the EMF runtime.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public Object eGet(int featureID, boolean resolve, boolean coreType) {
		switch (featureID) {
			case LoadModelPackage.CONFORM_LOAD_GROUP__CONFORM_LOAD_SCHEDULES:
				return getConformLoadSchedules();
			case LoadModelPackage.CONFORM_LOAD_GROUP__ENERGY_CONSUMERS:
				return getEnergyConsumers();
		}
		return super.eGet(featureID, resolve, coreType);
	}
	/**
	 * <!-- begin-user-doc -->
	 * Reflective feature mutator used by the EMF runtime; replaces the whole
	 * list contents for the addressed feature.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@SuppressWarnings("unchecked")
	@Override
	public void eSet(int featureID, Object newValue) {
		switch (featureID) {
			case LoadModelPackage.CONFORM_LOAD_GROUP__CONFORM_LOAD_SCHEDULES:
				getConformLoadSchedules().clear();
				getConformLoadSchedules().addAll((Collection<? extends ConformLoadSchedule>)newValue);
				return;
			case LoadModelPackage.CONFORM_LOAD_GROUP__ENERGY_CONSUMERS:
				getEnergyConsumers().clear();
				getEnergyConsumers().addAll((Collection<? extends ConformLoad>)newValue);
				return;
		}
		super.eSet(featureID, newValue);
	}
	/**
	 * <!-- begin-user-doc -->
	 * Reflective unset used by the EMF runtime; clears the addressed list.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eUnset(int featureID) {
		switch (featureID) {
			case LoadModelPackage.CONFORM_LOAD_GROUP__CONFORM_LOAD_SCHEDULES:
				getConformLoadSchedules().clear();
				return;
			case LoadModelPackage.CONFORM_LOAD_GROUP__ENERGY_CONSUMERS:
				getEnergyConsumers().clear();
				return;
		}
		super.eUnset(featureID);
	}
	/**
	 * <!-- begin-user-doc -->
	 * Reflective is-set check used by the EMF runtime; a feature counts as set
	 * when its cached list exists and is non-empty.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public boolean eIsSet(int featureID) {
		switch (featureID) {
			case LoadModelPackage.CONFORM_LOAD_GROUP__CONFORM_LOAD_SCHEDULES:
				return conformLoadSchedules != null && !conformLoadSchedules.isEmpty();
			case LoadModelPackage.CONFORM_LOAD_GROUP__ENERGY_CONSUMERS:
				return energyConsumers != null && !energyConsumers.isEmpty();
		}
		return super.eIsSet(featureID);
	}
} // ConformLoadGroup
|
|
package com.google.tsunami.plugins.detectors.rce.cve202125646;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.tsunami.common.net.http.HttpRequest.get;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.flogger.GoogleLogger;
import com.google.protobuf.util.Timestamps;
import com.google.tsunami.common.data.NetworkServiceUtils;
import com.google.tsunami.common.net.http.HttpClient;
import com.google.tsunami.common.net.http.HttpResponse;
import com.google.tsunami.common.net.http.HttpStatus;
import com.google.tsunami.common.time.UtcClock;
import com.google.tsunami.plugin.PluginType;
import com.google.tsunami.plugin.VulnDetector;
import com.google.tsunami.plugin.annotations.PluginInfo;
import com.google.tsunami.proto.AdditionalDetail;
import com.google.tsunami.proto.DetectionReport;
import com.google.tsunami.proto.DetectionReportList;
import com.google.tsunami.proto.DetectionStatus;
import com.google.tsunami.proto.NetworkService;
import com.google.tsunami.proto.Severity;
import com.google.tsunami.proto.TargetInfo;
import com.google.tsunami.proto.TextData;
import com.google.tsunami.proto.Vulnerability;
import com.google.tsunami.proto.VulnerabilityId;
import java.io.IOException;
import java.time.Clock;
import java.time.Instant;
import java.util.Optional;
import java.util.regex.Pattern;
import javax.inject.Inject;
/** A {@link VulnDetector} that detects the CVE-2021-41773 and CVE-2021-42013 vulnerability. */
@PluginInfo(
type = PluginType.VULN_DETECTION,
name = "ApacheHttpServerCVE202141773VulnDetector",
version = "1.0",
description =
"This detector checks for Apache HTTP Server 2.4.49 Path traversal and "
+ "disclosure vulnerability.",
author = "threedr3am ([email protected])",
bootstrapModule = ApacheHttpServerCVE202141773VulnDetectorBootstrapModule.class)
public class ApacheHttpServerCVE202141773VulnDetector implements VulnDetector {
  private static final GoogleLogger logger = GoogleLogger.forEnclosingClass();
  private final Clock utcClock;
  private final HttpClient httpClient;
  // Start of the root entry in /etc/passwd; finding it in a response body
  // shows the traversal reached the target's filesystem.
  private static final Pattern VULNERABILITY_RESPONSE_PATTERN = Pattern.compile("root:[x*]:0:0:");
  // URL path prefixes to probe; each payload below is appended to each of
  // these directories in turn.
  private static final ImmutableList<String> COMMON_DIRECTORIES =
      ImmutableList.of(
          "admin",
          "album",
          "app",
          "assets",
          "bin",
          "console",
          "css",
          "cgi-bin",
          "demo",
          "doc",
          "eqx",
          "files",
          "fs",
          "html",
          "img-sys",
          "jquery_ui",
          "js",
          "media",
          "public",
          "static",
          "tmp",
          "upload",
          "xls",
          "scripts");
  // First payload: URL-encoded dot segments (CVE-2021-41773, httpd 2.4.49).
  // Second payload: double-URL-encoded variant (CVE-2021-42013, httpd 2.4.50).
  private static final ImmutableList<String> COMMON_PAYLOADS =
      ImmutableList.of(
          "/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e"
              + "/%2e%2e/etc/passwd",
          "/%%32%65%%32%65/%%32%65%%32%65/%%32%65%%32%65/%%32%65%%32%65/%%32%65%%32%65"
              + "/%%32%65%%32%65/%%32%65%%32%65/%%32%65%%32%65/%%32%65%%32%65/%%32%65%%32%65"
              + "/%%32%65%%32%65/%%32%65%%32%65/%%32%65%%32%65/etc/passwd");
  @Inject
  ApacheHttpServerCVE202141773VulnDetector(@UtcClock Clock utcClock, HttpClient httpClient) {
    this.utcClock = checkNotNull(utcClock);
    this.httpClient = checkNotNull(httpClient);
  }
  /** Probes every matched web service and reports those where the traversal succeeded. */
  @Override
  public DetectionReportList detect(
      TargetInfo targetInfo, ImmutableList<NetworkService> matchedServices) {
    return DetectionReportList.newBuilder()
        .addAllDetectionReports(
            matchedServices.stream()
                .filter(NetworkServiceUtils::isWebService)
                .map(this::checkService)
                .filter(CheckResult::isVulnerable)
                .map(checkResult -> buildDetectionReport(targetInfo, checkResult))
                .collect(toImmutableList()))
        .build();
  }
  // Tries every directory/payload combination and stops at the first hit.
  private CheckResult checkService(NetworkService networkService) {
    for (String dir : COMMON_DIRECTORIES) {
      for (String payload : COMMON_PAYLOADS) {
        CheckResult checkResult = checkUrlWithCommonDirectory(networkService, dir, payload);
        if (checkResult.isVulnerable()) {
          return checkResult;
        }
      }
    }
    return CheckResult.buildForSecureService(networkService);
  }
  // Sends a single traversal request; the service is reported vulnerable only
  // when the response is 200 OK and the body contains the passwd marker.
  private CheckResult checkUrlWithCommonDirectory(
      NetworkService networkService, String directory, String payload) {
    String targetUri =
        String.format(
            "%s%s%s",
            NetworkServiceUtils.buildWebApplicationRootUrl(networkService), directory, payload);
    try {
      // NOTE(review): sendAsIs presumably skips URL canonicalization, which
      // would otherwise collapse the encoded dot segments - confirm against
      // HttpClient's documentation.
      HttpResponse response = httpClient.sendAsIs(get(targetUri).withEmptyHeaders().build());
      Optional<String> body = response.bodyString();
      if (response.status() == HttpStatus.OK
          && body.isPresent()
          && VULNERABILITY_RESPONSE_PATTERN.matcher(body.get()).find()) {
        logger.atInfo().log("Received vulnerable response from target %s.", targetUri);
        return CheckResult.buildForVulnerableDetection(networkService, targetUri, response);
      }
    } catch (IOException e) {
      logger.atWarning().withCause(e).log("Unable to query '%s'.", targetUri);
    }
    return CheckResult.buildForSecureService(networkService);
  }
  // Assembles the DetectionReport for a confirmed finding.
  private DetectionReport buildDetectionReport(TargetInfo targetInfo, CheckResult checkResult) {
    NetworkService vulnerableNetworkService = checkResult.networkService();
    return DetectionReport.newBuilder()
        .setTargetInfo(targetInfo)
        .setNetworkService(vulnerableNetworkService)
        .setDetectionTimestamp(Timestamps.fromMillis(Instant.now(utcClock).toEpochMilli()))
        .setDetectionStatus(DetectionStatus.VULNERABILITY_VERIFIED)
        .setVulnerability(
            Vulnerability.newBuilder()
                .setMainId(
                    VulnerabilityId.newBuilder()
                        .setPublisher("TSUNAMI_COMMUNITY")
                        .setValue("CVE_2021_41773"))
                .setSeverity(Severity.HIGH)
                .setTitle("Apache HTTP Server 2.4.49 Path traversal and disclosure vulnerability")
                .setDescription(
                    "A flaw was found in a change made to path normalization in Apache HTTP Server "
                        + "2.4.49. An attacker could use a path traversal attack to map URLs to "
                        + "files outside the expected document root. "
                        + "If files outside of the document root "
                        + "are not protected by \"require all denied\" these requests can succeed. "
                        + "Additionally this flaw could leak the source of interpreted files "
                        + "like CGI scripts. "
                        + "This issue is known to be exploited in the wild. "
                        + "This issue affects Apache 2.4.49 and 2.4.50 but not earlier versions. "
                        + "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41773 "
                        + "https://httpd.apache.org/security/vulnerabilities_24.html")
                .setRecommendation("Update to 2.4.51 release.")
                .addAdditionalDetails(buildAdditionalDetail(checkResult)))
        .build();
  }
  // Captures the probed URL plus the raw response (status line, headers and
  // body) as evidence attached to the report.
  private AdditionalDetail buildAdditionalDetail(CheckResult checkResult) {
    checkState(checkResult.isVulnerable());
    checkState(checkResult.vulnerableUrl().isPresent());
    checkState(checkResult.response().isPresent());
    HttpResponse response = checkResult.response().get();
    StringBuilder reportBuilder = new StringBuilder();
    reportBuilder
        .append("Vulnerable target:\n")
        .append(checkResult.vulnerableUrl().get())
        .append("\n\nResponse:\n")
        .append(response.status().code())
        .append(' ')
        .append(response.status())
        .append('\n');
    response
        .headers()
        .names()
        .forEach(
            headerName ->
                response
                    .headers()
                    .getAll(headerName)
                    .forEach(
                        headerValue ->
                            reportBuilder
                                .append(headerName)
                                .append(": ")
                                .append(headerValue)
                                .append('\n')));
    response.bodyString().ifPresent(body -> reportBuilder.append('\n').append(body));
    return AdditionalDetail.newBuilder()
        .setTextData(TextData.newBuilder().setText(reportBuilder.toString()))
        .build();
  }
  /** Immutable result of probing one service: vulnerable or not, plus evidence. */
  @AutoValue
  abstract static class CheckResult {
    abstract boolean isVulnerable();
    abstract NetworkService networkService();
    // Present only when isVulnerable() is true.
    abstract Optional<String> vulnerableUrl();
    // Present only when isVulnerable() is true.
    abstract Optional<HttpResponse> response();
    static CheckResult buildForVulnerableDetection(
        NetworkService networkService, String url, HttpResponse response) {
      return new AutoValue_ApacheHttpServerCVE202141773VulnDetector_CheckResult(
          true, networkService, Optional.of(url), Optional.of(response));
    }
    static CheckResult buildForSecureService(NetworkService networkService) {
      return new AutoValue_ApacheHttpServerCVE202141773VulnDetector_CheckResult(
          false, networkService, Optional.empty(), Optional.empty());
    }
  }
}
|
|
/*
* Copyright © 2014-2016 NetApp, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* DO NOT EDIT THIS CODE BY HAND! It has been generated with jsvcgen.
*/
package com.solidfire.element.api;
import com.solidfire.gson.Gson;
import com.solidfire.core.client.Attributes;
import com.solidfire.gson.annotations.SerializedName;
import com.solidfire.core.annotation.Since;
import com.solidfire.core.javautil.Optional;
import java.io.Serializable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Objects;
/**
* CreateInitiator
* Object containing characteristics of each new initiator to be created.
**/
public class CreateInitiator implements Serializable {
    // NOTE: Optional below is com.solidfire.core.javautil.Optional (see the
    // imports), not java.util.Optional. Each field maps to the JSON property
    // named in its @SerializedName annotation.
    public static final long serialVersionUID = -5226418898391914727L;
    @SerializedName("name") private String name;
    @SerializedName("alias") private Optional<String> alias;
    @SerializedName("volumeAccessGroupID") private Optional<Long> volumeAccessGroupID;
    @SerializedName("attributes") private Optional<Attributes> attributes;
    @SerializedName("requireChap") private Optional<Boolean> requireChap;
    @SerializedName("chapUsername") private Optional<String> chapUsername;
    @SerializedName("initiatorSecret") private Optional<CHAPSecret> initiatorSecret;
    @SerializedName("targetSecret") private Optional<CHAPSecret> targetSecret;
    @SerializedName("virtualNetworkIDs") private Optional<Long[]> virtualNetworkIDs;
    // empty constructor; fields remain null until set or deserialized
    @Since("7.0")
    public CreateInitiator() {}
    // parameterized constructor (API version 7.0); null Optional arguments
    // are normalized to absent Optionals so those fields are never null
    @Since("7.0")
    public CreateInitiator(
        String name,
        Optional<String> alias,
        Optional<Long> volumeAccessGroupID,
        Optional<Attributes> attributes,
        Optional<Boolean> requireChap,
        Optional<String> chapUsername,
        Optional<CHAPSecret> initiatorSecret,
        Optional<CHAPSecret> targetSecret
    )
    {
        this.name = name;
        this.alias = (alias == null) ? Optional.<String>empty() : alias;
        this.volumeAccessGroupID = (volumeAccessGroupID == null) ? Optional.<Long>empty() : volumeAccessGroupID;
        this.attributes = (attributes == null) ? Optional.<Attributes>empty() : attributes;
        this.requireChap = (requireChap == null) ? Optional.<Boolean>empty() : requireChap;
        this.chapUsername = (chapUsername == null) ? Optional.<String>empty() : chapUsername;
        this.initiatorSecret = (initiatorSecret == null) ? Optional.<CHAPSecret>empty() : initiatorSecret;
        this.targetSecret = (targetSecret == null) ? Optional.<CHAPSecret>empty() : targetSecret;
    }
    // parameterized constructor (API version 12.0); adds virtualNetworkIDs on
    // top of the 7.0 form, with the same null-to-absent normalization
    @Since("12.0")
    public CreateInitiator(
        String name,
        Optional<String> alias,
        Optional<Long> volumeAccessGroupID,
        Optional<Attributes> attributes,
        Optional<Boolean> requireChap,
        Optional<String> chapUsername,
        Optional<CHAPSecret> initiatorSecret,
        Optional<CHAPSecret> targetSecret,
        Optional<Long[]> virtualNetworkIDs
    )
    {
        this.name = name;
        this.alias = (alias == null) ? Optional.<String>empty() : alias;
        this.volumeAccessGroupID = (volumeAccessGroupID == null) ? Optional.<Long>empty() : volumeAccessGroupID;
        this.attributes = (attributes == null) ? Optional.<Attributes>empty() : attributes;
        this.requireChap = (requireChap == null) ? Optional.<Boolean>empty() : requireChap;
        this.chapUsername = (chapUsername == null) ? Optional.<String>empty() : chapUsername;
        this.initiatorSecret = (initiatorSecret == null) ? Optional.<CHAPSecret>empty() : initiatorSecret;
        this.targetSecret = (targetSecret == null) ? Optional.<CHAPSecret>empty() : targetSecret;
        this.virtualNetworkIDs = (virtualNetworkIDs == null) ? Optional.<Long[]>empty() : virtualNetworkIDs;
    }
    /**
     * The name of the initiator (IQN or WWPN) to create.
     **/
    public String getName() { return this.name; }
    public void setName(String name) {
        // unlike the Optional-typed setters, no null normalization here
        this.name = name;
    }
    /**
     * The friendly name to assign to this initiator.
     **/
    public Optional<String> getAlias() { return this.alias; }
    public void setAlias(Optional<String> alias) {
        // null is coerced to an absent Optional so the field is never null
        this.alias = (alias == null) ? Optional.<String>empty() : alias;
    }
    /**
     * The ID of the volume access group to which this newly created initiator will be added.
     **/
    public Optional<Long> getVolumeAccessGroupID() { return this.volumeAccessGroupID; }
    public void setVolumeAccessGroupID(Optional<Long> volumeAccessGroupID) {
        // null is coerced to an absent Optional so the field is never null
        this.volumeAccessGroupID = (volumeAccessGroupID == null) ? Optional.<Long>empty() : volumeAccessGroupID;
    }
    /**
     * A set of JSON attributes assigned to this initiator. (JSON Object)
     **/
    public Optional<Attributes> getAttributes() { return this.attributes; }
    public void setAttributes(Optional<Attributes> attributes) {
        // null is coerced to an absent Optional so the field is never null
        this.attributes = (attributes == null) ? Optional.<Attributes>empty() : attributes;
    }
    /**
     * "requireChap" determines if the initiator is required to use CHAP during session login. CHAP is optional if "requireChap" is false.
     **/
    public Optional<Boolean> getRequireChap() { return this.requireChap; }
    public void setRequireChap(Optional<Boolean> requireChap) {
        // null is coerced to an absent Optional so the field is never null
        this.requireChap = (requireChap == null) ? Optional.<Boolean>empty() : requireChap;
    }
    /**
     * The CHAP username for this initiator. Defaults to the initiator name (IQN) if not specified during creation and "requireChap" is true.
     **/
    public Optional<String> getChapUsername() { return this.chapUsername; }
    public void setChapUsername(Optional<String> chapUsername) {
        // null is coerced to an absent Optional so the field is never null
        this.chapUsername = (chapUsername == null) ? Optional.<String>empty() : chapUsername;
    }
    /**
     * The CHAP secret used for authentication of the initiator. Defaults to a randomly generated secret if not specified during creation and "requireChap" is true.
     **/
    public Optional<CHAPSecret> getInitiatorSecret() { return this.initiatorSecret; }
    public void setInitiatorSecret(Optional<CHAPSecret> initiatorSecret) {
        // null is coerced to an absent Optional so the field is never null
        this.initiatorSecret = (initiatorSecret == null) ? Optional.<CHAPSecret>empty() : initiatorSecret;
    }
    /**
     * The CHAP secret used for authentication of the target. Defaults to a randomly generated secret if not specified during creation and "requireChap" is true.
     **/
    public Optional<CHAPSecret> getTargetSecret() { return this.targetSecret; }
    public void setTargetSecret(Optional<CHAPSecret> targetSecret) {
        // null is coerced to an absent Optional so the field is never null
        this.targetSecret = (targetSecret == null) ? Optional.<CHAPSecret>empty() : targetSecret;
    }
    /**
     * The list of virtual network identifiers associated with this initiator.
     * If one or more are defined, this initiator will only be able to login to the specified virtual networks.
     * If no virtual networks are defined this initiator can login to all networks.
     **/
    public Optional<Long[]> getVirtualNetworkIDs() { return this.virtualNetworkIDs; }
    public void setVirtualNetworkIDs(Optional<Long[]> virtualNetworkIDs) {
        // null is coerced to an absent Optional so the field is never null
        this.virtualNetworkIDs = (virtualNetworkIDs == null) ? Optional.<Long[]>empty() : virtualNetworkIDs;
    }
    /** Field-by-field equality via {@code Objects.equals} on each member. */
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        CreateInitiator that = (CreateInitiator) o;
        // NOTE(review): virtualNetworkIDs wraps a Long[]; arrays compare by
        // reference, so content-equal arrays may compare unequal unless the
        // custom Optional compares contents - confirm against
        // com.solidfire.core.javautil.Optional.equals.
        return
            Objects.equals(name, that.name) &&
            Objects.equals(alias, that.alias) &&
            Objects.equals(volumeAccessGroupID, that.volumeAccessGroupID) &&
            Objects.equals(attributes, that.attributes) &&
            Objects.equals(requireChap, that.requireChap) &&
            Objects.equals(chapUsername, that.chapUsername) &&
            Objects.equals(initiatorSecret, that.initiatorSecret) &&
            Objects.equals(targetSecret, that.targetSecret) &&
            Objects.equals(virtualNetworkIDs, that.virtualNetworkIDs);
    }
    /**
     * Hash over all fields. NOTE(review): hashing an Optional<Long[]> uses the
     * array's identity hash unless the custom Optional hashes contents -
     * confirm against com.solidfire.core.javautil.Optional.hashCode.
     */
    @Override
    public int hashCode() {
        return Objects.hash( name,alias,volumeAccessGroupID,attributes,requireChap,chapUsername,initiatorSecret,targetSecret,virtualNetworkIDs );
    }
    /**
     * Returns a mutable map of property name to value; Optional-typed fields
     * are stored still wrapped (the Optional itself, not its contents).
     */
    public java.util.Map<String, Object> toMap() {
        java.util.Map<String, Object> map = new HashMap<>();
        map.put("name", name);
        map.put("alias", alias);
        map.put("volumeAccessGroupID", volumeAccessGroupID);
        map.put("attributes", attributes);
        map.put("requireChap", requireChap);
        map.put("chapUsername", chapUsername);
        map.put("initiatorSecret", initiatorSecret);
        map.put("targetSecret", targetSecret);
        map.put("virtualNetworkIDs", virtualNetworkIDs);
        return map;
    }
    /**
     * Renders the object as a JSON-like string. Absent or null Optionals are
     * printed as the literal "null"; the trailing comma before the closing
     * brace is stripped at the end.
     */
    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder();
        Gson gson = new Gson();
        sb.append( "{ " );
        sb.append(" name : ").append(gson.toJson(name)).append(",");
        if(null != alias && alias.isPresent()){
            sb.append(" alias : ").append(gson.toJson(alias)).append(",");
        }
        else{
            sb.append(" alias : ").append("null").append(",");
        }
        if(null != volumeAccessGroupID && volumeAccessGroupID.isPresent()){
            sb.append(" volumeAccessGroupID : ").append(gson.toJson(volumeAccessGroupID)).append(",");
        }
        else{
            sb.append(" volumeAccessGroupID : ").append("null").append(",");
        }
        if(null != attributes && attributes.isPresent()){
            sb.append(" attributes : ").append(gson.toJson(attributes)).append(",");
        }
        else{
            sb.append(" attributes : ").append("null").append(",");
        }
        if(null != requireChap && requireChap.isPresent()){
            sb.append(" requireChap : ").append(gson.toJson(requireChap)).append(",");
        }
        else{
            sb.append(" requireChap : ").append("null").append(",");
        }
        if(null != chapUsername && chapUsername.isPresent()){
            sb.append(" chapUsername : ").append(gson.toJson(chapUsername)).append(",");
        }
        else{
            sb.append(" chapUsername : ").append("null").append(",");
        }
        if(null != initiatorSecret && initiatorSecret.isPresent()){
            sb.append(" initiatorSecret : ").append(gson.toJson(initiatorSecret)).append(",");
        }
        else{
            sb.append(" initiatorSecret : ").append("null").append(",");
        }
        if(null != targetSecret && targetSecret.isPresent()){
            sb.append(" targetSecret : ").append(gson.toJson(targetSecret)).append(",");
        }
        else{
            sb.append(" targetSecret : ").append("null").append(",");
        }
        if(null != virtualNetworkIDs && virtualNetworkIDs.isPresent()){
            sb.append(" virtualNetworkIDs : ").append(gson.toJson(virtualNetworkIDs)).append(",");
        }
        else{
            sb.append(" virtualNetworkIDs : ").append("null").append(",");
        }
        sb.append( " }" );
        // drop the comma left after the last printed property
        if(sb.lastIndexOf(", }") != -1)
            sb.deleteCharAt(sb.lastIndexOf(", }"));
        return sb.toString();
    }
/** Returns a fresh, empty {@link Builder} for assembling a {@code CreateInitiator}. */
public static Builder builder() {
    return new Builder();
}
/** Returns a new {@link Builder} pre-populated with this instance's current field values. */
public final Builder asBuilder() {
    return new Builder().buildFrom(this);
}
/**
 * Fluent builder for {@code CreateInitiator}. The {@code optionalXxx} setters accept nullable
 * values and normalize {@code null} to {@link Optional#empty()}.
 */
public static class Builder {
    private String name;
    private Optional<String> alias;
    private Optional<Long> volumeAccessGroupID;
    private Optional<Attributes> attributes;
    private Optional<Boolean> requireChap;
    private Optional<String> chapUsername;
    private Optional<CHAPSecret> initiatorSecret;
    private Optional<CHAPSecret> targetSecret;
    private Optional<Long[]> virtualNetworkIDs;

    private Builder() { }

    /** Assembles a {@code CreateInitiator} from the values accumulated in this builder. */
    public CreateInitiator build() {
        return new CreateInitiator(
                name,
                alias,
                volumeAccessGroupID,
                attributes,
                requireChap,
                chapUsername,
                initiatorSecret,
                targetSecret,
                virtualNetworkIDs);
    }

    /** Copies every field from an existing request into this builder. */
    private CreateInitiator.Builder buildFrom(final CreateInitiator req) {
        this.name = req.name;
        this.alias = req.alias;
        this.volumeAccessGroupID = req.volumeAccessGroupID;
        this.attributes = req.attributes;
        this.requireChap = req.requireChap;
        this.chapUsername = req.chapUsername;
        this.initiatorSecret = req.initiatorSecret;
        this.targetSecret = req.targetSecret;
        this.virtualNetworkIDs = req.virtualNetworkIDs;
        return this;
    }

    public CreateInitiator.Builder name(final String name) {
        this.name = name;
        return this;
    }

    public CreateInitiator.Builder optionalAlias(final String alias) {
        this.alias = Optional.ofNullable(alias);
        return this;
    }

    public CreateInitiator.Builder optionalVolumeAccessGroupID(final Long volumeAccessGroupID) {
        this.volumeAccessGroupID = Optional.ofNullable(volumeAccessGroupID);
        return this;
    }

    public CreateInitiator.Builder optionalAttributes(final Attributes attributes) {
        this.attributes = Optional.ofNullable(attributes);
        return this;
    }

    public CreateInitiator.Builder optionalRequireChap(final Boolean requireChap) {
        this.requireChap = Optional.ofNullable(requireChap);
        return this;
    }

    public CreateInitiator.Builder optionalChapUsername(final String chapUsername) {
        this.chapUsername = Optional.ofNullable(chapUsername);
        return this;
    }

    public CreateInitiator.Builder optionalInitiatorSecret(final CHAPSecret initiatorSecret) {
        this.initiatorSecret = Optional.ofNullable(initiatorSecret);
        return this;
    }

    public CreateInitiator.Builder optionalTargetSecret(final CHAPSecret targetSecret) {
        this.targetSecret = Optional.ofNullable(targetSecret);
        return this;
    }

    public CreateInitiator.Builder optionalVirtualNetworkIDs(final Long[] virtualNetworkIDs) {
        this.virtualNetworkIDs = Optional.ofNullable(virtualNetworkIDs);
        return this;
    }
}
}
|
|
/*
* Copyright (c) 2016 Gridtec. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package at.gridtec.lambda4j.operator.unary;
import at.gridtec.lambda4j.Lambda;
import at.gridtec.lambda4j.consumer.ThrowableIntConsumer;
import at.gridtec.lambda4j.core.exception.ThrownByFunctionalInterfaceException;
import at.gridtec.lambda4j.core.util.ThrowableUtils;
import at.gridtec.lambda4j.function.ThrowableFunction;
import at.gridtec.lambda4j.function.ThrowableIntFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableBooleanToIntFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableByteToIntFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableCharToIntFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableDoubleToIntFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableFloatToIntFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableIntToByteFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableIntToCharFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableIntToDoubleFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableIntToFloatFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableIntToLongFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableIntToShortFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableLongToIntFunction;
import at.gridtec.lambda4j.function.conversion.ThrowableShortToIntFunction;
import at.gridtec.lambda4j.function.to.ThrowableToIntFunction;
import at.gridtec.lambda4j.predicate.ThrowableIntPredicate;
import javax.annotation.Nonnegative;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.IntUnaryOperator;
/**
 * Represents an operation that accepts one {@code int}-valued input argument and produces an
 * {@code int}-valued result which is able to throw any {@link Throwable}.
 * This is a primitive specialization of {@link ThrowableUnaryOperator}.
 * <p>
 * This is a {@link FunctionalInterface} whose functional method is {@link #applyAsIntThrows(int)}.
 *
 * @param <X> The type of the throwable to be thrown by this operator
 * @apiNote This is a throwable JDK lambda.
 * @see ThrowableUnaryOperator
 */
@SuppressWarnings("unused")
@FunctionalInterface
public interface ThrowableIntUnaryOperator<X extends Throwable> extends Lambda, IntUnaryOperator {
/**
 * Constructs a {@link ThrowableIntUnaryOperator} based on a lambda expression or a method reference. Thereby the
 * given lambda expression or method reference is returned on an as-is basis to implicitly transform it to the
 * desired type. With this method, it is possible to ensure that the correct type is used from a lambda expression
 * or method reference.
 *
 * @param <X> The type of the throwable to be thrown by this operator
 * @param expression A lambda expression or (typically) a method reference, e.g. {@code this::method}
 * @return A {@code ThrowableIntUnaryOperator} from given lambda expression or method reference.
 * @implNote This implementation allows the given argument to be {@code null}; in that case {@code null} is
 * returned.
 * @see <a href="https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html#syntax">Lambda
 * Expression</a>
 * @see <a href="https://docs.oracle.com/javase/tutorial/java/javaOO/methodreferences.html">Method Reference</a>
 */
static <X extends Throwable> ThrowableIntUnaryOperator<X> of(
        @Nullable final ThrowableIntUnaryOperator<X> expression) {
    return expression;
}
/**
 * Calls the given {@link ThrowableIntUnaryOperator} with the given argument and returns its result.
 *
 * @param <X> The type of the throwable to be thrown by this operator
 * @param operator The operator to be called
 * @param value The argument to the operator
 * @return The result from the given {@code ThrowableIntUnaryOperator}.
 * @throws NullPointerException If given argument is {@code null}
 * @throws X Any throwable from this operators action
 */
static <X extends Throwable> int call(@Nonnull final ThrowableIntUnaryOperator<? extends X> operator,
        int value) throws X {
    // requireNonNull returns its argument, so validation and invocation can be chained.
    return Objects.requireNonNull(operator).applyAsIntThrows(value);
}
/**
 * Returns a {@link ThrowableIntUnaryOperator} that always returns its input argument unchanged.
 *
 * @param <X> The type of the throwable to be thrown by this operator
 * @return A {@code ThrowableIntUnaryOperator} that always returns its input argument
 */
@Nonnull
static <X extends Throwable> ThrowableIntUnaryOperator<X> identity() {
    return v -> v;
}
/**
 * Creates a {@link ThrowableIntUnaryOperator} which ignores its input and always returns a given value.
 *
 * @param <X> The type of the throwable to be thrown by this operator
 * @param ret The return value for the constant
 * @return A {@code ThrowableIntUnaryOperator} which always returns a given value.
 */
@Nonnull
static <X extends Throwable> ThrowableIntUnaryOperator<X> constant(int ret) {
    return ignored -> ret;
}
/**
 * Applies this operator to the given argument. This is the interface's functional method.
 *
 * @param value The argument to the operator
 * @return The return value from the operator, which is its result.
 * @throws X Any throwable from this operators action
 * @see #applyAsInt(int)
 */
int applyAsIntThrows(int value) throws X;
/**
 * Applies this operator to the given argument.
 *
 * @param value The argument to the operator
 * @return The return value from the operator, which is its result.
 * @apiNote This method mainly exists to use this {@link ThrowableIntUnaryOperator} in JRE specific methods only
 * accepting {@link IntUnaryOperator}. If this operator should be applied, then the {@link #applyAsIntThrows(int)}
 * method should be used.
 * @apiNote Overrides the {@link IntUnaryOperator#applyAsInt(int)} method by using a redefinition as default method.
 * This implementation calls the {@link #applyAsIntThrows(int)} method of this function and catches the eventually
 * thrown {@link Throwable} from it. If it is of type {@link RuntimeException} or {@link Error} it is rethrown as
 * is. Other {@code Throwable} types are wrapped in a {@link ThrownByFunctionalInterfaceException}.
 */
@Override
default int applyAsInt(int value) {
    // Delegates to nest(), which performs the rethrow/wrap handling described above.
    // (A commented-out hand-rolled try/catch equivalent, marked "TODO: Remove", was deleted.)
    return nest().applyAsInt(value);
}
/**
 * Returns the number of arguments for this operator.
 *
 * @return The number of arguments for this operator.
 * @implSpec The default implementation always returns {@code 1}, as a unary operator takes a
 * single {@code int} argument.
 */
@Nonnegative
default int arity() {
    return 1;
}
/**
 * Returns a composed {@link ThrowableToIntFunction} that first applies the {@code before} function to its input,
 * and then applies this operator to the result.
 *
 * @param <A> The type of the argument to the given function, and of composed function
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableToIntFunction} that first applies the {@code before} function to its input,
 * and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle every type.
 */
@Nonnull
default <A> ThrowableToIntFunction<A, X> compose(
        @Nonnull final ThrowableToIntFunction<? super A, ? extends X> before) {
    Objects.requireNonNull(before);
    return arg -> applyAsIntThrows(before.applyAsIntThrows(arg));
}
/**
 * Returns a composed {@link ThrowableBooleanToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code boolean} input.
 *
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableBooleanToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * boolean}.
 */
@Nonnull
default ThrowableBooleanToIntFunction<X> composeFromBoolean(
        @Nonnull final ThrowableBooleanToIntFunction<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableByteToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code byte} input.
 *
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableByteToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * byte}.
 */
@Nonnull
default ThrowableByteToIntFunction<X> composeFromByte(
        @Nonnull final ThrowableByteToIntFunction<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableCharToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code char} input.
 *
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableCharToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * char}.
 */
@Nonnull
default ThrowableCharToIntFunction<X> composeFromChar(
        @Nonnull final ThrowableCharToIntFunction<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableDoubleToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code double} input.
 *
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableDoubleToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * double}.
 */
@Nonnull
default ThrowableDoubleToIntFunction<X> composeFromDouble(
        @Nonnull final ThrowableDoubleToIntFunction<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableFloatToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code float} input.
 *
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableFloatToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * float}.
 */
@Nonnull
default ThrowableFloatToIntFunction<X> composeFromFloat(
        @Nonnull final ThrowableFloatToIntFunction<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntUnaryOperator} that first applies the {@code before} operator to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code int} input.
 *
 * @param before The operator to apply before this operator is applied
 * @return A composed {@code ThrowableIntUnaryOperator} that first applies the {@code before} operator to its input,
 * and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * int}.
 */
@Nonnull
default ThrowableIntUnaryOperator<X> composeFromInt(@Nonnull final ThrowableIntUnaryOperator<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableLongToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code long} input.
 *
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableLongToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * long}.
 */
@Nonnull
default ThrowableLongToIntFunction<X> composeFromLong(
        @Nonnull final ThrowableLongToIntFunction<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableShortToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result. This is a convenience method for composing with an
 * operation that accepts {@code short} input.
 *
 * @param before The function to apply before this operator is applied
 * @return A composed {@code ThrowableShortToIntFunction} that first applies the {@code before} function to its
 * input, and then applies this operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to handle primitive values. In this case this is {@code
 * short}.
 */
@Nonnull
default ThrowableShortToIntFunction<X> composeFromShort(
        @Nonnull final ThrowableShortToIntFunction<? extends X> before) {
    Objects.requireNonNull(before);
    return v -> applyAsIntThrows(before.applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntFunction} that first applies this operator to its input, and then applies
 * the {@code after} function to the result.
 *
 * @param <S> The type of return value from the {@code after} function, and of the composed function
 * @param after The function to apply after this operator is applied
 * @return A composed {@code ThrowableIntFunction} that first applies this operator to its input, and then applies
 * the {@code after} function to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return every type.
 */
@Nonnull
default <S> ThrowableIntFunction<S, X> andThen(
        @Nonnull final ThrowableIntFunction<? extends S, ? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntPredicate} that first applies this operator to its input, and then applies
 * the {@code after} predicate to the result. This is a convenience method for transforming this primitive operator
 * into an operation returning {@code boolean}.
 *
 * @param after The predicate to apply after this operator is applied
 * @return A composed {@code ThrowableIntPredicate} that first applies this operator to its input, and then applies
 * the {@code after} predicate to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * boolean}.
 */
@Nonnull
default ThrowableIntPredicate<X> andThenToBoolean(@Nonnull final ThrowableIntPredicate<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.testThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntToByteFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result. This is a convenience method for transforming this primitive
 * operator into an operation returning {@code byte}.
 *
 * @param after The function to apply after this operator is applied
 * @return A composed {@code ThrowableIntToByteFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * byte}.
 */
@Nonnull
default ThrowableIntToByteFunction<X> andThenToByte(@Nonnull final ThrowableIntToByteFunction<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyAsByteThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntToCharFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result. This is a convenience method for transforming this primitive
 * operator into an operation returning {@code char}.
 *
 * @param after The function to apply after this operator is applied
 * @return A composed {@code ThrowableIntToCharFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * char}.
 */
@Nonnull
default ThrowableIntToCharFunction<X> andThenToChar(@Nonnull final ThrowableIntToCharFunction<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyAsCharThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntToDoubleFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result. This is a convenience method for transforming this primitive
 * operator into an operation returning {@code double}.
 *
 * @param after The function to apply after this operator is applied
 * @return A composed {@code ThrowableIntToDoubleFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * double}.
 */
@Nonnull
default ThrowableIntToDoubleFunction<X> andThenToDouble(
        @Nonnull final ThrowableIntToDoubleFunction<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyAsDoubleThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntToFloatFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result. This is a convenience method for transforming this primitive
 * operator into an operation returning {@code float}.
 *
 * @param after The function to apply after this operator is applied
 * @return A composed {@code ThrowableIntToFloatFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * float}.
 */
@Nonnull
default ThrowableIntToFloatFunction<X> andThenToFloat(
        @Nonnull final ThrowableIntToFloatFunction<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyAsFloatThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntUnaryOperator} that first applies this operator to its input, and then
 * applies the {@code after} operator to the result. This is a convenience method for chaining another primitive
 * {@code int} operation after this one.
 *
 * @param after The operator to apply after this operator is applied
 * @return A composed {@code ThrowableIntUnaryOperator} that first applies this operator to its input, and then
 * applies the {@code after} operator to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * int}.
 */
@Nonnull
default ThrowableIntUnaryOperator<X> andThenToInt(@Nonnull final ThrowableIntUnaryOperator<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyAsIntThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntToLongFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result. This is a convenience method for transforming this primitive
 * operator into an operation returning {@code long}.
 *
 * @param after The function to apply after this operator is applied
 * @return A composed {@code ThrowableIntToLongFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * long}.
 */
@Nonnull
default ThrowableIntToLongFunction<X> andThenToLong(@Nonnull final ThrowableIntToLongFunction<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyAsLongThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntToShortFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result. This is a convenience method for transforming this primitive
 * operator into an operation returning {@code short}.
 *
 * @param after The function to apply after this operator is applied
 * @return A composed {@code ThrowableIntToShortFunction} that first applies this operator to its input, and then
 * applies the {@code after} function to the result.
 * @throws NullPointerException If given argument is {@code null}
 * @implSpec The input argument of this method is able to return primitive values. In this case this is {@code
 * short}.
 */
@Nonnull
default ThrowableIntToShortFunction<X> andThenToShort(
        @Nonnull final ThrowableIntToShortFunction<? extends X> after) {
    Objects.requireNonNull(after);
    return v -> after.applyAsShortThrows(applyAsIntThrows(v));
}
/**
 * Returns a composed {@link ThrowableIntConsumer} that first applies this operator to its input, and then consumes
 * the result using the given {@link ThrowableIntConsumer}.
 *
 * @param consumer The operation which consumes the result from this operation
 * @return A composed {@code ThrowableIntConsumer} that first applies this operator to its input, and then consumes
 * the result using the given {@code ThrowableIntConsumer}.
 * @throws NullPointerException If given argument is {@code null}
 */
@Nonnull
default ThrowableIntConsumer<X> consume(@Nonnull final ThrowableIntConsumer<? extends X> consumer) {
    Objects.requireNonNull(consumer);
    return v -> consumer.acceptThrows(applyAsIntThrows(v));
}
/**
 * Returns a memoized (caching) version of this {@link ThrowableIntUnaryOperator}. Each distinct input is computed
 * once; subsequent calls with the same input return the cached result.
 * <p>
 * Unless the operator and therefore the used cache is garbage-collected, it keeps all memoized values forever.
 *
 * @return A memoized (caching) version of this {@code ThrowableIntUnaryOperator}.
 * @implSpec The cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized operator can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default ThrowableIntUnaryOperator<X> memoized() {
    if (isMemoized()) {
        // Already memoized; wrapping again would only add overhead.
        return this;
    }
    final Map<Integer, Integer> memo = new ConcurrentHashMap<>();
    final Object mutex = new Object();
    // Intersection cast tags the lambda as Memoized so isMemoized() detects it.
    return (ThrowableIntUnaryOperator<X> & Memoized) input -> {
        final int result;
        synchronized (mutex) {
            result = memo.computeIfAbsent(input, ThrowableFunction.of(this::applyAsIntThrows));
        }
        return result;
    };
}
/**
 * Returns a composed {@link ThrowableUnaryOperator} which represents this {@link ThrowableIntUnaryOperator}.
 * Thereby the primitive input argument for this operator is autoboxed. This method provides the possibility to use
 * this {@code ThrowableIntUnaryOperator} with methods provided by the {@code JDK}.
 *
 * @return A composed {@code ThrowableUnaryOperator} which represents this {@code ThrowableIntUnaryOperator}.
 */
@Nonnull
default ThrowableUnaryOperator<Integer, X> boxed() {
    return value -> applyAsIntThrows(value);
}
/**
* Returns a composed {@link IntUnaryOperator2} that applies this operator to its input and nests the thrown {@link
* Throwable} from it. The {@code Throwable} is nested (wrapped) in a {@link ThrownByFunctionalInterfaceException},
* which is constructed from the thrown {@code Throwable}s message and the thrown {@code Throwable} itself.
*
* @return A composed {@link IntUnaryOperator2} that applies this operator to its input and nests the thrown {@code
* Throwable} from it.
* @implNote If thrown {@code Throwable} is of type {@link Error} it is thrown as-is and thus not nested.
* @see #nest(Function)
* @see ThrownByFunctionalInterfaceException
*/
@Nonnull
default IntUnaryOperator2 nest() {
return nest(throwable -> new ThrownByFunctionalInterfaceException(throwable.getMessage(), throwable));
}
/**
* Returns a composed {@link IntUnaryOperator2} that applies this operator to its input and nests the thrown {@link
* Throwable} from it using {@code mapper} operation. Thereby {@code mapper} may modify the thrown {@code
* Throwable}, regarding its implementation, and returns it nested (wrapped) in a {@link RuntimeException}.
*
* @param mapper The operation to map the thrown {@code Throwable} to {@code RuntimeException}
* @return A composed {@link IntUnaryOperator2} that applies this operator to its input and nests the thrown {@code
* Throwable} from it using {@code mapper} operation.
* @throws NullPointerException If given argument is {@code null}
* @implNote If thrown {@code Throwable} is of type {@link Error} it is thrown as-is and thus not nested.
* @see #nest()
*/
@Nonnull
default IntUnaryOperator2 nest(@Nonnull final Function<? super Throwable, ? extends RuntimeException> mapper) {
return recover(throwable -> {
throw mapper.apply(throwable);
});
}
/**
* Returns a composed {@link IntUnaryOperator2} that first applies this operator to its input, and then applies the
* {@code recover} operation if a {@link Throwable} is thrown from this one. The {@code recover} operation is
* represented by a curried operation which is called with throwable information and same argument of this
* operator.
*
* @param recover The operation to apply if this operator throws a {@code Throwable}
* @return A composed {@link IntUnaryOperator2} that first applies this operator to its input, and then applies the
* {@code recover} operation if a {@code Throwable} is thrown from this one.
* @throws NullPointerException If given argument or the returned enclosing operator is {@code null}
* @implSpec The implementation checks that the returned enclosing operator from {@code recover} operation is not
* {@code null}. If it is, then a {@link NullPointerException} with appropriate message is thrown.
* @implNote If thrown {@code Throwable} is of type {@link Error}, it is thrown as-is and thus not passed to {@code
* recover} operation.
*/
@Nonnull
default IntUnaryOperator2 recover(@Nonnull final Function<? super Throwable, ? extends IntUnaryOperator> recover) {
Objects.requireNonNull(recover);
return (value) -> {
try {
return this.applyAsIntThrows(value);
} catch (Error e) {
throw e;
} catch (Throwable throwable) {
final IntUnaryOperator operator = recover.apply(throwable);
Objects.requireNonNull(operator, () -> "recover returned null for " + throwable.getClass() + ": "
+ throwable.getMessage());
return operator.applyAsInt(value);
}
};
}
/**
* Returns a composed {@link IntUnaryOperator2} that applies this operator to its input and sneakily throws the
* thrown {@link Throwable} from it, if it is not of type {@link RuntimeException} or {@link Error}. This means that
* each throwable thrown from the returned composed operator behaves exactly the same as an <em>unchecked</em>
* throwable does. As a result, there is no need to handle the throwable of this operator in the returned composed
* operator by either wrapping it in an <em>unchecked</em> throwable or to declare it in the {@code throws} clause,
* as it would be done in a non sneaky throwing operator.
* <p>
* What sneaky throwing simply does, is to fake out the compiler and thus it bypasses the principle of
* <em>checked</em> throwables. On the JVM (class file) level, all throwables, checked or not, can be thrown
* regardless of the {@code throws} clause of methods, which is why this works at all.
* <p>
* However, when using this method to get a sneaky throwing operator variant of this throwable operator, the
* following advantages, disadvantages and limitations will apply:
* <p>
* If the calling-code is to handle the sneakily thrown throwable, it is required to add it to the {@code throws}
* clause of the method that applies the returned composed operator. The compiler will not force the declaration in
* the {@code throws} clause anymore.
* <p>
* If the calling-code already handles the sneakily thrown throwable, the compiler requires it to be added to the
* {@code throws} clause of the method that applies the returned composed operator. If not added, the compiler will
* error that the caught throwable is never thrown in the corresponding {@code try} block.
* <p>
* If the returned composed operator is directly surrounded by a {@code try}-{@code catch} block to catch the
* sneakily thrown throwable from it, the compiler will error that the caught throwable is never thrown in the
* corresponding {@code try} block.
* <p>
* In any case, if the throwable is not added to the to the {@code throws} clause of the method that applies the
* returned composed operator, the calling-code won't be able to catch the throwable by name. It will bubble and
* probably be caught in some {@code catch} statement, catching a base type such as {@code try { ... }
* catch(RuntimeException e) { ... }} or {@code try { ... } catch(Exception e) { ... }}, but perhaps this is
* intended.
* <p>
* When the called code never throws the specific throwable that it declares, it should obviously be omitted. For
* example: {@code new String(byteArr, "UTF-8") throws UnsupportedEncodingException}, but {@code UTF-8} is
* guaranteed by the Java specification to be always present. Here, the {@code throws} declaration is a nuisance and
* any solution to silence it with minimal boilerplate is welcome. The throwable should therefore be omitted in the
* {@code throws} clause of the method that applies the returned composed operator.
* <p>
* With all that mentioned, the following example will demonstrate this methods correct use:
* <pre>{@code
* // when called with illegal value ClassNotFoundException is thrown
* public Class<?> sneakyThrowingFunctionalInterface(final String className) throws ClassNotFoundException {
* return ThrowableFunction.of(Class::forName) // create the correct throwable functional interface
* .sneakyThrow() // create a non-throwable variant which is able to sneaky throw (this method)
* .apply(className); // apply non-throwable variant -> may sneaky throw a throwable
* }
*
* // call the the method which surround the sneaky throwing functional interface
* public void callingMethod() {
* try {
* final Class<?> clazz = sneakyThrowingFunctionalInterface("some illegal class name");
* // ... do something with clazz ...
* } catch(ClassNotFoundException e) {
* // ... do something with e ...
* }
* }
* }</pre>
* In conclusion, this somewhat contentious ability should be used carefully, of course, with the advantages,
* disadvantages and limitations described above kept in mind.
*
* @return A composed {@link IntUnaryOperator2} that applies this operator to its input and sneakily throws the
* thrown {@link Throwable} from it, unless it is of type {@link RuntimeException} or {@link Error}.
* @implNote If thrown {@link Throwable} is of type {@link RuntimeException} or {@link Error}, it is thrown as-is
* and thus not sneakily thrown.
*/
@Nonnull
default IntUnaryOperator2 sneakyThrow() {
return (value) -> {
try {
return this.applyAsIntThrows(value);
} catch (RuntimeException | Error e) {
throw e;
} catch (Throwable throwable) {
throw ThrowableUtils.sneakyThrow(throwable);
}
};
}
}
|
|
/*
* Copyright (C) 2008 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.client.android.encode;
import android.app.Activity;
import android.app.AlertDialog;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.Point;
import android.net.Uri;
import android.os.Bundle;
import android.os.Environment;
import android.util.Log;
import android.view.Display;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.WindowManager;
import android.widget.ImageView;
import android.widget.TextView;
import com.google.zxing.R;
import com.google.zxing.WriterException;
import com.google.zxing.client.android.Contents;
import com.google.zxing.client.android.FinishListener;
import com.google.zxing.client.android.Intents;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.regex.Pattern;
/**
 * This class encodes data from an Intent into a QR code, and then displays it full screen so that
 * another person can scan it with their device.
 *
 * @author dswitkin@google.com (Daniel Switkin)
 */
public final class EncodeActivity extends Activity {

    private static final String TAG = EncodeActivity.class.getSimpleName();

    private static final int MAX_BARCODE_FILENAME_LENGTH = 24;
    private static final Pattern NOT_ALPHANUMERIC = Pattern.compile("[^A-Za-z0-9]");
    private static final String USE_VCARD_KEY = "USE_VCARD";

    // Null until onResume() successfully encodes the intent contents; reset to null on failure.
    private QRCodeEncoder qrCodeEncoder;

    /**
     * Builds a filesystem-safe file name (without extension) from the barcode contents by
     * replacing every non-alphanumeric character with '_' and truncating the result.
     *
     * @param contents raw barcode contents
     * @return a sanitized name of at most {@link #MAX_BARCODE_FILENAME_LENGTH} characters
     */
    private static CharSequence makeBarcodeFileName(CharSequence contents) {
        String fileName = NOT_ALPHANUMERIC.matcher(contents).replaceAll("_");
        if (fileName.length() > MAX_BARCODE_FILENAME_LENGTH) {
            fileName = fileName.substring(0, MAX_BARCODE_FILENAME_LENGTH);
        }
        return fileName;
    }

    @Override
    public void onCreate(Bundle icicle) {
        super.onCreate(icicle);
        Intent intent = getIntent();
        if (intent == null) {
            finish();
        } else {
            String action = intent.getAction();
            // Only our own ENCODE action and the platform SEND action are supported.
            if (Intents.Encode.ACTION.equals(action) || Intent.ACTION_SEND.equals(action)) {
                setContentView(R.layout.activity_encode);
            } else {
                finish();
            }
        }
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        MenuInflater menuInflater = getMenuInflater();
        menuInflater.inflate(R.menu.encode, menu);
        // The menu item offers switching to the *other* contact format than the one shown now.
        boolean useVcard = qrCodeEncoder != null && qrCodeEncoder.isUseVCard();
        int encodeNameResource = useVcard ? R.string.menu_encode_mecard : R.string.menu_encode_vcard;
        MenuItem encodeItem = menu.findItem(R.id.menu_encode);
        encodeItem.setTitle(encodeNameResource);
        Intent intent = getIntent();
        if (intent != null) {
            // The vCard/MECARD toggle only makes sense for contact barcodes.
            String type = intent.getStringExtra(Intents.Encode.TYPE);
            encodeItem.setVisible(Contents.Type.CONTACT.equals(type));
        }
        return super.onCreateOptionsMenu(menu);
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        int i = item.getItemId();
        if (i == R.id.menu_share) {
            share();
            return true;
        } else if (i == R.id.menu_encode) {
            Intent intent = getIntent();
            // Guard against a missing intent AND against a failed encode: qrCodeEncoder is null
            // after an encoding failure, and dereferencing it here used to risk an NPE.
            if (intent == null || qrCodeEncoder == null) {
                return false;
            }
            // Relaunch this activity with the contact format flipped (vCard <-> MECARD).
            intent.putExtra(USE_VCARD_KEY, !qrCodeEncoder.isUseVCard());
            intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
            startActivity(intent);
            finish();
            return true;
        } else {
            return false;
        }
    }

    /**
     * Saves the current barcode as a PNG under BarcodeScanner/Barcodes on external storage and
     * fires an ACTION_SEND chooser for it. Silently returns when there is nothing to share;
     * shows an error dialog when external storage is not writable.
     */
    private void share() {
        QRCodeEncoder encoder = qrCodeEncoder;
        if (encoder == null) { // Odd
            Log.w(TAG, "No existing barcode to send?");
            return;
        }
        String contents = encoder.getContents();
        if (contents == null) {
            Log.w(TAG, "No existing barcode to send?");
            return;
        }
        Bitmap bitmap;
        try {
            bitmap = encoder.encodeAsBitmap();
        } catch (WriterException we) {
            Log.w(TAG, we);
            return;
        }
        if (bitmap == null) {
            return;
        }
        File bsRoot = new File(Environment.getExternalStorageDirectory(), "BarcodeScanner");
        File barcodesRoot = new File(bsRoot, "Barcodes");
        if (!barcodesRoot.exists() && !barcodesRoot.mkdirs()) {
            Log.w(TAG, "Couldn't make dir " + barcodesRoot);
            showErrorMessage(R.string.msg_unmount_usb);
            return;
        }
        File barcodeFile = new File(barcodesRoot, makeBarcodeFileName(contents) + ".png");
        // Only warn when an existing file genuinely could not be removed; File.delete() also
        // returns false for a file that simply isn't there, which used to log spurious warnings.
        if (barcodeFile.exists() && !barcodeFile.delete()) {
            Log.w(TAG, "Could not delete " + barcodeFile);
            // continue anyway
        }
        FileOutputStream fos = null;
        try {
            fos = new FileOutputStream(barcodeFile);
            // The quality argument is ignored for the lossless PNG format.
            bitmap.compress(Bitmap.CompressFormat.PNG, 0, fos);
        } catch (FileNotFoundException fnfe) {
            Log.w(TAG, "Couldn't access file " + barcodeFile + " due to " + fnfe);
            showErrorMessage(R.string.msg_unmount_usb);
            return;
        } finally {
            if (fos != null) {
                try {
                    fos.close();
                } catch (IOException ioe) {
                    // do nothing
                }
            }
        }
        Intent intent = new Intent(Intent.ACTION_SEND, Uri.parse("mailto:"));
        intent.putExtra(Intent.EXTRA_SUBJECT, getString(R.string.app_name) + " - " + encoder.getTitle());
        intent.putExtra(Intent.EXTRA_TEXT, contents);
        intent.putExtra(Intent.EXTRA_STREAM, Uri.parse("file://" + barcodeFile.getAbsolutePath()));
        intent.setType("image/png");
        intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET);
        startActivity(Intent.createChooser(intent, null));
    }

    @Override
    protected void onResume() {
        super.onResume();
        // This assumes the view is full screen, which is a good assumption
        WindowManager manager = (WindowManager) getSystemService(WINDOW_SERVICE);
        Display display = manager.getDefaultDisplay();
        Point displaySize = new Point();
        display.getSize(displaySize);
        // Size the barcode at 7/8 of the smaller screen dimension so it always fits on screen.
        int smallerDimension = Math.min(displaySize.x, displaySize.y) * 7 / 8;
        Intent intent = getIntent();
        if (intent == null) {
            return;
        }
        try {
            boolean useVCard = intent.getBooleanExtra(USE_VCARD_KEY, false);
            qrCodeEncoder = new QRCodeEncoder(this, intent, smallerDimension, useVCard);
            Bitmap bitmap = qrCodeEncoder.encodeAsBitmap();
            if (bitmap == null) {
                Log.w(TAG, "Could not activity_encode barcode");
                showErrorMessage(R.string.msg_encode_contents_failed);
                qrCodeEncoder = null;
                return;
            }
            ImageView view = (ImageView) findViewById(R.id.image_view);
            view.setImageBitmap(bitmap);
            TextView contents = (TextView) findViewById(R.id.contents_text_view);
            if (intent.getBooleanExtra(Intents.Encode.SHOW_CONTENTS, true)) {
                contents.setText(qrCodeEncoder.getDisplayContents());
                setTitle(qrCodeEncoder.getTitle());
            } else {
                contents.setText("");
                setTitle("");
            }
        } catch (WriterException e) {
            Log.w(TAG, "Could not activity_encode barcode", e);
            showErrorMessage(R.string.msg_encode_contents_failed);
            qrCodeEncoder = null;
        }
    }

    /**
     * Shows a modal error dialog whose only button (and cancel action) finishes this activity.
     *
     * @param message string resource id of the message to display
     */
    private void showErrorMessage(int message) {
        AlertDialog.Builder builder = new AlertDialog.Builder(this);
        builder.setMessage(message);
        builder.setPositiveButton(R.string.button_ok, new FinishListener(this));
        builder.setOnCancelListener(new FinishListener(this));
        builder.show();
    }
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.arrow.vector;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.vector.complex.impl.TimeStampNanoTZReaderImpl;
import org.apache.arrow.vector.complex.reader.FieldReader;
import org.apache.arrow.vector.holders.TimeStampNanoTZHolder;
import org.apache.arrow.vector.holders.NullableTimeStampNanoTZHolder;
import org.apache.arrow.vector.types.TimeUnit;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.FieldType;
import org.apache.arrow.vector.util.TransferPair;
/**
 * TimeStampNanoTZVector implements a fixed width vector (8 bytes) of
 * timestamp (nanosecond resolution) values which could be null. A validity buffer
 * (bit vector) is maintained to track which elements in the vector are null.
 */
public class TimeStampNanoTZVector extends TimeStampVector {
  private final FieldReader reader;
  private final String timeZone;

  /**
   * Instantiate a TimeStampNanoTZVector. This doesn't allocate any memory for
   * the data in vector.
   *
   * @param name name of the vector
   * @param allocator allocator for memory management.
   * @param timeZone timezone carried by the timestamp type
   */
  public TimeStampNanoTZVector(String name, BufferAllocator allocator, String timeZone) {
    this(name,
        FieldType.nullable(new org.apache.arrow.vector.types.pojo.ArrowType.Timestamp(TimeUnit.NANOSECOND, timeZone)),
        allocator);
  }

  /**
   * Instantiate a TimeStampNanoTZVector. This doesn't allocate any memory for
   * the data in vector.
   *
   * @param name name of the vector
   * @param fieldType type of Field materialized by this vector
   * @param allocator allocator for memory management.
   */
  public TimeStampNanoTZVector(String name, FieldType fieldType, BufferAllocator allocator) {
    super(name, fieldType, allocator);
    final org.apache.arrow.vector.types.pojo.ArrowType.Timestamp arrowType =
        (org.apache.arrow.vector.types.pojo.ArrowType.Timestamp) fieldType.getType();
    timeZone = arrowType.getTimezone();
    reader = new TimeStampNanoTZReaderImpl(this);
  }

  /**
   * Get a reader that supports reading values from this vector.
   *
   * @return Field Reader for this vector
   */
  @Override
  public FieldReader getReader() {
    return reader;
  }

  /**
   * Get minor type for this vector. The vector holds values belonging
   * to a particular type.
   *
   * @return {@link org.apache.arrow.vector.types.Types.MinorType}
   */
  @Override
  public Types.MinorType getMinorType() {
    return Types.MinorType.TIMESTAMPNANOTZ;
  }

  /*----------------------------------------------------------------*
   |  vector value retrieval methods                                |
   *----------------------------------------------------------------*/

  /**
   * Get the element at the given index from the vector and
   * sets the state in holder. If element at given index
   * is null, holder.isSet will be zero.
   *
   * @param index position of element
   * @param holder nullable holder receiving the value and null-state
   */
  public void get(int index, NullableTimeStampNanoTZHolder holder) {
    if (isSet(index) == 0) {
      holder.isSet = 0;
    } else {
      holder.isSet = 1;
      // NOTE(review): index * TYPE_WIDTH is computed in int arithmetic — presumably value counts
      // stay far below Integer.MAX_VALUE / 8 here; confirm against the buffer API before changing.
      holder.value = valueBuffer.getLong(index * TYPE_WIDTH);
    }
  }

  /**
   * Same as {@link #get(int)} but returns a boxed value, or null for a null slot.
   *
   * @param index position of element
   * @return element at given index
   */
  public Long getObject(int index) {
    return isSet(index) == 0 ? null : valueBuffer.getLong(index * TYPE_WIDTH);
  }

  /*----------------------------------------------------------------*
   |  vector value setter methods                                   |
   *----------------------------------------------------------------*/

  /**
   * Set the element at the given index to the value set in data holder.
   * If the value in holder is not indicated as set, element in the
   * at the given index will be null.
   *
   * @param index position of element
   * @param holder nullable data holder for value of element
   * @throws IllegalArgumentException if holder.isSet is negative
   */
  public void set(int index, NullableTimeStampNanoTZHolder holder) throws IllegalArgumentException {
    if (holder.isSet < 0) {
      throw new IllegalArgumentException();
    }
    if (holder.isSet == 0) {
      // Holder carries no value: mark the slot null.
      BitVectorHelper.setValidityBit(validityBuffer, index, 0);
    } else {
      BitVectorHelper.setValidityBitToOne(validityBuffer, index);
      setValue(index, holder.value);
    }
  }

  /**
   * Set the element at the given index to the value set in data holder.
   *
   * @param index position of element
   * @param holder data holder for value of element
   */
  public void set(int index, TimeStampNanoTZHolder holder) {
    BitVectorHelper.setValidityBitToOne(validityBuffer, index);
    setValue(index, holder.value);
  }

  /**
   * Same as {@link #set(int, NullableTimeStampNanoTZHolder)} except that it handles the
   * case when index is greater than or equal to existing
   * value capacity {@link #getValueCapacity()}.
   *
   * @param index position of element
   * @param holder nullable data holder for value of element
   */
  public void setSafe(int index, NullableTimeStampNanoTZHolder holder) throws IllegalArgumentException {
    handleSafe(index);
    set(index, holder);
  }

  /**
   * Same as {@link #set(int, TimeStampNanoTZHolder)} except that it handles the
   * case when index is greater than or equal to existing
   * value capacity {@link #getValueCapacity()}.
   *
   * @param index position of element
   * @param holder data holder for value of element
   */
  public void setSafe(int index, TimeStampNanoTZHolder holder) {
    handleSafe(index);
    set(index, holder);
  }

  /*----------------------------------------------------------------*
   |  vector transfer                                               |
   *----------------------------------------------------------------*/

  /**
   * Construct a TransferPair comprising of this and and a target vector of
   * the same type.
   *
   * @param ref name of the target vector
   * @param allocator allocator for the target vector
   * @return {@link TransferPair}
   */
  @Override
  public TransferPair getTransferPair(String ref, BufferAllocator allocator) {
    return new TransferImpl(new TimeStampNanoTZVector(ref, field.getFieldType(), allocator));
  }

  /**
   * Construct a TransferPair with a desired target vector of the same type.
   *
   * @param to target vector
   * @return {@link TransferPair}
   */
  @Override
  public TransferPair makeTransferPair(ValueVector to) {
    return new TransferImpl((TimeStampNanoTZVector) to);
  }
}
|
|
package mp400;
import java.util.Map;
/**
*
* @author Keegan Ott
*/
/**
* every classification filter function in this file initialises a Blob
* object to null and searches through blob candidates in order to find one
* that fits the bill, if none is found, the blob object remains null and
* the check for null at the end of each function only allows an initialized
* blob to be written out - this works well for combination images which might
* not contain an object, the check for that object will still be completed
* but no file will be written out if nothing is found.
*/
public class FindBlob
{
    /**
     * Hardcoded classification criteria that consistently identifies the purple dome.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "PurpleDome", or null when none qualifies
     */
    public static Blob findPurpleDisk(Map<Integer,Blob> blobs)
    {
        Blob purpleDome = null;
        // Iterate values() directly — the map keys are never used by any classifier here.
        for(Blob blob : blobs.values())
        {
            if(blob.getPass().equals("purple")
                    && blob.getDensity() < .81 && blob.getDensity() > .76)
            {
                purpleDome = blob;
            }
        }
        if(purpleDome != null)
            purpleDome.setObjectOfInterest("PurpleDome");
        return purpleDome;
    }

    /**
     * Hardcoded classification criteria that consistently identifies the Amazon figurine.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "Amazon", or null when none qualifies
     */
    public static Blob findAmazon(Map<Integer,Blob> blobs)
    {
        Blob amazonMan = null;
        for(Blob blob : blobs.values())
        {
            if(blob.getPass().equals("amazon_brown")
                    && blob.getDensity() > .48 && blob.getDensity() < .9
                    && Extra.PixelRange(blob.avePix, new PixHSV(30,.7,.18), new PixHSV(36,.95,.33))
                    && blob.getAspectRatio() > .38 && blob.getAspectRatio() < .70)
            {
                amazonMan = blob;
            }
        }
        if(amazonMan != null)
            amazonMan.setObjectOfInterest("Amazon");
        return amazonMan;
    }

    /**
     * Hardcoded classification criteria that consistently identifies the Small Totoro.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "SmallTotoro", or null when none qualifies
     */
    public static Blob findBlueTotoro(Map<Integer,Blob> blobs)
    {
        Blob blueToto = null;
        for(Blob blob : blobs.values())
        {
            if(blob.getPass().equals("light_blue")
                    && blob.getDensity() < .65 && blob.getDensity() > .30)
            {
                blueToto = blob;
            }
        }
        if(blueToto != null)
            blueToto.setObjectOfInterest("SmallTotoro");
        return blueToto;
    }

    /**
     * Hardcoded classification criteria that consistently identifies the Large Totoro.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "LargeTotoro", or null when none qualifies
     */
    public static Blob findGreyToto(Map<Integer,Blob> blobs)
    {
        Blob greyToto = null;
        for(Blob blob : blobs.values())
        {
            if(blob.getPass().equals("totoro_brown")
                    && Extra.PixelRange(blob.avePix, new PixHSV(32,.37,.15), new PixHSV(40,.55,.33))
                    && blob.getAspectRatio() > .4 && blob.getAspectRatio() <= 1.2)
            {
                greyToto = blob;
            }
        }
        if(greyToto != null)
            greyToto.setObjectOfInterest("LargeTotoro");
        return greyToto;
    }

    /**
     * Hardcoded classification criteria that consistently identifies the Keepon Toy.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "Keepon", or null when none qualifies
     */
    public static Blob findKeepon(Map<Integer,Blob> blobs)
    {
        Blob keepon = null;
        for(Blob blob : blobs.values())
        {
            if(blob.getPass().equals("orange_yellow")
                    && Extra.PixelRange(blob.avePix, new PixHSV(30,.85,.39), new PixHSV(40,1,.65))
                    && blob.getAspectRatio() > .75 && blob.getAspectRatio() <= .91)
            {
                keepon = blob;
            }
        }
        if(keepon != null)
            keepon.setObjectOfInterest("Keepon");
        return keepon;
    }

    /**
     * Hardcoded classification criteria that consistently identifies the Rubber duck.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "Duck", or null when none qualifies
     */
    public static Blob findDuck(Map<Integer,Blob> blobs)
    {
        Blob duck = null;
        for(Blob blob : blobs.values())
        {
            if(blob.getPass().equals("orange_yellow")
                    && Extra.PixelRange(blob.avePix, new PixHSV(32,.95,.38), new PixHSV(45, 1, .9))
                    && blob.getDensity() > .63 && blob.getDensity() < .85
                    && blob.getAspectRatio() >= .9 && blob.getAspectRatio() <= 1.4)
            {
                duck = blob;
            }
        }
        if(duck != null)
            duck.setObjectOfInterest("Duck");
        return duck;
    }

    /**
     * Hardcoded classification criteria that consistently identifies the Wheel Object.
     * Note: unlike the other classifiers this one does not filter on a pass name.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "Wheel", or null when none qualifies
     */
    public static Blob findWheel(Map<Integer,Blob> blobs)
    {
        Blob wheel = null;
        for(Blob blob : blobs.values())
        {
            if(Extra.PixelRange(blob.avePix, new PixHSV(35,.39,.40), new PixHSV(47, .53,.70))
                    && blob.getDensity() > .25 && blob.getDensity() < 1.1
                    && blob.getAspectRatio() >= .7 && blob.getAspectRatio() <= 1.3)
            {
                wheel = blob;
            }
        }
        if(wheel != null)
            wheel.setObjectOfInterest("Wheel");
        return wheel;
    }

    /**
     * Hardcoded classification criteria that attempts to identify the mexican hat.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob tagged "MexicanHat", or null when none qualifies
     */
    public static Blob findMexicanHat(Map<Integer,Blob> blobs)
    {
        Blob mexicanHat = null;
        for(Blob blob : blobs.values())
        {
            if(blob.getPass().equals("mexican_hat")
                    && Extra.PixelRange(blob.avePix, new PixHSV(125,.49,.15), new PixHSV(165, .62,.25))
                    && blob.getDensity() > .20 && blob.getDensity() < .7
                    && blob.getAspectRatio() >= .8 && blob.getAspectRatio() <= 1.8)
            {
                mexicanHat = blob;
            }
        }
        if(mexicanHat != null)
            mexicanHat.setObjectOfInterest("MexicanHat");
        return mexicanHat;
    }

    /**
     * Hardcoded classification criteria that consistently identifies sign elements.
     * Note: this classifier checks only aspect ratio and does not tag the result.
     *
     * @param blobs candidate blobs keyed by blob id
     * @return the matching blob, or null when none qualifies
     */
    public static Blob findSign(Map<Integer,Blob> blobs)
    {
        Blob sign = null;
        for(Blob blob : blobs.values())
        {
            if(blob.getAspectRatio() >= .8 && blob.getAspectRatio() <= 1.1)
                sign = blob;
        }
        return sign;
    }
}
|
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.rollup.job;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.persistent.AllocatedPersistentTask;
import org.elasticsearch.persistent.PersistentTaskState;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksExecutor;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.ClientHelper;
import org.elasticsearch.xpack.core.indexing.IndexerState;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction;
import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction;
import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats;
import org.elasticsearch.xpack.core.rollup.job.RollupJob;
import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus;
import org.elasticsearch.xpack.core.scheduler.CronSchedule;
import org.elasticsearch.xpack.core.scheduler.SchedulerEngine;
import org.elasticsearch.xpack.rollup.Rollup;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
/**
* This class contains the high-level logic that drives the rollup job. The allocated task contains transient state
* which drives the indexing, and periodically updates it's parent PersistentTask with the indexing's current position.
*
* Each RollupJobTask also registers itself into the Scheduler so that it can be triggered on the cron's interval.
*/
public class RollupJobTask extends AllocatedPersistentTask implements SchedulerEngine.Listener {
private static final Logger logger = LogManager.getLogger(RollupJobTask.class.getName());
static final String SCHEDULE_NAME = RollupField.TASK_NAME + "/schedule";
    /**
     * Persistent-task executor for rollup jobs: creates {@link RollupJobTask} instances and, when a
     * task is allocated to this node, registers it with the {@link SchedulerEngine} under its cron
     * schedule.
     */
    public static class RollupJobPersistentTasksExecutor extends PersistentTasksExecutor<RollupJob> {
        private final Client client;
        private final SchedulerEngine schedulerEngine;
        private final ThreadPool threadPool;

        public RollupJobPersistentTasksExecutor(Client client, SchedulerEngine schedulerEngine, ThreadPool threadPool) {
            super(RollupField.TASK_NAME, Rollup.TASK_THREAD_POOL_NAME);
            this.client = client;
            this.schedulerEngine = schedulerEngine;
            this.threadPool = threadPool;
        }

        @Override
        protected void nodeOperation(AllocatedPersistentTask task, @Nullable RollupJob params, PersistentTaskState state) {
            // NOTE(review): params is annotated @Nullable yet dereferenced unconditionally below —
            // presumably rollup tasks are always created with params; confirm before relying on it.
            RollupJobTask rollupJobTask = (RollupJobTask) task;
            // Schedule id embeds the job id so each job gets its own cron entry.
            SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(SCHEDULE_NAME + "_" + params.getConfig().getId(),
                new CronSchedule(params.getConfig().getCron()));
            // Note that while the task is added to the scheduler here, the internal state will prevent
            // it from doing any work until the task is "started" via the StartJob api
            schedulerEngine.register(rollupJobTask);
            schedulerEngine.add(schedulerJob);
            logger.info("Rollup job [" + params.getConfig().getId() + "] created.");
        }

        @Override
        protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId,
                                                     PersistentTasksCustomMetaData.PersistentTask<RollupJob> persistentTask,
                                                     Map<String, String> headers) {
            // Restores any previously persisted status (position/state) into the new task instance.
            return new RollupJobTask(id, type, action, parentTaskId, persistentTask.getParams(),
                (RollupJobStatus) persistentTask.getState(), client, schedulerEngine, threadPool, headers);
        }
    }
    /**
     * An implementation of {@link RollupIndexer} that uses a {@link Client} to perform search
     * and bulk requests.
     * It uses the {@link ThreadPool.Names#GENERIC} thread pool to fire the first request in order
     * to make sure that we never use the same thread as the persistent task to execute the rollup.
     * The execution in the generic thread pool should terminate quickly since we use async calls in the {@link Client}
     * to perform all requests.
     */
    protected class ClientRollupPageManager extends RollupIndexer {
        private final Client client;
        private final RollupJob job;

        ClientRollupPageManager(RollupJob job, IndexerState initialState, Map<String, Object> initialPosition,
                                Client client) {
            super(threadPool.executor(ThreadPool.Names.GENERIC), job, new AtomicReference<>(initialState), initialPosition);
            this.client = client;
            this.job = job;
        }

        @Override
        protected void doNextSearch(SearchRequest request, ActionListener<SearchResponse> nextPhase) {
            // Run the search with the headers captured at job creation, under the rollup origin.
            ClientHelper.executeWithHeadersAsync(job.getHeaders(), ClientHelper.ROLLUP_ORIGIN, client, SearchAction.INSTANCE, request,
                nextPhase);
        }

        @Override
        protected void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase) {
            // Same header/origin handling as doNextSearch, for the bulk indexing of rollup docs.
            ClientHelper.executeWithHeadersAsync(job.getHeaders(), ClientHelper.ROLLUP_ORIGIN, client, BulkAction.INSTANCE, request,
                nextPhase);
        }

        @Override
        protected void doSaveState(IndexerState indexerState, Map<String, Object> position, Runnable next) {
            if (indexerState.equals(IndexerState.ABORTING)) {
                // If we're aborting, just invoke `next` (which is likely an onFailure handler)
                next.run();
            } else {
                // Otherwise, attempt to persist our state
                final RollupJobStatus state = new RollupJobStatus(indexerState, getPosition());
                logger.debug("Updating persistent state of job [" + job.getConfig().getId() + "] to [" + indexerState.toString() + "]");
                // `next` runs regardless of whether the state update succeeded or failed.
                updatePersistentTaskState(state, ActionListener.wrap(task -> next.run(), exc -> next.run()));
            }
        }

        @Override
        protected void onFinish(ActionListener<Void> listener) {
            logger.debug("Finished indexing for job [" + job.getConfig().getId() + "]");
            listener.onResponse(null);
        }

        @Override
        protected void onFailure(Exception exc) {
            // Failures are logged only; the indexer itself decides whether to continue or stop.
            logger.warn("Rollup job [" + job.getConfig().getId() + "] failed with an exception: ", exc);
        }

        @Override
        protected void onAbort() {
            shutdown();
        }
    }
// Immutable job definition (config + headers) this task was allocated for.
private final RollupJob job;
// Shared scheduler firing the cron triggers; this task registers itself as a listener.
private final SchedulerEngine schedulerEngine;
private final ThreadPool threadPool;
private final Client client;
// Indexer state/position restored from the persisted RollupJobStatus; both null for a new job.
private final IndexerState initialIndexerState;
private final Map<String, Object> initialPosition;
// Created in init(); drives the actual search/bulk rollup cycle.
private RollupIndexer indexer;
/**
 * Creates the allocated rollup task.
 *
 * @param state the previously persisted status, or {@code null} if the job has never
 *              checkpointed; a null state means the indexer starts fresh (see init()).
 */
RollupJobTask(long id, String type, String action, TaskId parentTask, RollupJob job, RollupJobStatus state,
              Client client, SchedulerEngine schedulerEngine, ThreadPool threadPool, Map<String, String> headers) {
    super(id, type, action, RollupField.NAME + "_" + job.getConfig().getId(), parentTask, headers);
    this.job = job;
    this.schedulerEngine = schedulerEngine;
    this.threadPool = threadPool;
    this.client = client;
    // Capture the restored state/position; they are resolved into an actual indexer in init().
    if (state == null) {
        this.initialIndexerState = null;
        this.initialPosition = null;
    } else {
        this.initialIndexerState = state.getIndexerState();
        this.initialPosition = state.getPosition();
    }
}
/**
 * Resolves the restored persistent state into a usable initial {@link IndexerState}
 * and builds the {@link ClientRollupPageManager} that will execute the job.
 */
@Override
protected void init(PersistentTasksService persistentTasksService, TaskManager taskManager,
                    String persistentTaskId, long allocationId) {
    super.init(persistentTasksService, taskManager, persistentTaskId, allocationId);

    // Default for a brand-new job (no persisted state): created but not started.
    IndexerState resolvedState = IndexerState.STOPPED;
    if (initialIndexerState != null) {
        logger.debug("We have existing state, setting state to [" + initialIndexerState + "] " +
            "and current position to [" + initialPosition + "] for job [" + job.getConfig().getId() + "]");
        switch (initialIndexerState) {
            case INDEXING:
                // An in-flight run did not survive reallocation; restore to STARTED so the
                // indexer is not "stuck" thinking it is indexing without a running thread.
                resolvedState = IndexerState.STARTED;
                break;
            case ABORTING:
            case STOPPING:
                // ABORTING should never be persisted, but if it somehow was, restore as
                // STOPPED so the job is inert (an admin can clean it up). STOPPING means the
                // task was killed just before fully stopping, so STOPPED is accurate too.
                resolvedState = IndexerState.STOPPED;
                break;
            default:
                resolvedState = initialIndexerState;
                break;
        }
    }
    this.indexer = new ClientRollupPageManager(job, resolvedState, initialPosition,
        new ParentTaskAssigningClient(client, getParentTaskId()));
}
/**
 * Snapshots the indexer's current state and position as a {@link RollupJobStatus}.
 */
@Override
public Status getStatus() {
    final IndexerState currentState = indexer.getState();
    final Map<String, Object> currentPosition = indexer.getPosition();
    return new RollupJobStatus(currentState, currentPosition);
}
/**
 * Exposes the statistics gathered by the underlying indexer.
 *
 * @return the current {@link RollupIndexerJobStats} for this task
 */
public RollupIndexerJobStats getStats() {
    return this.indexer.getStats();
}
/**
 * Exposes the configuration this task was created with.
 *
 * @return the {@link RollupJobConfig} for this task
 */
public RollupJobConfig getConfig() {
    return this.job.getConfig();
}
/**
 * Attempt to start the indexer.
 * - If the indexer is started/indexing, returns OK
 * - If the indexer is stopped, starts task, updates persistent task's status, returns ok
 * - Anything else returns error
 *
 * Note that while the job is started, the indexer will not necessarily run immediately.
 * That will only occur when the scheduler triggers it based on the cron.
 *
 * @param listener The listener that started the action, so that we can signal completion/failure
 */
public synchronized void start(ActionListener<StartRollupJobAction.Response> listener) {
    final IndexerState prevState = indexer.getState();
    switch (prevState) {
        case STARTED:
        case INDEXING:
            // Already running: just acknowledge without changing anything.
            logger.debug("Indexer already running (State: [" + prevState + "]), acknowledging start without change.");
            listener.onResponse(new StartRollupJobAction.Response(true));
            return;
        case STOPPED:
            // The only state a start is allowed from; fall through to the start below.
            break;
        default:
            listener.onFailure(new ElasticsearchException("Cannot start task for Rollup Job [" + job.getConfig().getId() + "] because"
                + " state was [" + prevState + "]"));
            return;
    }

    final IndexerState newState = indexer.start();
    if (newState != IndexerState.STARTED) {
        listener.onFailure(new ElasticsearchException("Cannot start task for Rollup Job [" + job.getConfig().getId() + "] because"
            + " new state was [" + newState + "]"));
        return;
    }

    // Persist the STARTED state so the job survives reallocation, then tell the caller.
    final RollupJobStatus state = new RollupJobStatus(IndexerState.STARTED, indexer.getPosition());
    logger.debug("Updating state for rollup job [" + job.getConfig().getId() + "] to [" + state.getIndexerState() + "][" +
        state.getPosition() + "]");
    updatePersistentTaskState(state,
        ActionListener.wrap(
            task -> {
                logger.debug("Successfully updated state for rollup job [" + job.getConfig().getId() + "] to ["
                    + state.getIndexerState() + "][" + state.getPosition() + "]");
                listener.onResponse(new StartRollupJobAction.Response(true));
            },
            exc -> {
                // Persisting the new status failed, so stop the local indexer again to
                // keep the in-memory and persisted views consistent.
                indexer.stop();
                listener.onFailure(
                    new ElasticsearchException("Error while updating state for rollup job [" + job.getConfig().getId()
                        + "] to [" + state.getIndexerState() + "].", exc)
                );
            }
        )
    );
}
/**
 * Attempt to stop the indexer if it is idle or actively indexing.
 * If the indexer is aborted this will fail with an exception.
 *
 * Note that stopping the job is not immediate. It updates the persistent task's status, but then the allocated
 * task has to notice and stop itself (which may take some time, depending on where in the indexing cycle it is).
 *
 * This method will, however, return as soon as the persistent task has acknowledged the status update.
 *
 * @param listener The listener that is requesting the stop, so that we can signal completion/failure
 */
public synchronized void stop(ActionListener<StopRollupJobAction.Response> listener) {
    final IndexerState newState = indexer.stop();
    if (newState == IndexerState.STOPPED) {
        // Already idle; nothing to persist.
        listener.onResponse(new StopRollupJobAction.Response(true));
    } else if (newState == IndexerState.STOPPING) {
        // update the persistent state to STOPPED. There are two scenarios and both are safe:
        // 1. we persist STOPPED now, indexer continues a bit then sees the flag and checkpoints another
        // STOPPED with the more recent position.
        // 2. we persist STOPPED now, indexer continues a bit but then dies. When/if we resume we'll pick up
        // at last checkpoint, overwrite some docs and eventually checkpoint.
        final RollupJobStatus state = new RollupJobStatus(IndexerState.STOPPED, indexer.getPosition());
        updatePersistentTaskState(state,
            ActionListener.wrap(
                task -> {
                    logger.debug("Successfully updated state for rollup job [" + job.getConfig().getId()
                        + "] to [" + state.getIndexerState() + "]");
                    listener.onResponse(new StopRollupJobAction.Response(true));
                },
                exc -> {
                    listener.onFailure(new ElasticsearchException("Error while updating state for rollup job ["
                        + job.getConfig().getId() + "] to [" + state.getIndexerState() + "].", exc));
                })
        );
    } else {
        // Any other state (e.g. aborted) cannot transition to stopped.
        listener.onFailure(new ElasticsearchException("Cannot stop task for Rollup Job [" + job.getConfig().getId() + "] because"
            + " state was [" + newState + "]"));
    }
}
/**
 * Attempt to gracefully cleanup the rollup job so it can be terminated.
 * This tries to remove the job from the scheduler, and potentially any other
 * cleanup operations in the future.
 */
synchronized void shutdown() {
    try {
        logger.info("Rollup indexer [" + job.getConfig().getId() + "] received abort request, stopping indexer.");
        // Remove the cron trigger first, then deregister this task as a scheduler listener.
        schedulerEngine.remove(SCHEDULE_NAME + "_" + job.getConfig().getId());
        schedulerEngine.unregister(this);
    } catch (Exception e) {
        // If scheduler cleanup fails, report the failure through the persistent-task
        // framework rather than marking the task completed.
        markAsFailed(e);
        return;
    }
    markAsCompleted();
}
/**
 * This is called when the persistent task signals that the allocated task should be terminated.
 * Termination in the task framework is essentially voluntary, as the allocated task can only be
 * shut down from the inside.
 */
@Override
public synchronized void onCancelled() {
    logger.info("Received cancellation request for Rollup job [" + job.getConfig().getId() + "], state: [" + indexer.getState() + "]");
    final boolean abortedImmediately = indexer.abort();
    if (abortedImmediately) {
        // No background indexing run is in flight, so it is safe to tear down now;
        // otherwise the running indexer will call onAbort() -> shutdown() itself.
        shutdown();
    }
}
/**
 * This is called by the ScheduleEngine when the cron triggers.
 *
 * @param event The event that caused the trigger
 */
@Override
public synchronized void triggered(SchedulerEngine.Event event) {
    // The scheduler is shared, so only react to the event registered for this job.
    // The indexer itself checks its own status before doing any work.
    final String expectedJobName = SCHEDULE_NAME + "_" + job.getConfig().getId();
    if (event.getJobName().equals(expectedJobName)) {
        logger.debug("Rollup indexer [" + event.getJobName() + "] schedule has triggered, state: [" + indexer.getState() + "]");
        indexer.maybeTriggerAsyncJob(System.currentTimeMillis());
    }
}
}
|
|
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.feature.vt.api.markupitem;
import static ghidra.feature.vt.gui.util.VTOptionDefines.*;
import static org.junit.Assert.*;
import java.util.Collection;
import java.util.List;
import org.junit.Test;
import ghidra.feature.vt.api.main.*;
import ghidra.feature.vt.api.markuptype.*;
import ghidra.feature.vt.gui.task.ForceApplyMarkupItemTask;
import ghidra.feature.vt.gui.util.MatchInfo;
import ghidra.feature.vt.gui.util.VTMatchApplyChoices.*;
import ghidra.framework.options.ToolOptions;
import ghidra.program.model.address.Address;
import ghidra.program.model.listing.*;
import ghidra.program.model.symbol.SourceType;
import ghidra.util.task.TaskMonitorAdapter;
/**
 * Tests that markup items whose apply option is set to EXCLUDE can still be
 * force-applied (via {@link ForceApplyMarkupItemTask}) for function names,
 * function signatures and each comment type.
 */
public class ForceApplyOfExcludedMarkupTest extends AbstractFunctionSignatureMarkupTest {

    // Default Apply Markup Options
    // ============================
    // Data Match Data Type .......... = Replace Undefined Data Only
    // End of Line Comment ........... = Add To Existing
    // Function Call Fixup ........... = Replace
    // Function Calling Convention ... = Replace If Same Language
    // Function Inline ............... = Replace
    // Function Name ................. = Replace Always
    // Function No Return ............ = Replace
    // Function Parameter Comments ... = Add To Existing
    // Function Parameter Data Types . = Replace Undefined Data Types Only
    // Function Parameter Names ...... = User Priority Replace
    // Function Return Type .......... = Replace Undefined Data Types Only
    // Function Signature ............ = Replace When Same Parameter Count
    // Function Var Args ............. = Replace
    // Labels ........................ = Add
    // Plate Comment ................. = Add To Existing
    // Post Comment .................. = Add To Existing
    // Pre Comment ................... = Add To Existing
    // Repeatable Comment ............ = Add To Existing
    // Set Excluded Markup Items To Ignored ... = false
    // Set Incomplete Markup Items To Ignored . = false

    // addPerson 004011a0 FUN... 00411830 2 params
    // call_Strncpy 0x00411ab0 FUN... 0x00411a90 3 params w/ matching types

    public ForceApplyOfExcludedMarkupTest() {
        super();
    }

    @Test
    public void testForceApplyForExcludedFunctionName() throws Exception {
        useMatch("0x00411ab0", "0x00411a90");

        // Check initial values
        checkFunctionNames("Call_strncpy_s", "FUN_00411a90");
        checkSignatures("void Call_strncpy_s(char * _Dst, char * _Src, rsize_t _MaxCount)",
            "void FUN_00411a90(char * param_1, char * param_2, rsize_t param_3)");

        // Set the function name options for this test
        ToolOptions applyOptions = vtTestEnv.getVTController().getOptions();
        setApplyMarkupOptionsToDefaults(applyOptions);
        // Now change the options where we don't want the default value.
        applyOptions.setEnum(FUNCTION_NAME, FunctionNameChoices.EXCLUDE);

        checkMatchStatus(VTAssociationStatus.AVAILABLE);
        checkFunctionNameStatus(testMatch, VTMarkupItemStatus.UNAPPLIED);

        // Test Apply of Function Name Markup
        List<VTMarkupItem> functionNameMarkupItems =
            getSpecificTypeOfMarkup(FunctionNameMarkupType.class, testMatch, true);
        assertEquals(1, functionNameMarkupItems.size());
        forceMarkup(functionNameMarkupItems);

        // Verify the markup was applied despite the EXCLUDE option.
        checkFunctionNames("Call_strncpy_s", "Call_strncpy_s");
        checkMatchStatus(VTAssociationStatus.ACCEPTED);
        checkFunctionNameStatus(testMatch, VTMarkupItemStatus.ADDED);
    }

    @Test
    public void testForceApplyForExcludedFunctionName2() throws Exception {
        useMatch("0x00411ab0", "0x00411a90");

        // Same as above, but the destination already has a user-defined name.
        setFunctionName(destinationFunction, "MyCallStrncpy", SourceType.USER_DEFINED);

        // Check initial values
        checkFunctionNames("Call_strncpy_s", "MyCallStrncpy");
        checkSignatures("void Call_strncpy_s(char * _Dst, char * _Src, rsize_t _MaxCount)",
            "void MyCallStrncpy(char * param_1, char * param_2, rsize_t param_3)");

        // Set the function name options for this test
        ToolOptions applyOptions = vtTestEnv.getVTController().getOptions();
        setApplyMarkupOptionsToDefaults(applyOptions);
        // Now change the options where we don't want the default value.
        applyOptions.setEnum(FUNCTION_NAME, FunctionNameChoices.EXCLUDE);

        checkMatchStatus(VTAssociationStatus.AVAILABLE);
        checkFunctionNameStatus(testMatch, VTMarkupItemStatus.UNAPPLIED);

        // Test Apply of Function Name Markup
        List<VTMarkupItem> functionNameMarkupItems =
            getSpecificTypeOfMarkup(FunctionNameMarkupType.class, testMatch, true);
        assertEquals(1, functionNameMarkupItems.size());
        forceMarkup(functionNameMarkupItems);

        // Verify the markup was applied.
        checkFunctionNames("Call_strncpy_s", "Call_strncpy_s");
        checkMatchStatus(VTAssociationStatus.ACCEPTED);
        checkFunctionNameStatus(testMatch, VTMarkupItemStatus.ADDED);
    }

    @Test
    public void testForceApplyForExcludedFunctionSignature() throws Exception {
        useMatch("0x00411ab0", "0x00411a90");

        SourceType[] originalSourceTypes = getParameterSourceTypes(sourceFunction);

        // Check initial values
        checkSignatures("void Call_strncpy_s(char * _Dst, char * _Src, rsize_t _MaxCount)",
            "void FUN_00411a90(char * param_1, char * param_2, rsize_t param_3)");

        // Set the function signature options for this test
        ToolOptions applyOptions = vtTestEnv.getVTController().getOptions();
        setApplyMarkupOptionsToDefaults(applyOptions);
        // Now change the options where we don't want the default value.
        applyOptions.setEnum(FUNCTION_SIGNATURE, FunctionSignatureChoices.EXCLUDE);

        checkMatchStatus(VTAssociationStatus.AVAILABLE);
        checkFunctionSignatureStatus(testMatch, VTMarkupItemStatus.UNAPPLIED);

        // Test Apply of Signature Markup
        List<VTMarkupItem> signatureMarkupItems =
            getSpecificTypeOfMarkup(FunctionSignatureMarkupType.class, testMatch, true);
        assertEquals(1, signatureMarkupItems.size());
        forceFunctionSignatureMarkup(signatureMarkupItems);

        // Verify the signature was replaced but the function name kept.
        checkSignatures("void Call_strncpy_s(char * _Dst, char * _Src, rsize_t _MaxCount)",
            "void FUN_00411a90(char * _Dst, char * _Src, rsize_t _MaxCount)");
        checkMatchStatus(VTAssociationStatus.ACCEPTED);
        checkFunctionSignatureStatus(testMatch, VTMarkupItemStatus.REPLACED);
        checkDestinationParameterNameSourceTypes(originalSourceTypes);
    }

    @Test
    public void testForceApplyForExcludedPlateComment() throws Exception {
        genericTestForceApplyForExcludedComment(PlateCommentMarkupType.class,
            CodeUnit.PLATE_COMMENT, PLATE_COMMENT);
    }

    @Test
    public void testForceApplyForExcludedPreComment() throws Exception {
        genericTestForceApplyForExcludedComment(PreCommentMarkupType.class, CodeUnit.PRE_COMMENT,
            PRE_COMMENT);
    }

    @Test
    public void testForceApplyForExcludedEOLComment() throws Exception {
        genericTestForceApplyForExcludedComment(EolCommentMarkupType.class, CodeUnit.EOL_COMMENT,
            END_OF_LINE_COMMENT);
    }

    @Test
    public void testForceApplyForExcludeRepeatableComment() throws Exception {
        genericTestForceApplyForExcludedComment(RepeatableCommentMarkupType.class,
            CodeUnit.REPEATABLE_COMMENT, REPEATABLE_COMMENT);
    }

    @Test
    public void testForceApplyForExcludedPostComment() throws Exception {
        genericTestForceApplyForExcludedComment(PostCommentMarkupType.class, CodeUnit.POST_COMMENT,
            POST_COMMENT);
    }

    /**
     * Shared scenario for every comment type: set the type's option to EXCLUDE, force-apply
     * the comment markup, then verify the source comment was appended to the destination.
     *
     * @param commentMarkupClass the markup-type class under test
     * @param commentType        the CodeUnit comment-type constant
     * @param vtOptionName       the VT option name controlling that comment type
     */
    private void genericTestForceApplyForExcludedComment(
            Class<? extends CommentMarkupType> commentMarkupClass, int commentType,
            String vtOptionName) throws Exception {
        useMatch("0x00411ab0", "0x00411a90");

        sourceAddress = sourceFunction.getEntryPoint();
        destinationAddress = destinationFunction.getEntryPoint();

        setComment(sourceProgram, sourceAddress, commentType, "Source comment.");
        setComment(destinationProgram, destinationAddress, commentType, "Destination comment.");

        // Check initial values
        checkComments(commentType, sourceAddress, "Source comment.", destinationAddress,
            "Destination comment.");

        // Set the options for this test
        ToolOptions applyOptions = vtTestEnv.getVTController().getOptions();
        setApplyMarkupOptionsToDefaults(applyOptions);
        // Now change the options where we don't want the default value.
        applyOptions.setEnum(vtOptionName, CommentChoices.EXCLUDE);

        checkMatchStatus(VTAssociationStatus.AVAILABLE);
        checkCommentStatus(testMatch, commentType, VTMarkupItemStatus.UNAPPLIED);

        // Test Apply of Comment Markup
        List<VTMarkupItem> commentMarkupItems =
            getSpecificTypeOfMarkup(commentMarkupClass, testMatch, true);
        assertTrue(commentMarkupItems.size() > 0);
        forceMarkup(commentMarkupItems);

        // Verify the source comment was appended to the existing destination comment.
        checkComments(commentType, sourceAddress, "Source comment.", destinationAddress,
            "Destination comment.\nSource comment.");
        checkMatchStatus(VTAssociationStatus.ACCEPTED);
        checkCommentStatus(testMatch, commentType, VTMarkupItemStatus.ADDED);
    }

    //----------------------------

    /** Asserts the comments at the given source/destination addresses (read on the Swing thread). */
    protected void checkComments(final int commentType, final Address sourceAddr,
            final String expectedSourceComment, final Address destinationAddr,
            final String expectedDestinationComment) {
        final String[] sourceStringBox = new String[1];
        final String[] destinationStringBox = new String[1];
        runSwing(() -> {
            Listing sourceListing = sourceProgram.getListing();
            Listing destinationListing = destinationProgram.getListing();
            sourceStringBox[0] = sourceListing.getComment(commentType, sourceAddr);
            destinationStringBox[0] = destinationListing.getComment(commentType, destinationAddr);
        });
        assertEquals(expectedSourceComment, sourceStringBox[0]);
        assertEquals(expectedDestinationComment, destinationStringBox[0]);
    }

    /** Asserts the current source/destination function names (read on the Swing thread). */
    protected void checkFunctionNames(String expectedSourceName, String expectedDestinationName) {
        final String[] sourceStringBox = new String[1];
        final String[] destinationStringBox = new String[1];
        runSwing(() -> {
            sourceStringBox[0] = sourceFunction.getName();
            destinationStringBox[0] = destinationFunction.getName();
        });
        assertEquals(expectedSourceName, sourceStringBox[0]);
        assertEquals(expectedDestinationName, destinationStringBox[0]);
    }

    /** Checks the function-name markup item's status; a null expected status means "no markup". */
    protected void checkFunctionNameStatus(VTMatch match, VTMarkupItemStatus expectedStatus) {
        VTMarkupItem markupItem = getFunctionNameMarkup(match);
        if (expectedStatus == null && markupItem == null) {
            return;
        }
        assertNotNull(markupItem);
        checkMarkupStatus(markupItem, expectedStatus);
    }

    /** Finds the function-name markup item for the match, or null if there is none. */
    protected VTMarkupItem getFunctionNameMarkup(VTMatch match) {
        MatchInfo matchInfo = controller.getMatchInfo(match);
        Collection<VTMarkupItem> appliableMarkupItems =
            matchInfo.getAppliableMarkupItems(TaskMonitorAdapter.DUMMY_MONITOR);
        for (VTMarkupItem vtMarkupItem : appliableMarkupItems) {
            if (vtMarkupItem.getMarkupType() instanceof FunctionNameMarkupType) {
                return vtMarkupItem;
            }
        }
        return null;
    }

    /** Checks the comment markup item's status; a null expected status means "no markup". */
    protected void checkCommentStatus(VTMatch match, int commentType,
            VTMarkupItemStatus expectedStatus) {
        VTMarkupItem markupItem = getCommentMarkup(match, commentType);
        if (expectedStatus == null && markupItem == null) {
            return;
        }
        assertNotNull(markupItem);
        checkMarkupStatus(markupItem, expectedStatus);
    }

    /** Finds the markup item for the given CodeUnit comment type, or null if there is none. */
    protected VTMarkupItem getCommentMarkup(VTMatch match, int commentType) {
        MatchInfo matchInfo = controller.getMatchInfo(match);
        Collection<VTMarkupItem> appliableMarkupItems =
            matchInfo.getAppliableMarkupItems(TaskMonitorAdapter.DUMMY_MONITOR);
        for (VTMarkupItem vtMarkupItem : appliableMarkupItems) {
            switch (commentType) {
                case CodeUnit.PLATE_COMMENT:
                    if (vtMarkupItem.getMarkupType() instanceof PlateCommentMarkupType) {
                        return vtMarkupItem;
                    }
                    continue;
                case CodeUnit.PRE_COMMENT:
                    if (vtMarkupItem.getMarkupType() instanceof PreCommentMarkupType) {
                        return vtMarkupItem;
                    }
                    continue;
                case CodeUnit.EOL_COMMENT:
                    if (vtMarkupItem.getMarkupType() instanceof EolCommentMarkupType) {
                        return vtMarkupItem;
                    }
                    continue;
                case CodeUnit.REPEATABLE_COMMENT:
                    if (vtMarkupItem.getMarkupType() instanceof RepeatableCommentMarkupType) {
                        return vtMarkupItem;
                    }
                    continue;
                case CodeUnit.POST_COMMENT:
                    if (vtMarkupItem.getMarkupType() instanceof PostCommentMarkupType) {
                        return vtMarkupItem;
                    }
                    continue;
            }
        }
        return null;
    }

    /** Runs the force-apply task over the given markup items and waits for it to settle. */
    protected void forceMarkup(List<VTMarkupItem> markupItems) {
        ForceApplyMarkupItemTask task =
            new ForceApplyMarkupItemTask(session, markupItems, controller.getOptions());
        runTask(task);
        waitOnPossibleBackgroundProcessing();
    }

    /** Sets a comment on the program inside a transaction, then waits for background processing. */
    protected void setComment(Program program, Address address, int commentType, String comment) {
        int transaction = -1;
        try {
            transaction = program.startTransaction("Test - Set Comment: " + address.toString(true));
            Listing listing = program.getListing();
            listing.setComment(address, commentType, comment);
        }
        finally {
            // Only close the transaction if it was actually opened; calling
            // endTransaction(-1, ...) after a failed startTransaction would raise a
            // secondary error that masks the original exception.
            if (transaction != -1) {
                program.endTransaction(transaction, true);
            }
        }
        waitOnPossibleBackgroundProcessing();
    }

    private void waitOnPossibleBackgroundProcessing() {
        waitForSwing();
        waitForBusyTool(vtTestEnv.getTool());
    }
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pig.experimental.logical.rules;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.apache.pig.experimental.logical.expression.ProjectExpression;
import org.apache.pig.experimental.logical.expression.LogicalExpressionPlan;
import org.apache.pig.experimental.logical.relational.LOFilter;
import org.apache.pig.experimental.logical.relational.LOJoin;
import org.apache.pig.experimental.logical.relational.LogicalPlan;
import org.apache.pig.experimental.logical.relational.LogicalRelationalOperator;
import org.apache.pig.experimental.logical.relational.LogicalSchema;
import org.apache.pig.experimental.plan.Operator;
import org.apache.pig.experimental.plan.OperatorPlan;
import org.apache.pig.experimental.plan.OperatorSubPlan;
import org.apache.pig.experimental.plan.optimizer.Rule;
import org.apache.pig.experimental.plan.optimizer.Transformer;
import org.apache.pig.impl.util.Pair;
/**
 * Optimizer rule that pushes a filter above an inner join when some join input
 * already produces every field the filter references, so fewer tuples reach the join.
 * The pattern matched is join -> filter (see buildPattern()).
 */
public class PushUpFilter extends Rule {

    public PushUpFilter(String n) {
        super(n);
    }

    @Override
    public Transformer getNewTransformer() {
        return new PushUpFilterTransformer();
    }

    public class PushUpFilterTransformer extends Transformer {

        // Sub-plan reported back to the optimizer describing which operators changed.
        private OperatorSubPlan subPlan;

        @Override
        public boolean check(OperatorPlan matched) throws IOException {
            // check if it is inner join: this rule only fires when every join input is inner.
            LOJoin join = (LOJoin)matched.getSources().get(0);
            boolean[] innerFlags = join.getInnerFlags();
            for(boolean inner: innerFlags) {
                if (!inner){
                    return false;
                }
            }

            // Walk the (possibly chained) filters below the join; fire if any one of them
            // can be satisfied entirely by a single predecessor of the join.
            Operator next = matched.getSinks().get(0);
            while(next != null && next instanceof LOFilter) {
                LOFilter filter = (LOFilter)next;
                LogicalExpressionPlan filterPlan = filter.getFilterPlan();

                // collect all uids used in the filter plan
                Set<Long> uids = new HashSet<Long>();
                Iterator<Operator> iter = filterPlan.getOperators();
                while(iter.hasNext()) {
                    Operator op = iter.next();
                    if (op instanceof ProjectExpression) {
                        long uid = ((ProjectExpression)op).getUid();
                        uids.add(uid);
                    }
                }

                List<Operator> preds = currentPlan.getPredecessors(join);
                for(int j=0; j<preds.size(); j++) {
                    if (hasAll((LogicalRelationalOperator)preds.get(j), uids)) {
                        return true;
                    }
                }

                // if current filter can not move up, check next filter
                List<Operator> l = currentPlan.getSuccessors(filter);
                if (l != null) {
                    next = l.get(0);
                } else {
                    next = null;
                }
            }

            return false;
        }

        @Override
        public void transform(OperatorPlan matched) throws IOException {
            subPlan = new OperatorSubPlan(currentPlan);

            LOJoin join = (LOJoin)matched.getSources().get(0);
            subPlan.add(join);

            Operator next = matched.getSinks().get(0);
            while(next != null && next instanceof LOFilter) {
                LOFilter filter = (LOFilter)next;
                subPlan.add(filter);

                LogicalExpressionPlan filterPlan = filter.getFilterPlan();

                // collect all uids used in the filter plan
                Set<Long> uids = new HashSet<Long>();
                Iterator<Operator> iter = filterPlan.getOperators();
                while(iter.hasNext()) {
                    Operator op = iter.next();
                    if (op instanceof ProjectExpression) {
                        long uid = ((ProjectExpression)op).getUid();
                        uids.add(uid);
                    }
                }

                // find the farthest predecessor that has all the fields
                LogicalRelationalOperator input = join;
                List<Operator> preds = currentPlan.getPredecessors(input);
                while(preds != null) {
                    boolean found = false;
                    for(int j=0; j<preds.size(); j++) {
                        if (hasAll((LogicalRelationalOperator)preds.get(j), uids)) {
                            input = (LogicalRelationalOperator)preds.get(j);
                            subPlan.add(input);
                            found = true;
                            break;
                        }
                    }
                    if (!found) {
                        break;
                    }
                    preds = currentPlan.getPredecessors(input);
                }

                if (input != join) {
                    // Splice the filter out of its current position...
                    Operator pred = currentPlan.getPredecessors(filter).get(0);
                    Operator succed = currentPlan.getSuccessors(filter).get(0);
                    subPlan.add(succed);
                    Pair<Integer, Integer> p1 = currentPlan.disconnect(pred, filter);
                    Pair<Integer, Integer> p2 = currentPlan.disconnect(filter, succed);
                    // NOTE(review): pred is reconnected using its original output port (p1.first)
                    // and succed's original input port (p2.second) — presumably these line up
                    // because the filter is a single-input/single-output operator; confirm.
                    currentPlan.connect(pred, p1.first, succed, p2.second);

                    // ...and re-insert it directly below the chosen input.
                    succed = currentPlan.getSuccessors(input).get(0);
                    Pair<Integer, Integer> p3 = currentPlan.disconnect(input, succed);
                    currentPlan.connect(input, p3.first, filter, 0);
                    currentPlan.connect(filter, 0, succed, p3.second);

                    // Only one filter is moved per transform invocation.
                    return;
                }

                List<Operator> l = currentPlan.getSuccessors(filter);
                if (l != null) {
                    next = l.get(0);
                } else {
                    next = null;
                }
            }
        }

        // check if a relational operator contains all of the specified uids
        // NOTE(review): this will NPE if op.getSchema() returns null (schema unknown) —
        // confirm that callers only reach here with known schemas.
        private boolean hasAll(LogicalRelationalOperator op, Set<Long> uids) {
            LogicalSchema schema = op.getSchema();
            for(long uid: uids) {
                if (schema.findField(uid) == -1) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public OperatorPlan reportChanges() {
            return subPlan;
        }
    }

    @Override
    protected OperatorPlan buildPattern() {
        // the pattern that this rule looks for
        // is join -> filter
        LogicalPlan plan = new LogicalPlan();
        LogicalRelationalOperator op1 = new LOJoin(plan);
        LogicalRelationalOperator op2 = new LOFilter(plan);
        plan.add(op1);
        plan.add(op2);
        plan.connect(op1, op2);

        return plan;
    }
}
|
|
/*
* Copyright 2004-2006 Stefan Reuter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.asteriskjava.manager.event;
import org.asteriskjava.util.AstState;
import java.util.Map;
/**
* A StatusEvent is triggered for each active channel in response to a StatusAction.
*
* @author srt
* @version $Id$
* @see org.asteriskjava.manager.action.StatusAction
*/
public class StatusEvent extends ResponseEvent
{
/**
 * Serial version identifier.
 */
private static final long serialVersionUID = -3619197512835308812L;
// Name of the channel this status entry describes.
private String channel;
private String callerIdNum;
private String callerIdName;
private String accountCode;
// Numeric channel state; may be null on Asterisk versions prior to 1.6, in which
// case getChannelState() derives it from channelStateDesc.
private Integer channelState;
private String channelStateDesc;
private String context;
private String extension;
private Integer priority;
private Integer seconds;
private String bridgedChannel;
private String bridgedUniqueId;
private String uniqueId;
private String connectedlinenum;
private String connectedlinename;
// Channel variables reported with this status event.
private Map<String, String> variables;
/**
 * Creates a new StatusEvent.
 *
 * @param source the object that originated this event.
 */
public StatusEvent(Object source)
{
    super(source);
}
/**
 * Returns the name of this channel.
 *
 * @return the name of this channel.
 */
public String getChannel()
{
    return channel;
}

/**
 * Sets the name of this channel.
 *
 * @param channel the name of this channel.
 */
public void setChannel(String channel)
{
    this.channel = channel;
}
/**
 * Returns the Caller*ID Number of this channel.<p>
 * This property is deprecated as of Asterisk 1.4, use {@link #getCallerIdNum()} instead.
 *
 * @return the Caller*ID Number of this channel or <code>null</code> if none is available.
 * @deprecated use {@link #getCallerIdNum()} instead.
 */
@Deprecated public String getCallerId()
{
    return callerIdNum;
}

/**
 * Sets the Caller*ID Number of this channel.<p>
 * This property is deprecated as of Asterisk 1.4.
 *
 * @param callerIdNum the Caller*ID Number to set.
 * @deprecated use {@link #setCallerIdNum(String)} instead.
 */
public void setCallerId(String callerIdNum)
{
    this.callerIdNum = callerIdNum;
}
/**
 * Returns the Caller*ID Number of this channel.
 *
 * @return the Caller*ID Number of this channel or <code>null</code> if none is available.
 * @since 0.3
 */
public String getCallerIdNum()
{
    return callerIdNum;
}

/**
 * Sets the Caller*ID Number of this channel.
 *
 * @param callerIdNum the Caller*ID Number to set.
 * @since 0.3
 */
public void setCallerIdNum(String callerIdNum)
{
    this.callerIdNum = callerIdNum;
}
/**
 * Returns the Caller*ID Name of this channel.
 *
 * @return the Caller*ID Name of this channel or <code>null</code> if none is available.
 */
public String getCallerIdName()
{
    return callerIdName;
}

/**
 * Sets the Caller*ID Name of this channel.
 *
 * @param callerIdName the Caller*ID Name of this channel.
 */
public void setCallerIdName(String callerIdName)
{
    this.callerIdName = callerIdName;
}
/**
 * Returns the account code of this channel.
 *
 * @return the account code of this channel.
 * @since 1.0.0
 */
public String getAccountCode()
{
return accountCode;
}
/**
 * Sets the account code of this channel.
 *
 * @param accountCode the account code of this channel.
 * @since 1.0.0
 */
public void setAccountCode(String accountCode)
{
this.accountCode = accountCode;
}
/**
 * Returns the account code of this channel.
 *
 * @return the account code of this channel.
 * @deprecated since 1.0.0, use {@link #getAccountCode()} instead.
 */
@Deprecated public String getAccount()
{
return accountCode;
}
/**
 * Sets the account code of this channel.<p>
 * Asterisk versions up to 1.4 use the "Account" property instead of "AccountCode".
 *
 * @param account the account code of this channel.
 */
public void setAccount(String account)
{
this.accountCode = account;
}
/**
 * Returns the state of the channel.<p>
 * For Asterisk versions prior to 1.6 (that do not send the numeric value) it is derived
 * from the descriptive text.
 *
 * @return the state of the channel.
 * @since 1.0.0
 */
public Integer getChannelState()
{
return channelState == null ? AstState.str2state(channelStateDesc) : channelState;
}
/**
 * Sets the state of the channel.
 *
 * @param channelState the state of the channel.
 * @since 1.0.0
 */
public void setChannelState(Integer channelState)
{
this.channelState = channelState;
}
/**
 * Returns the state of the channel as a descriptive text.
 *
 * @return the state of the channel as a descriptive text.
 * @since 1.0.0
 */
public String getChannelStateDesc()
{
return channelStateDesc;
}
/**
 * Sets the state of the channel as a descriptive text.
 *
 * @param channelStateDesc the state of the channel as a descriptive text.
 * @since 1.0.0
 */
public void setChannelStateDesc(String channelStateDesc)
{
this.channelStateDesc = channelStateDesc;
}
/**
 * Returns the state of the channel as a descriptive text.
 *
 * @return the state of the channel as a descriptive text.
 * @deprecated use {@link #getChannelStateDesc()} instead.
 */
@Deprecated public String getState()
{
return channelStateDesc;
}
/**
 * Sets the state of the channel as a descriptive text.<p>
 * Asterisk versions up to 1.4 use "State" instead of "ChannelStateDesc".
 *
 * @param state the state of the channel as a descriptive text.
 */
public void setState(String state)
{
this.channelStateDesc = state;
}
/**
 * Returns the dialplan context this channel is currently in.
 *
 * @return the dialplan context or <code>null</code> if not available.
 */
public String getContext()
{
return context;
}
/**
 * Sets the dialplan context this channel is currently in.
 *
 * @param context the dialplan context.
 */
public void setContext(String context)
{
this.context = context;
}
/**
 * Returns the dialplan extension this channel is currently executing.
 *
 * @return the extension or <code>null</code> if not available.
 */
public String getExtension()
{
return extension;
}
/**
 * Sets the dialplan extension this channel is currently executing.
 *
 * @param extension the extension.
 */
public void setExtension(String extension)
{
this.extension = extension;
}
/**
 * Returns the dialplan priority this channel is currently executing.
 *
 * @return the priority or <code>null</code> if not available.
 */
public Integer getPriority()
{
return priority;
}
/**
 * Sets the dialplan priority this channel is currently executing.
 *
 * @param priority the priority.
 */
public void setPriority(Integer priority)
{
this.priority = priority;
}
/**
 * Returns the number of elapsed seconds.
 *
 * @return the number of elapsed seconds.
 */
public Integer getSeconds()
{
return seconds;
}
/**
 * Sets the number of elapsed seconds.
 *
 * @param seconds the number of elapsed seconds.
 */
public void setSeconds(Integer seconds)
{
this.seconds = seconds;
}
/**
 * Returns the name of the linked channel if this channel is bridged.
 *
 * @return the name of the linked channel if this channel is bridged.
 * @since 1.0.0
 */
public String getBridgedChannel()
{
return bridgedChannel;
}
/**
 * Sets the name of the linked channel.
 *
 * @param bridgedChannel the name of the linked channel if this channel is bridged.
 * @since 1.0.0
 */
public void setBridgedChannel(String bridgedChannel)
{
this.bridgedChannel = bridgedChannel;
}
/**
 * Returns the name of the linked channel if this channel is bridged.
 *
 * @return the name of the linked channel if this channel is bridged.
 * @deprecated as of 1.0.0, use {@link #getBridgedChannel()} instead.
 */
@Deprecated public String getLink()
{
return bridgedChannel;
}
/**
 * Sets the name of the linked channel.<p>
 * Asterisk versions up to 1.4 use "Link" instead of "BridgedChannel".
 *
 * @param link the name of the linked channel if this channel is bridged.
 */
public void setLink(String link)
{
this.bridgedChannel = link;
}
/**
 * Returns the unique id of the linked channel if this channel is bridged.<p>
 * Available since Asterisk 1.6.
 *
 * @return the unique id of the linked channel if this channel is bridged.
 * @since 1.0.0
 */
public String getBridgedUniqueId()
{
return bridgedUniqueId;
}
/**
 * Sets the unique id of the linked channel if this channel is bridged.<p>
 * Available since Asterisk 1.6.
 *
 * @param bridgedUniqueId the unique id of the linked channel if this channel is bridged.
 * @since 1.0.0
 */
public void setBridgedUniqueId(String bridgedUniqueId)
{
this.bridgedUniqueId = bridgedUniqueId;
}
/**
 * Returns the unique id of this channel.
 *
 * @return the unique id of this channel.
 */
public String getUniqueId()
{
return uniqueId;
}
/**
 * Sets the unique id of this channel.
 *
 * @param uniqueId the unique id of this channel.
 */
public void setUniqueId(String uniqueId)
{
this.uniqueId = uniqueId;
}
/**
 * Returns the channel variables if the {@link org.asteriskjava.manager.action.StatusAction#setVariables(String)}
 * property has been set.<p>
 * Available since Asterisk 1.6
 *
 * @return the channel variables.
 * @since 1.0.0
 */
public Map<String, String> getVariables()
{
return variables;
}
/**
 * Sets the channel variables.<p>
 * Available since Asterisk 1.6
 *
 * @param variables the channel variables.
 * @since 1.0.0
 */
public void setVariables(Map<String, String> variables)
{
this.variables = variables;
}
/**
 * Returns the Connected Line Number of this channel.
 *
 * @return the Connected Line Number or <code>null</code> if not available.
 */
public String getConnectedlinenum()
{
return connectedlinenum;
}
/**
 * Sets the Connected Line Number of this channel.
 *
 * @param connectedlinenum the Connected Line Number.
 */
public void setConnectedlinenum(String connectedlinenum)
{
this.connectedlinenum = connectedlinenum;
}
/**
 * Returns the Connected Line Name of this channel.
 *
 * @return the Connected Line Name or <code>null</code> if not available.
 */
public String getConnectedlinename()
{
return connectedlinename;
}
/**
 * Sets the Connected Line Name of this channel.
 *
 * @param connectedlinename the Connected Line Name.
 */
public void setConnectedlinename(String connectedlinename)
{
this.connectedlinename = connectedlinename;
}
}
|
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.update;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.junit.Test;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
/**
 * Tests parsing of {@link UpdateRequest} sources: scripts (object-style and the deprecated
 * top-level style), script params, upsert documents and plain partial docs. The same
 * assertion groups recur for every field ordering, so they are factored into private helpers.
 */
public class UpdateRequestTests extends ElasticsearchTestCase {

    /** Asserts that {@code script} is the inline script "script1" with no explicit language. */
    private static void assertInlineScript1(Script script) {
        assertThat(script, notNullValue());
        assertThat(script.getScript(), equalTo("script1"));
        assertThat(script.getType(), equalTo(ScriptType.INLINE));
        assertThat(script.getLang(), nullValue());
    }

    /** Asserts that {@code params} holds exactly the single entry param1=value1. */
    private static void assertSingleParam(Map<String, Object> params) {
        assertThat(params, notNullValue());
        assertThat(params.size(), equalTo(1));
        assertThat(params.get("param1").toString(), equalTo("value1"));
    }

    /** Asserts that {@code doc} holds field1=value1 and a nested compound.field2=value2. */
    private static void assertCompoundDoc(Map<String, Object> doc) {
        assertThat(doc.get("field1").toString(), equalTo("value1"));
        assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2"));
    }

    /** Asserts the deprecated accessors report script "script1" with param1=value1. */
    private static void assertOldApiScriptWithParam(UpdateRequest request) {
        assertThat(request.scriptString(), notNullValue());
        assertThat(request.scriptString(), equalTo("script1"));
        assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
    }

    @Test
    public void testUpdateRequest() throws Exception {
        UpdateRequest request = new UpdateRequest("test", "type", "1");
        // simple script: top-level "script" field, no params
        request.source(XContentFactory.jsonBuilder().startObject()
                .field("script", "script1")
                .endObject());
        Script script = request.script();
        assertInlineScript1(script);
        assertThat(script.getParams(), nullValue());
        // script with params, "inline" before "params"
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject().startObject("script").field("inline", "script1").startObject("params")
                .field("param1", "value1").endObject().endObject().endObject());
        script = request.script();
        assertInlineScript1(script);
        assertSingleParam(script.getParams());
        // same but "params" before "inline" - field order must not matter
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
                .endObject().field("inline", "script1").endObject().endObject());
        script = request.script();
        assertInlineScript1(script);
        assertSingleParam(script.getParams());
        // script with params and upsert
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
                .endObject().field("inline", "script1").endObject().startObject("upsert").field("field1", "value1").startObject("compound")
                .field("field2", "value2").endObject().endObject().endObject());
        script = request.script();
        assertInlineScript1(script);
        assertSingleParam(script.getParams());
        assertCompoundDoc(XContentHelper.convertToMap(request.upsertRequest().source(), true).v2());
        // upsert before script
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject().startObject("upsert").field("field1", "value1").startObject("compound")
                .field("field2", "value2").endObject().endObject().startObject("script").startObject("params").field("param1", "value1")
                .endObject().field("inline", "script1").endObject().endObject());
        script = request.script();
        assertInlineScript1(script);
        assertSingleParam(script.getParams());
        assertCompoundDoc(XContentHelper.convertToMap(request.upsertRequest().source(), true).v2());
        // plain partial doc, no script
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject().startObject("doc").field("field1", "value1").startObject("compound")
                .field("field2", "value2").endObject().endObject().endObject());
        assertCompoundDoc(request.doc().sourceAsMap());
    }

    /*
     * TODO Remove in 2.0
     */
    @Test
    public void testUpdateRequestOldAPI() throws Exception {
        UpdateRequest request = new UpdateRequest("test", "type", "1");
        // simple script, no params
        request.source(XContentFactory.jsonBuilder().startObject().field("script", "script1").endObject());
        assertThat(request.scriptString(), equalTo("script1"));
        // script with params
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject()
                .field("script", "script1")
                .startObject("params").field("param1", "value1").endObject()
                .endObject());
        assertOldApiScriptWithParam(request);
        // params before script
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject()
                .startObject("params").field("param1", "value1").endObject()
                .field("script", "script1")
                .endObject());
        assertOldApiScriptWithParam(request);
        // script with params and upsert
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject()
                .startObject("params").field("param1", "value1").endObject()
                .field("script", "script1")
                .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
                .endObject());
        assertOldApiScriptWithParam(request);
        assertCompoundDoc(XContentHelper.convertToMap(request.upsertRequest().source(), true).v2());
        // upsert first
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject()
                .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
                .startObject("params").field("param1", "value1").endObject()
                .field("script", "script1")
                .endObject());
        assertOldApiScriptWithParam(request);
        assertCompoundDoc(XContentHelper.convertToMap(request.upsertRequest().source(), true).v2());
        // params, then upsert, then script
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject()
                .startObject("params").field("param1", "value1").endObject()
                .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
                .field("script", "script1")
                .endObject());
        assertOldApiScriptWithParam(request);
        assertCompoundDoc(XContentHelper.convertToMap(request.upsertRequest().source(), true).v2());
        // plain partial doc
        request = new UpdateRequest("test", "type", "1");
        request.source(XContentFactory.jsonBuilder().startObject()
                .startObject("doc").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
                .endObject());
        assertCompoundDoc(request.doc().sourceAsMap());
    }
}
|
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.coordination;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.FakeThreadPoolMasterService;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.cluster.service.MasterServiceTests;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.BaseFuture;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ClusterServiceUtils;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RequestHandlerRegistry;
import org.elasticsearch.transport.TestTransportChannel;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
public class NodeJoinTests extends ESTestCase {
// Class-wide pool: created in beforeClass, terminated in afterClass.
private static ThreadPool threadPool;
// Per-test state, initialized (at most once per test) by setupMasterServiceAndCoordinator.
private MasterService masterService;
private Coordinator coordinator;
// Only non-null when the fake (deterministic) master service variant is used.
private DeterministicTaskQueue deterministicTaskQueue;
private Transport transport;
// Creates the class-wide thread pool used by the "real" master service variant.
@BeforeClass
public static void beforeClass() {
threadPool = new TestThreadPool(NodeJoinTests.getTestClass().getName());
}
// Shuts down the class-wide thread pool, waiting up to 30s for termination.
@AfterClass
public static void afterClass() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
threadPool = null;
}
/**
 * Closes the per-test master service after the standard teardown.
 */
@After
public void tearDown() throws Exception {
    super.tearDown();
    // Guard: if a test failed before setupMasterServiceAndCoordinator ran, masterService is
    // still null and an unguarded close() would throw an NPE masking the original failure.
    if (masterService != null) {
        masterService.close();
    }
}
/**
 * Builds an initial cluster state containing only {@code localNode}, with the given term and
 * version; {@code config} is used as both the last-accepted and last-committed voting
 * configuration.
 */
private static ClusterState initialState(DiscoveryNode localNode, long term, long version,
VotingConfiguration config) {
return ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName()))
.nodes(DiscoveryNodes.builder()
.add(localNode)
.localNodeId(localNode.getId()))
.metadata(Metadata.builder()
.coordinationMetadata(
CoordinationMetadata.builder()
.term(term)
.lastAcceptedConfiguration(config)
.lastCommittedConfiguration(config)
.build()))
.version(version)
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
}
/**
 * Sets up a coordinator driven by a deterministic task queue and a fake master service whose
 * "publication" simply feeds the new cluster state straight back into the coordinator.
 * Tests using this variant must pump {@code deterministicTaskQueue} themselves.
 */
private void setupFakeMasterServiceAndCoordinator(long term, ClusterState initialState) {
deterministicTaskQueue
= new DeterministicTaskQueue(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), random());
final ThreadPool fakeThreadPool = deterministicTaskQueue.getThreadPool();
FakeThreadPoolMasterService fakeMasterService = new FakeThreadPoolMasterService("test_node","test",
fakeThreadPool, deterministicTaskQueue::scheduleNow);
setupMasterServiceAndCoordinator(term, initialState, fakeMasterService, fakeThreadPool, Randomness.get());
// Short-circuit publication: apply the state locally and ack immediately.
fakeMasterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
coordinator.handlePublishRequest(new PublishRequest(event.state()));
publishListener.onResponse(null);
});
fakeMasterService.start();
}
/**
 * Sets up a coordinator with a real {@link MasterService} running on the class-wide thread
 * pool; "publication" just records the latest state in an atomic reference.
 */
private void setupRealMasterServiceAndCoordinator(long term, ClusterState initialState) {
MasterService masterService = new MasterService(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool);
AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialState);
// Publication is reduced to storing the state; no other node exists to publish to.
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
clusterStateRef.set(event.state());
publishListener.onResponse(null);
});
setupMasterServiceAndCoordinator(term, initialState, masterService, threadPool, new Random(Randomness.get().nextLong()));
masterService.setClusterStateSupplier(clusterStateRef::get);
masterService.start();
}
/**
 * Wires the given master service to a fresh {@link Coordinator} over a capturing transport
 * and starts the initial join. May only be called once per test.
 */
private void setupMasterServiceAndCoordinator(long term, ClusterState initialState, MasterService masterService,
ThreadPool threadPool, Random random) {
if (this.masterService != null || coordinator != null) {
throw new IllegalStateException("method setupMasterServiceAndCoordinator can only be called once");
}
this.masterService = masterService;
// Transport that auto-answers handshakes and join-validation requests; all other outbound
// requests are captured for inspection.
CapturingTransport capturingTransport = new CapturingTransport() {
@Override
protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode destination) {
if (action.equals(HANDSHAKE_ACTION_NAME)) {
handleResponse(requestId, new TransportService.HandshakeResponse(destination, initialState.getClusterName(),
destination.getVersion()));
} else if (action.equals(JoinHelper.VALIDATE_JOIN_ACTION_NAME)) {
handleResponse(requestId, new TransportResponse.Empty());
} else {
super.onSendRequest(requestId, action, request, destination);
}
}
};
final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY, threadPool,
TransportService.NOOP_TRANSPORT_INTERCEPTOR,
x -> initialState.nodes().getLocalNode(),
clusterSettings, Collections.emptySet());
// Coordinator with in-memory persisted state, no peer providers and a no-op cluster applier.
coordinator = new Coordinator("test_node", Settings.EMPTY, clusterSettings,
transportService, writableRegistry(),
ESAllocationTestCase.createAllocationService(Settings.EMPTY),
masterService,
() -> new InMemoryPersistedState(term, initialState), r -> emptyList(),
new NoOpClusterApplier(),
Collections.emptyList(),
random, (s, p, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE);
transportService.start();
transportService.acceptIncomingRequests();
transport = capturingTransport;
coordinator.start();
coordinator.startInitialJoin();
}
// Creates a test node with a randomly chosen master-eligible/data-only role.
protected DiscoveryNode newNode(int i) {
return newNode(i, randomBoolean());
}
/**
 * Creates a test node named {@code master_<i>} (master-eligible) or {@code data_<i>}
 * (no roles), with id {@code <i>} and a fake transport address.
 */
protected DiscoveryNode newNode(int i, boolean master) {
    final Set<DiscoveryNodeRole> roles = master ? Set.of(DiscoveryNodeRole.MASTER_ROLE) : Set.of();
    final String prefix = master ? "master_" : "data_";
    return new DiscoveryNode(prefix + i, i + "", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
}
/**
 * Minimal future used to observe completion of an asynchronously processed join request.
 */
static class SimpleFuture extends BaseFuture<Void> {
    final String description;

    SimpleFuture(String description) {
        this.description = description;
    }

    /** Completes this future successfully. */
    public void markAsDone() {
        set(null);
    }

    /** Completes this future exceptionally with {@code t}. */
    public void markAsFailed(Throwable t) {
        setException(t);
    }

    @Override
    public String toString() {
        return String.format("future [%s]", description);
    }
}
/**
 * Submits {@code joinRequest} to the join handler registered on the capturing transport and
 * returns a future that completes (or fails) when the handler has processed it.
 */
private SimpleFuture joinNodeAsync(final JoinRequest joinRequest) {
    // Fix: description previously read "join of <request>]" - the opening bracket was missing.
    final SimpleFuture future = new SimpleFuture("join of [" + joinRequest + "]");
    logger.debug("starting {}", future);
    // clone the node before submitting to simulate an incoming join, which is guaranteed to have a new
    // disco node object serialized off the network
    try {
        final RequestHandlerRegistry<JoinRequest> joinHandler = transport.getRequestHandlers()
            .getHandler(JoinHelper.JOIN_ACTION_NAME);
        final ActionListener<TransportResponse> listener = new ActionListener<>() {
            @Override
            public void onResponse(TransportResponse transportResponse) {
                logger.debug("{} completed", future);
                future.markAsDone();
            }

            @Override
            public void onFailure(Exception e) {
                logger.error(() -> new ParameterizedMessage("unexpected error for {}", future), e);
                future.markAsFailed(e);
            }
        };
        joinHandler.processMessageReceived(joinRequest, new TestTransportChannel(listener));
    } catch (Exception e) {
        // Synchronous handler failures are routed into the future as well.
        logger.error(() -> new ParameterizedMessage("unexpected error for {}", future), e);
        future.markAsFailed(e);
    }
    return future;
}
// Synchronously processes a join request, blocking until done (real master service variant).
private void joinNode(final JoinRequest joinRequest) {
FutureUtils.get(joinNodeAsync(joinRequest));
}
// Submits a join request and drains the deterministic task queue until it completes;
// rethrows any failure (fake master service variant).
private void joinNodeAndRun(final JoinRequest joinRequest) {
SimpleFuture fut = joinNodeAsync(joinRequest);
deterministicTaskQueue.runAllRunnableTasks();
assertTrue(fut.isDone());
FutureUtils.get(fut);
}
// A join carrying a vote in a higher term forms a quorum and elects the local node leader.
public void testJoinWithHigherTermElectsLeader() {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(randomFrom(node0, node1))));
assertFalse(isLocalNodeElectedMaster());
assertNull(coordinator.getStateForMasterService().nodes().getMasterNodeId());
long newTerm = initialTerm + randomLongBetween(1, 10);
SimpleFuture fut = joinNodeAsync(new JoinRequest(node1, newTerm,
Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion))));
// Election has been won, but the resulting state is not published until the queue is drained.
assertEquals(Coordinator.Mode.LEADER, coordinator.getMode());
assertNull(coordinator.getStateForMasterService().nodes().getMasterNodeId());
deterministicTaskQueue.runAllRunnableTasks();
assertTrue(fut.isDone());
assertTrue(isLocalNodeElectedMaster());
assertTrue(coordinator.getStateForMasterService().nodes().isLocalNodeElectedMaster());
}
// A join whose vote claims a higher last-accepted version than the local state is rejected.
public void testJoinWithHigherTermButBetterStateGetsRejected() {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node1)));
assertFalse(isLocalNodeElectedMaster());
long newTerm = initialTerm + randomLongBetween(1, 10);
long higherVersion = initialVersion + randomLongBetween(1, 10);
expectThrows(CoordinationStateRejectedException.class,
() -> joinNodeAndRun(new JoinRequest(node1, newTerm,
Optional.of(new Join(node1, node0, newTerm, initialTerm, higherVersion)))));
assertFalse(isLocalNodeElectedMaster());
}
// Even if the incoming vote is rejected for a better state, the local node (sole voter)
// still wins the election through its own self-join.
public void testJoinWithHigherTermButBetterStateStillElectsMasterThroughSelfJoin() {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node0)));
assertFalse(isLocalNodeElectedMaster());
long newTerm = initialTerm + randomLongBetween(1, 10);
long higherVersion = initialVersion + randomLongBetween(1, 10);
joinNodeAndRun(new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, higherVersion))));
assertTrue(isLocalNodeElectedMaster());
}
// After the local node elects itself, a later join from another node is added to the cluster.
public void testJoinElectedLeader() {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node0)));
assertFalse(isLocalNodeElectedMaster());
long newTerm = initialTerm + randomLongBetween(1, 10);
joinNodeAndRun(new JoinRequest(node0, newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion))));
assertTrue(isLocalNodeElectedMaster());
assertFalse(clusterStateHasNode(node1));
joinNodeAndRun(new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion))));
assertTrue(isLocalNodeElectedMaster());
assertTrue(clusterStateHasNode(node1));
}
// A voteless join in a higher term bumps the local term but leaves the local node master.
public void testJoinElectedLeaderWithHigherTerm() {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node0)));
long newTerm = initialTerm + randomLongBetween(1, 10);
joinNodeAndRun(new JoinRequest(node0, newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion))));
assertTrue(isLocalNodeElectedMaster());
long newerTerm = newTerm + randomLongBetween(1, 10);
joinNodeAndRun(new JoinRequest(node1, newerTerm, Optional.empty()));
assertThat(coordinator.getCurrentTerm(), greaterThanOrEqualTo(newerTerm));
assertTrue(isLocalNodeElectedMaster());
}
// Joins from non-voting nodes accumulate (futures stay pending) until the single voting
// node (node2) joins and completes the election; then all pending joins succeed.
public void testJoinAccumulation() {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
DiscoveryNode node2 = newNode(2, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node2)));
assertFalse(isLocalNodeElectedMaster());
long newTerm = initialTerm + randomLongBetween(1, 10);
SimpleFuture futNode0 = joinNodeAsync(new JoinRequest(node0, newTerm, Optional.of(
new Join(node0, node0, newTerm, initialTerm, initialVersion))));
deterministicTaskQueue.runAllRunnableTasks();
assertFalse(futNode0.isDone());
assertFalse(isLocalNodeElectedMaster());
SimpleFuture futNode1 = joinNodeAsync(new JoinRequest(node1, newTerm, Optional.of(
new Join(node1, node0, newTerm, initialTerm, initialVersion))));
deterministicTaskQueue.runAllRunnableTasks();
assertFalse(futNode1.isDone());
assertFalse(isLocalNodeElectedMaster());
joinNodeAndRun(new JoinRequest(node2, newTerm, Optional.of(new Join(node2, node0, newTerm, initialTerm, initialVersion))));
assertTrue(isLocalNodeElectedMaster());
assertTrue(clusterStateHasNode(node1));
assertTrue(clusterStateHasNode(node2));
FutureUtils.get(futNode0);
FutureUtils.get(futNode1);
}
// A node that was demoted to follower can still win an election from a join in a newer term.
public void testJoinFollowerWithHigherTerm() throws Exception {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node0)));
long newTerm = initialTerm + randomLongBetween(1, 10);
// Push the local node into FOLLOWER mode first.
handleStartJoinFrom(node1, newTerm);
handleFollowerCheckFrom(node1, newTerm);
long newerTerm = newTerm + randomLongBetween(1, 10);
joinNodeAndRun(new JoinRequest(node1, newerTerm,
Optional.of(new Join(node1, node0, newerTerm, initialTerm, initialVersion))));
assertTrue(isLocalNodeElectedMaster());
}
/**
 * When a node joins whose name matches a placeholder voting-config exclusion
 * (registered by name with a missing node id), the exclusion entry should be
 * resolved to carry the joining node's actual id.
 */
public void testJoinUpdateVotingConfigExclusion() throws Exception {
DiscoveryNode initialNode = newNode(0, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
// Exclusion identifies the node only by name; the id is the "missing" marker.
CoordinationMetadata.VotingConfigExclusion votingConfigExclusion = new CoordinationMetadata.VotingConfigExclusion(
CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER, "knownNodeName");
setupFakeMasterServiceAndCoordinator(initialTerm, buildStateWithVotingConfigExclusion(initialNode, initialTerm,
initialVersion, votingConfigExclusion));
// Joining node has the excluded name but a concrete id the state has not seen yet.
DiscoveryNode knownJoiningNode = new DiscoveryNode("knownNodeName", "newNodeId", buildNewFakeTransportAddress(),
emptyMap(), Set.of(DiscoveryNodeRole.MASTER_ROLE), Version.CURRENT);
long newTerm = initialTerm + randomLongBetween(1, 10);
long newerTerm = newTerm + randomLongBetween(1, 10);
joinNodeAndRun(new JoinRequest(knownJoiningNode, initialTerm,
Optional.of(new Join(knownJoiningNode, initialNode, newerTerm, initialTerm, initialVersion))));
// After the join, the exclusion must carry both the known name and the learned id.
assertTrue(MasterServiceTests.discoveryState(masterService).getVotingConfigExclusions().stream().anyMatch(exclusion -> {
return "knownNodeName".equals(exclusion.getNodeName()) && "newNodeId".equals(exclusion.getNodeId());
}));
}
/**
 * Builds a single-node initial cluster state whose coordination metadata carries
 * the given voting-config exclusion.
 */
private ClusterState buildStateWithVotingConfigExclusion(DiscoveryNode initialNode,
                                                         long initialTerm,
                                                         long initialVersion,
                                                         CoordinationMetadata.VotingConfigExclusion votingConfigExclusion) {
    // Start from a plain single-node state, then graft the exclusion onto its coordination metadata.
    final ClusterState baseState = initialState(initialNode, initialTerm, initialVersion,
        new VotingConfiguration(Collections.singleton(initialNode.getId())));
    final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(baseState.coordinationMetadata())
        .addVotingConfigExclusion(votingConfigExclusion)
        .build();
    final Metadata metadataWithExclusion = Metadata.builder(baseState.metadata())
        .coordinationMetadata(coordinationMetadata)
        .build();
    return ClusterState.builder(baseState).metadata(metadataWithExclusion).build();
}
/**
 * Delivers a {@link StartJoinRequest} from {@code node} at {@code term} to the local
 * node's transport handler and drains the task queue; afterwards the local node must
 * be a non-master CANDIDATE.
 */
private void handleStartJoinFrom(DiscoveryNode node, long term) throws Exception {
final RequestHandlerRegistry<StartJoinRequest> startJoinHandler = transport.getRequestHandlers()
.getHandler(JoinHelper.START_JOIN_ACTION_NAME);
startJoinHandler.processMessageReceived(new StartJoinRequest(node, term), new TestTransportChannel(new ActionListener<>() {
@Override
public void onResponse(TransportResponse transportResponse) {
}
@Override
public void onFailure(Exception e) {
// A start-join must never fail in these tests.
fail();
}
}));
deterministicTaskQueue.runAllRunnableTasks();
assertFalse(isLocalNodeElectedMaster());
assertThat(coordinator.getMode(), equalTo(Coordinator.Mode.CANDIDATE));
}
/**
 * Delivers a follower check from {@code node} at {@code term} to the local node's
 * transport handler and drains the task queue; afterwards the local node must be a
 * non-master FOLLOWER.
 */
private void handleFollowerCheckFrom(DiscoveryNode node, long term) throws Exception {
final RequestHandlerRegistry<FollowersChecker.FollowerCheckRequest> followerCheckHandler = transport.getRequestHandlers()
.getHandler(FollowersChecker.FOLLOWER_CHECK_ACTION_NAME);
final TestTransportChannel channel = new TestTransportChannel(new ActionListener<>() {
@Override
public void onResponse(TransportResponse transportResponse) {
}
@Override
public void onFailure(Exception e) {
// A follower check must never fail in these tests.
fail();
}
});
followerCheckHandler.processMessageReceived(new FollowersChecker.FollowerCheckRequest(term, node), channel);
// Will throw exception if failed
deterministicTaskQueue.runAllRunnableTasks();
assertFalse(isLocalNodeElectedMaster());
assertThat(coordinator.getMode(), equalTo(Coordinator.Mode.FOLLOWER));
}
/**
 * A join request sent to a node that is itself a follower (after start-join plus
 * follower check at the same term) must be rejected with
 * {@link CoordinationStateRejectedException}.
 */
public void testJoinFollowerFails() throws Exception {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node0)));
long newTerm = initialTerm + randomLongBetween(1, 10);
// Turn the local node into a follower at newTerm.
handleStartJoinFrom(node1, newTerm);
handleFollowerCheckFrom(node1, newTerm);
// A join at the same term must be rejected because the join target is a follower.
assertThat(expectThrows(CoordinationStateRejectedException.class,
() -> joinNodeAndRun(new JoinRequest(node1, newTerm, Optional.empty()))).getMessage(),
containsString("join target is a follower"));
assertFalse(isLocalNodeElectedMaster());
}
/**
 * If the local node becomes a follower while a join it received is still pending
 * (no quorum yet), the pending join must complete exceptionally with a
 * "became follower" rejection.
 */
public void testBecomeFollowerFailsPendingJoin() throws Exception {
DiscoveryNode node0 = newNode(0, true);
DiscoveryNode node1 = newNode(1, true);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
// node1 is the sole voter, so node0's own vote below cannot form a quorum.
setupFakeMasterServiceAndCoordinator(initialTerm, initialState(node0, initialTerm, initialVersion,
VotingConfiguration.of(node1)));
long newTerm = initialTerm + randomLongBetween(1, 10);
SimpleFuture fut = joinNodeAsync(new JoinRequest(node0, newTerm,
Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion))));
deterministicTaskQueue.runAllRunnableTasks();
// The join stays pending: no quorum was reached.
assertFalse(fut.isDone());
assertFalse(isLocalNodeElectedMaster());
// Becoming a follower must fail the still-pending join.
handleFollowerCheckFrom(node1, newTerm);
assertFalse(isLocalNodeElectedMaster());
assertThat(expectThrows(CoordinationStateRejectedException.class,
() -> FutureUtils.get(fut)).getMessage(),
containsString("became follower"));
assertFalse(isLocalNodeElectedMaster());
}
/**
 * Stress-tests concurrent join handling: many threads submit a mixture of valid,
 * stale-term, better-state, and duplicate join requests simultaneously while a
 * dedicated thread continuously checks coordinator invariants. At the end the
 * local node must be master and every guaranteed-successful voter must be in the
 * cluster state.
 */
public void testConcurrentJoining() {
List<DiscoveryNode> masterNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5))
.mapToObj(nodeId -> newNode(nodeId, true)).collect(Collectors.toList());
List<DiscoveryNode> otherNodes = IntStream.rangeClosed(masterNodes.size() + 1, masterNodes.size() + 1 + randomIntBetween(0, 5))
.mapToObj(nodeId -> newNode(nodeId, false)).collect(Collectors.toList());
List<DiscoveryNode> allNodes = Stream.concat(masterNodes.stream(), otherNodes.stream()).collect(Collectors.toList());
DiscoveryNode localNode = masterNodes.get(0);
// Random voting configuration that is not just the local node by itself.
VotingConfiguration votingConfiguration = new VotingConfiguration(randomValueOtherThan(singletonList(localNode),
() -> randomSubsetOf(randomIntBetween(1, masterNodes.size()), masterNodes)).stream()
.map(DiscoveryNode::getId).collect(Collectors.toSet()));
logger.info("Voting configuration: {}", votingConfiguration);
long initialTerm = randomLongBetween(1, 10);
long initialVersion = randomLongBetween(1, 10);
setupRealMasterServiceAndCoordinator(initialTerm, initialState(localNode, initialTerm, initialVersion, votingConfiguration));
long newTerm = initialTerm + randomLongBetween(1, 10);
// we need at least a quorum of voting nodes with a correct term and worse state
List<DiscoveryNode> successfulNodes;
do {
successfulNodes = randomSubsetOf(allNodes);
} while (votingConfiguration.hasQuorum(successfulNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toList()))
== false);
logger.info("Successful voting nodes: {}", successfulNodes);
List<JoinRequest> correctJoinRequests = successfulNodes.stream().map(
node -> new JoinRequest(node, newTerm, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion))))
.collect(Collectors.toList());
List<DiscoveryNode> possiblyUnsuccessfulNodes = new ArrayList<>(allNodes);
possiblyUnsuccessfulNodes.removeAll(successfulNodes);
logger.info("Possibly unsuccessful voting nodes: {}", possiblyUnsuccessfulNodes);
// Remaining nodes randomly send a valid join, a stale-term join, or a
// better-state join (the latter two are expected to be rejected).
List<JoinRequest> possiblyFailingJoinRequests = possiblyUnsuccessfulNodes.stream().map(node -> {
if (randomBoolean()) {
// a correct request
return new JoinRequest(node, newTerm, Optional.of(new Join(node, localNode,
newTerm, initialTerm, initialVersion)));
} else if (randomBoolean()) {
// term too low
return new JoinRequest(node, newTerm, Optional.of(new Join(node, localNode,
randomLongBetween(0, initialTerm), initialTerm, initialVersion)));
} else {
// better state
return new JoinRequest(node, newTerm, Optional.of(new Join(node, localNode,
newTerm, initialTerm, initialVersion + randomLongBetween(1, 10))));
}
}).collect(Collectors.toList());
// duplicate some requests, which will be unsuccessful
possiblyFailingJoinRequests.addAll(randomSubsetOf(possiblyFailingJoinRequests));
// One barrier slot per join thread plus one for the invariant-checking thread.
CyclicBarrier barrier = new CyclicBarrier(correctJoinRequests.size() + possiblyFailingJoinRequests.size() + 1);
final Runnable awaitBarrier = () -> {
try {
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new RuntimeException(e);
}
};
final AtomicBoolean stopAsserting = new AtomicBoolean();
// Continuously verify coordinator invariants while joins are racing.
final Thread assertionThread = new Thread(() -> {
awaitBarrier.run();
while (stopAsserting.get() == false) {
coordinator.invariant();
}
}, "assert invariants");
final List<Thread> joinThreads = Stream.concat(correctJoinRequests.stream().map(joinRequest ->
new Thread(() -> {
awaitBarrier.run();
joinNode(joinRequest);
}, "process " + joinRequest)), possiblyFailingJoinRequests.stream().map(joinRequest ->
new Thread(() -> {
awaitBarrier.run();
try {
joinNode(joinRequest);
} catch (CoordinationStateRejectedException e) {
// ignore - these requests are expected to fail
}
}, "process " + joinRequest))).collect(Collectors.toList());
assertionThread.start();
joinThreads.forEach(Thread::start);
joinThreads.forEach(t -> {
try {
t.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
stopAsserting.set(true);
try {
assertionThread.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
// Quorum of valid joins arrived, so the local node must have been elected.
assertTrue(MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster());
for (DiscoveryNode successfulNode : successfulNodes) {
assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode));
assertFalse(successfulNode + " voted for master", coordinator.missingJoinVoteFrom(successfulNode));
}
}
/** @return whether the local node currently sees itself as the elected master. */
private boolean isLocalNodeElectedMaster() {
    final ClusterState state = MasterServiceTests.discoveryState(masterService);
    return state.nodes().isLocalNodeElectedMaster();
}
/** @return true if the current cluster state maps this node's id to this exact node. */
private boolean clusterStateHasNode(DiscoveryNode node) {
    final DiscoveryNode resolved = MasterServiceTests.discoveryState(masterService).nodes().get(node.getId());
    return node.equals(resolved);
}
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.ipc.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.UserGroupInformation;
import java.io.*;
import java.net.*;
import java.util.Collection;
import java.util.Iterator;
/**********************************************************
* NameNode serves as both directory namespace manager and
* "inode table" for the Hadoop DFS. There is a single NameNode
* running in any DFS deployment. (Well, except when there
* is a second backup/failover NameNode.)
*
* The NameNode controls two critical tables:
* 1) filename->blocksequence (namespace)
* 2) block->machinelist ("inodes")
*
* The first table is stored on disk and is very precious.
* The second table is rebuilt every time the NameNode comes
* up.
*
* 'NameNode' refers to both this class as well as the 'NameNode server'.
* The 'FSNamesystem' class actually performs most of the filesystem
* management. The majority of the 'NameNode' class itself is concerned
* with exposing the IPC interface to the outside world, plus some
* configuration management.
*
* NameNode implements the ClientProtocol interface, which allows
* clients to ask for DFS services. ClientProtocol is not
* designed for direct use by authors of DFS client code. End-users
* should instead use the org.apache.nutch.hadoop.fs.FileSystem class.
*
* NameNode also implements the DatanodeProtocol interface, used by
* DataNode programs that actually store DFS data blocks. These
* methods are invoked repeatedly and automatically by all the
* DataNodes in a DFS deployment.
*
* NameNode also implements the NamenodeProtocol interface, used by
* secondary namenodes or rebalancing processes to get partial namenode's
* state, for example partial blocksMap etc.
**********************************************************/
public class NameNode implements ClientProtocol, DatanodeProtocol,
NamenodeProtocol, FSConstants {
/**
 * Resolves the RPC protocol version served by this NameNode for the requested
 * protocol interface.
 *
 * @param protocol fully-qualified name of the protocol interface
 * @param clientVersion version advertised by the caller (unused here)
 * @return the server-side version id for the matched protocol
 * @throws IOException if the protocol name is not one this server implements
 */
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
return ClientProtocol.versionID;
} else if (protocol.equals(DatanodeProtocol.class.getName())){
return DatanodeProtocol.versionID;
} else if (protocol.equals(NamenodeProtocol.class.getName())){
return NamenodeProtocol.versionID;
} else {
throw new IOException("Unknown protocol to name node: " + protocol);
}
}
// Default RPC port when the configured filesystem URI carries no explicit port.
public static final int DEFAULT_PORT = 8020;
public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
// Dedicated log for namespace/block state transitions.
public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
// The namesystem does the actual filesystem work; this class mostly exposes IPC.
public FSNamesystem namesystem;
// RPC server handling all three protocols.
private Server server;
// Background thread that periodically empties the trash.
private Thread emptier;
// Overwritten from "dfs.namenode.handler.count" in initialize().
private int handlerCount = 2;
private boolean supportAppends = true; // allow appending to hdfs files
// Actual bound address (the configured port may be ephemeral/zero).
private InetSocketAddress nameNodeAddress = null;
/** only used for testing purposes */
private boolean stopRequested = false;
/** Format a new filesystem. Destroys any filesystem that may already
 * exist at this location. Delegates to the interactive variant with
 * confirmation disabled. **/
public static void format(Configuration conf) throws IOException {
format(conf, false);
}
// Process-wide metrics holder, created in initialize().
static NameNodeMetrics myMetrics;
/** @return the metrics instance for this NameNode process (may be null before startup). */
public static NameNodeMetrics getNameNodeMetrics() {
return myMetrics;
}
/** Parses "host[:port]" into a socket address, defaulting to {@link #DEFAULT_PORT}. */
public static InetSocketAddress getAddress(String address) {
return NetUtils.createSocketAddr(address, DEFAULT_PORT);
}
/** Resolves the NameNode address from the default filesystem URI in the config. */
public static InetSocketAddress getAddress(Configuration conf) {
return getAddress(FileSystem.getDefaultUri(conf).getAuthority());
}
/** Builds an hdfs:// URI for the given address, omitting the port if it is the default. */
public static URI getUri(InetSocketAddress namenode) {
int port = namenode.getPort();
String portString = port == DEFAULT_PORT ? "" : (":"+port);
return URI.create("hdfs://"+ namenode.getHostName()+portString);
}
/**
 * Initialize the server: bind the RPC server, publish the actual address back
 * into the configuration, create metrics and the namesystem, and start the
 * trash emptier. Order matters: the RPC server must be created (to learn the
 * real port) before the default URI is rewritten.
 *
 * @param address hostname:port to bind to
 * @param conf the configuration
 */
private void initialize(String address, Configuration conf) throws IOException {
InetSocketAddress socAddr = NameNode.getAddress(address);
this.supportAppends = conf.getBoolean("dfs.support.append", false);
this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
handlerCount, false, conf);
// The rpc-server port can be ephemeral... ensure we have the correct info
this.nameNodeAddress = this.server.getListenerAddress();
FileSystem.setDefaultUri(conf, getUri(nameNodeAddress));
LOG.info("Namenode up at: " + this.nameNodeAddress);
// Metrics must exist before the namesystem, which reports into them.
myMetrics = new NameNodeMetrics(conf, this);
this.namesystem = new FSNamesystem(this, conf);
this.server.start(); //start RPC server
// Daemon thread so it does not block JVM shutdown.
this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
this.emptier.setDaemon(true);
this.emptier.start();
}
/**
 * Start NameNode.
 * <p>
 * The name-node can be started with one of the following startup options:
 * <ul>
 * <li>{@link org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption#REGULAR REGULAR} - normal startup</li>
 * <li>{@link org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption#FORMAT FORMAT} - format name node</li>
 * <li>{@link org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption#UPGRADE UPGRADE} - start the cluster
 * upgrade and create a snapshot of the current file system state</li>
 * <li>{@link org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption#ROLLBACK ROLLBACK} - roll the
 * cluster back to the previous state</li>
 * </ul>
 * The option is passed via configuration field:
 * <tt>dfs.namenode.startup</tt>
 *
 * The conf will be modified to reflect the actual ports on which
 * the NameNode is up and running if the user passes the port as
 * <code>zero</code> in the conf.
 *
 * @param conf configuration
 * @throws IOException
 */
public NameNode(Configuration conf) throws IOException {
this(FileSystem.getDefaultUri(conf).getAuthority(), conf);
}
/**
 * Create a NameNode at the specified location and start it.
 *
 * The conf will be modified to reflect the actual ports on which
 * the NameNode is up and running if the user passes the port as
 * <code>zero</code>.
 *
 * On any initialization failure the partially started services are
 * stopped before the exception is rethrown.
 */
public NameNode(String bindAddress,
Configuration conf
) throws IOException {
try {
initialize(bindAddress, conf);
} catch (IOException e) {
// Tear down anything initialize() managed to start before failing.
this.stop();
throw e;
}
}
/**
 * Wait for service to finish.
 * (Normally, it runs forever.)
 */
public void join() {
    try {
        this.server.join();
    } catch (InterruptedException ie) {
        // Restore the interrupt status instead of silently swallowing it, so
        // callers (and any surrounding shutdown logic) can observe the interruption.
        Thread.currentThread().interrupt();
    }
}
/**
 * Stop all NameNode threads and wait for all to finish.
 * Idempotent: subsequent calls after the first are no-ops.
 */
public void stop() {
if (stopRequested)
return;
stopRequested = true;
// Shutdown order: namesystem first, then the trash emptier and RPC server.
if(namesystem != null) namesystem.close();
if(emptier != null) emptier.interrupt();
if(server != null) server.stop();
if (myMetrics != null) {
myMetrics.shutdown();
}
if (namesystem != null) {
namesystem.shutdown();
}
}
/////////////////////////////////////////////////////
// NamenodeProtocol
/////////////////////////////////////////////////////
/**
 * return a list of blocks & their locations on <code>datanode</code> whose
 * total size is <code>size</code>
 *
 * @param datanode on which blocks are located
 * @param size total size of blocks
 * @throws IllegalArgumentException if {@code size} is not positive
 */
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
if(size <= 0) {
throw new IllegalArgumentException(
"Unexpected not positive size: "+size);
}
return namesystem.getBlocks(datanode, size);
}
/////////////////////////////////////////////////////
// ClientProtocol
/////////////////////////////////////////////////////
/** {@inheritDoc} */
public LocatedBlocks getBlockLocations(String src,
long offset,
long length) throws IOException {
// Count the operation, then delegate; the client's address is recorded for auditing.
myMetrics.numGetBlockLocations.inc();
return namesystem.getBlockLocations(getClientMachine(),
src, offset, length);
}
/**
 * Returns the remote address of the RPC client making the current call, or the
 * empty string when no remote address is associated with this thread.
 */
private static String getClientMachine() {
    final String remoteAddress = Server.getRemoteAddress();
    return remoteAddress == null ? "" : remoteAddress;
}
/** {@inheritDoc} */
public void create(String src,
FsPermission masked,
String clientName,
boolean overwrite,
short replication,
long blockSize
) throws IOException {
String clientMachine = getClientMachine();
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.create: file "
+src+" for "+clientName+" at "+clientMachine);
}
// Enforce path length/depth limits before touching the namesystem.
if (!checkPathLength(src)) {
throw new IOException("create: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
// Owner is the calling user; group is left null here (filled in downstream).
namesystem.startFile(src,
new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
null, masked),
clientName, clientMachine, overwrite, replication, blockSize);
myMetrics.numFilesCreated.inc();
myMetrics.numCreateFileOps.inc();
}
/** {@inheritDoc} */
public LocatedBlock append(String src, String clientName) throws IOException {
String clientMachine = getClientMachine();
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.append: file "
+src+" for "+clientName+" at "+clientMachine);
}
// Appends are gated behind the "dfs.support.append" configuration flag.
if (supportAppends == false) {
throw new IOException("Append to hdfs not supported." +
" Please refer to dfs.support.append configuration parameter.");
}
LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
myMetrics.numFilesAppended.inc();
return info;
}
/** {@inheritDoc} */
// Thin delegate: replication change is handled entirely by the namesystem.
public boolean setReplication(String src,
short replication
) throws IOException {
return namesystem.setReplication(src, replication);
}
/** {@inheritDoc} */
// Thin delegate: permission change on a path.
public void setPermission(String src, FsPermission permissions
) throws IOException {
namesystem.setPermission(src, permissions);
}
/** {@inheritDoc} */
// Thin delegate: owner/group change on a path.
public void setOwner(String src, String username, String groupname
) throws IOException {
namesystem.setOwner(src, username, groupname);
}
/**
 * Allocates the next block for a file under construction.
 * Counts the operation only when a block was actually allocated.
 */
public LocatedBlock addBlock(String src,
String clientName) throws IOException {
stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
+src+" for "+clientName);
LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, clientName);
if (locatedBlock != null)
myMetrics.numAddBlockOps.inc();
return locatedBlock;
}
/**
 * The client needs to give up on the block.
 */
public void abandonBlock(Block b, String src, String holder
) throws IOException {
stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
+b+" of file "+src);
// A false return from the namesystem is surfaced to the client as an IOException.
if (!namesystem.abandonBlock(b, src, holder)) {
throw new IOException("Cannot abandon block during write to " + src);
}
}
/** {@inheritDoc} */
public boolean complete(String src, String clientName) throws IOException {
    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName);
    // Map the namesystem's tri-state answer onto the boolean protocol result,
    // failing loudly on the error case.
    CompleteFileStatus status = namesystem.completeFile(src, clientName);
    if (status == CompleteFileStatus.COMPLETE_SUCCESS) {
        return true;
    }
    if (status == CompleteFileStatus.STILL_WAITING) {
        return false;
    }
    throw new IOException("Could not complete write to file " + src + " by " + clientName);
}
/**
 * The client has detected an error on the specified located blocks
 * and is reporting them to the server. For now, the namenode will
 * mark the block as corrupt. In the future we might
 * check the blocks are actually corrupt.
 */
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
    stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
    // Mark every replica of every reported block as corrupt.
    for (LocatedBlock located : blocks) {
        Block blk = located.getBlock();
        for (DatanodeInfo dn : located.getLocations()) {
            namesystem.markBlockAsCorrupt(blk, dn);
        }
    }
}
/** {@inheritDoc} */
// Thin delegate: issues the next generation stamp for lease recovery of a block.
public long nextGenerationStamp(Block block) throws IOException{
return namesystem.nextGenerationStampForBlock(block);
}
/** {@inheritDoc} */
// Thin delegate: commits the result of a block synchronization/recovery round.
public void commitBlockSynchronization(Block block,
long newgenerationstamp, long newlength,
boolean closeFile, boolean deleteblock, DatanodeID[] newtargets
) throws IOException {
namesystem.commitBlockSynchronization(block,
newgenerationstamp, newlength, closeFile, deleteblock, newtargets);
}
/** Returns the preferred block size recorded for the given file. */
public long getPreferredBlockSize(String filename) throws IOException {
return namesystem.getPreferredBlockSize(filename);
}
/**
 * Renames {@code src} to {@code dst}. The destination path is validated against
 * the length/depth limits; a successful rename bumps the rename metric.
 */
public boolean rename(String src, String dst) throws IOException {
stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
if (!checkPathLength(dst)) {
throw new IOException("rename: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
boolean ret = namesystem.renameTo(src, dst);
if (ret) {
myMetrics.numFilesRenamed.inc();
}
return ret;
}
/**
 * @deprecated use {@link #delete(String, boolean)} instead; this variant
 * always deletes recursively.
 */
@Deprecated
public boolean delete(String src) throws IOException {
return delete(src, true);
}
/** {@inheritDoc} */
public boolean delete(String src, boolean recursive) throws IOException {
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* Namenode.delete: src=" + src
+ ", recursive=" + recursive);
}
boolean ret = namesystem.delete(src, recursive);
// Only count deletions that actually removed something.
if (ret)
myMetrics.numDeleteFileOps.inc();
return ret;
}
/**
 * Check path length does not exceed maximum. Returns true if
 * length and depth are okay. Returns false if length is too long
 * or depth is too great.
 */
private boolean checkPathLength(String src) {
    Path srcPath = new Path(src);
    boolean lengthOk = src.length() <= MAX_PATH_LENGTH;
    boolean depthOk = srcPath.depth() <= MAX_PATH_DEPTH;
    return lengthOk && depthOk;
}
/** {@inheritDoc} */
public boolean mkdirs(String src, FsPermission masked) throws IOException {
stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
// Enforce path length/depth limits before touching the namesystem.
if (!checkPathLength(src)) {
throw new IOException("mkdirs: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
// Owner is the calling user; group is left null here (filled in downstream).
return namesystem.mkdirs(src,
new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
null, masked));
}
/**
 * Renews all leases held by the given client. Thin delegate.
 */
public void renewLease(String clientName) throws IOException {
namesystem.renewLease(clientName);
}
/**
 * Lists the directory contents of {@code src}; counts the operation only when
 * the path exists (a null listing is passed through untouched).
 */
public FileStatus[] getListing(String src) throws IOException {
FileStatus[] files = namesystem.getListing(src);
if (files != null) {
myMetrics.numGetListingOps.inc();
}
return files;
}
/**
 * Get the file info for a specific file.
 * @param src The string representation of the path to the file
 * @throws IOException if permission to access file is denied by the system
 * @return object containing information regarding the file
 * or null if file not found
 */
public FileStatus getFileInfo(String src) throws IOException {
return namesystem.getFileInfo(src);
}
/** @inheritDoc */
// Thin delegate: aggregate filesystem statistics.
public long[] getStats() throws IOException {
return namesystem.getStats();
}
/**
 * Returns the datanode report for the requested type; never returns null —
 * an absent report is surfaced as an IOException instead.
 */
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
    DatanodeInfo[] report = namesystem.datanodeReport(type);
    if (report == null) {
        throw new IOException("Cannot find datanode report");
    }
    return report;
}
/**
 * @inheritDoc
 */
public boolean setSafeMode(SafeModeAction action) throws IOException {
return namesystem.setSafeMode(action);
}
/**
 * Is the cluster currently in safe mode?
 */
public boolean isInSafeMode() {
return namesystem.isInSafeMode();
}
/*
 * Refresh the list of datanodes that the namenode should allow to
 * connect. Re-reads conf by creating new Configuration object and
 * uses the files list in the configuration to update the list.
 */
public void refreshNodes() throws IOException {
// Fresh Configuration re-reads the include/exclude files from disk.
namesystem.refreshNodes(new Configuration());
}
/**
 * Returns the size of the current edit log.
 */
public long getEditLogSize() throws IOException {
return namesystem.getEditLogSize();
}
/**
 * Roll the edit log.
 */
public CheckpointSignature rollEditLog() throws IOException {
return namesystem.rollEditLog();
}
/**
 * Roll the image
 */
public void rollFsImage() throws IOException {
namesystem.rollFSImage();
}
/** Finalizes a pending upgrade, discarding the previous filesystem state. */
public void finalizeUpgrade() throws IOException {
namesystem.finalizeUpgrade();
}
/** Queries or drives the progress of a distributed upgrade. */
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action
) throws IOException {
return namesystem.distributedUpgradeProgress(action);
}
/**
 * Dumps namenode state into specified file
 */
public void metaSave(String filename) throws IOException {
namesystem.metaSave(filename);
}
/** {@inheritDoc} */
// Thin delegate: space/namespace usage summary for a subtree.
public ContentSummary getContentSummary(String path) throws IOException {
return namesystem.getContentSummary(path);
}
/** {@inheritDoc} */
// Thin delegate: sets namespace and diskspace quotas on a directory.
public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
throws IOException {
namesystem.setQuota(path, namespaceQuota, diskspaceQuota);
}
/** {@inheritDoc} */
// Thin delegate: persists pending writes for a file under construction.
public void fsync(String src, String clientName) throws IOException {
namesystem.fsync(src, clientName);
}
/** @inheritDoc */
// Thin delegate: sets modification and access times on a path.
public void setTimes(String src, long mtime, long atime) throws IOException {
namesystem.setTimes(src, mtime, atime);
}
////////////////////////////////////////////////////////////////
// DatanodeProtocol
////////////////////////////////////////////////////////////////
/**
 * Registers a datanode after verifying its layout version; the (possibly
 * updated) registration is returned to the caller.
 */
public DatanodeRegistration register(DatanodeRegistration nodeReg
) throws IOException {
verifyVersion(nodeReg.getVersion());
namesystem.registerDatanode(nodeReg);
return nodeReg;
}
/**
 * Data node notify the name node that it is alive
 * Return an array of block-oriented commands for the datanode to execute.
 * This will be either a transfer or a delete operation.
 */
public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
long capacity,
long dfsUsed,
long remaining,
int xmitsInProgress,
int xceiverCount) throws IOException {
verifyRequest(nodeReg);
// NOTE(review): the last two arguments are passed in the opposite order from
// this method's parameter list — confirm handleHeartbeat's signature really
// expects (xceiverCount, xmitsInProgress).
return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining,
xceiverCount, xmitsInProgress);
}
/**
 * Processes a full block report from a datanode. Returns a FINALIZE command
 * when the image upgrade is finalized, otherwise null (no action needed).
 */
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
long[] blocks) throws IOException {
verifyRequest(nodeReg);
// Block list arrives in compact long[] form; wrap for structured access.
BlockListAsLongs blist = new BlockListAsLongs(blocks);
stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks");
namesystem.processReport(nodeReg, blist);
if (getFSImage().isUpgradeFinalized())
return DatanodeCommand.FINALIZE;
return null;
}
/**
 * Notifies the namesystem that the given blocks were received by the datanode.
 * {@code delHints} is a parallel array of deletion hints, one per block.
 */
public void blockReceived(DatanodeRegistration nodeReg,
Block blocks[],
String delHints[]) throws IOException {
verifyRequest(nodeReg);
stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
+"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
for (int i = 0; i < blocks.length; i++) {
namesystem.blockReceived(nodeReg, blocks[i], delHints[i]);
}
}
/**
 * Handles an error report from a datanode. Pure notifications are only logged;
 * disk errors cause the reporting datanode to be removed from service.
 */
public void errorReport(DatanodeRegistration nodeReg,
int errorCode,
String msg) throws IOException {
// Log error message from datanode
String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
LOG.info("Error report from " + dnName + ": " + msg);
// NOTIFY is informational only — skip request verification and return.
if (errorCode == DatanodeProtocol.NOTIFY) {
return;
}
verifyRequest(nodeReg);
if (errorCode == DatanodeProtocol.DISK_ERROR) {
namesystem.removeDatanode(nodeReg);
}
}
/** Returns this namenode's namespace information (version handshake). */
public NamespaceInfo versionRequest() throws IOException {
return namesystem.getNamespaceInfo();
}
/** Delegates a distributed-upgrade command to the namesystem. */
public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException {
return namesystem.processDistributedUpgradeCommand(comm);
}
/**
 * Verify request.
 *
 * Verifies correctness of the datanode version, registration ID, and
 * if the datanode does not need to be shutdown.
 *
 * @param nodeReg data node registration
 * @throws IOException on a version mismatch
 * @throws UnregisteredDatanodeException if the registration id is stale
 */
public void verifyRequest(DatanodeRegistration nodeReg) throws IOException {
verifyVersion(nodeReg.getVersion());
if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID()))
throw new UnregisteredDatanodeException(nodeReg);
}
/**
 * Verify version.
 *
 * @param version layout version reported by the datanode
 * @throws IncorrectVersionException if it differs from this namenode's LAYOUT_VERSION
 */
public void verifyVersion(int version) throws IOException {
if (version != LAYOUT_VERSION)
throw new IncorrectVersionException(version, "data node");
}
/**
 * Returns the name of the fsImage file
 */
public File getFsImageName() throws IOException {
return getFSImage().getFsImageName();
}
/** Returns the FSImage owned by the namesystem's directory. */
public FSImage getFSImage() {
return namesystem.dir.fsImage;
}
/**
 * Returns the name of the fsImage file uploaded by periodic
 * checkpointing
 */
public File[] getFsImageNameCheckpoint() throws IOException {
return getFSImage().getFsImageNameCheckpoint();
}
/**
 * Returns the address on which the NameNodes is listening to.
 * @return the address on which the NameNodes is listening to.
 */
public InetSocketAddress getNameNodeAddress() {
return nameNodeAddress;
}
/** Package-private accessor for the cluster's network topology map. */
NetworkTopology getNetworkTopology() {
return this.namesystem.clusterMap;
}
/**
 * Verify that configured directories exist, then
 * Interactively confirm that formatting is desired
 * for each existing directory and format them.
 *
 * @param conf configuration
 * @param isConfirmationNeeded prompt on stderr before destroying each directory
 * @return true if formatting was aborted, false otherwise
 * @throws IOException
 */
private static boolean format(Configuration conf,
                              boolean isConfirmationNeeded
                              ) throws IOException {
    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
    Collection<File> editDirsToFormat =
        FSNamesystem.getNamespaceEditsDirs(conf);
    for (Iterator<File> it = dirsToFormat.iterator(); it.hasNext();) {
        File curDir = it.next();
        if (!curDir.exists())
            continue;
        if (isConfirmationNeeded) {
            System.err.print("Re-format filesystem in " + curDir + " ? (Y or N) ");
            if (!(System.in.read() == 'Y')) {
                System.err.println("Format aborted in " + curDir);
                return true;
            }
            // Discard the rest of the line, but also stop on end-of-stream (-1):
            // the previous unconditional read loop would spin forever when stdin
            // was closed or redirected from an exhausted source.
            int c;
            while ((c = System.in.read()) != '\n' && c != -1) {
                // discard the enter-key
            }
        }
    }
    FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat,
        editDirsToFormat), conf);
    nsys.dir.fsImage.format();
    return false;
}
/**
 * Finalize a previous upgrade: permanently remove the pre-upgrade state of
 * the file system, after an interactive confirmation. Once finalized the
 * rollback option is no longer available.
 *
 * @param conf configuration naming the image and edits directories
 * @param isConfirmationNeeded when true, prompt on stdin; only an uppercase
 * 'Y' confirms
 * @return true if finalization was aborted by the user, false otherwise
 * @throws IOException
 */
private static boolean finalize(Configuration conf,
boolean isConfirmationNeeded
) throws IOException {
Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
Collection<File> editDirsToFormat =
FSNamesystem.getNamespaceEditsDirs(conf);
FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat,
editDirsToFormat), conf);
System.err.print(
"\"finalize\" will remove the previous state of the files system.\n"
+ "Recent upgrade will become permanent.\n"
+ "Rollback option will not be available anymore.\n");
if (isConfirmationNeeded) {
System.err.print("Finalize filesystem state ? (Y or N) ");
// NOTE(review): only uppercase 'Y' confirms; any other byte aborts.
if (!(System.in.read() == 'Y')) {
System.err.println("Finalize aborted.");
return true;
}
while(System.in.read() != '\n'); // discard the enter-key
}
nsys.dir.fsImage.finalizeUpgrade();
return false;
}
/**
 * Print the NameNode command-line usage summary to stderr.
 */
private static void printUsage() {
  StringBuilder usage = new StringBuilder("Usage: java NameNode [");
  usage.append(StartupOption.FORMAT.getName()).append("] | [");
  usage.append(StartupOption.UPGRADE.getName()).append("] | [");
  usage.append(StartupOption.ROLLBACK.getName()).append("] | [");
  usage.append(StartupOption.FINALIZE.getName()).append("] | [");
  usage.append(StartupOption.IMPORT.getName()).append("]");
  System.err.println(usage.toString());
}
/**
 * Map command-line arguments to a StartupOption and record the choice in
 * the configuration.
 *
 * @param args command-line arguments; may be null (treated as empty)
 * @param conf configuration in which the selected option is recorded
 * @return the selected option (REGULAR when no arguments are given), or
 *         null if any argument is unrecognized
 */
private static StartupOption parseArguments(String args[],
                                            Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  if (args != null) {
    for (String cmd : args) {
      if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.FORMAT;
      } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.REGULAR;
      } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.UPGRADE;
      } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.ROLLBACK;
      } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.FINALIZE;
      } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.IMPORT;
      } else {
        // Unknown argument: signal a usage error to the caller.
        return null;
      }
    }
  }
  setStartupOption(conf, startOpt);
  return startOpt;
}
/**
 * Record the chosen startup option under "dfs.namenode.startup" so it can
 * be read back later via getStartupOption().
 */
private static void setStartupOption(Configuration conf, StartupOption opt) {
conf.set("dfs.namenode.startup", opt.toString());
}
/**
 * Read the startup option recorded in the configuration by
 * setStartupOption(), defaulting to REGULAR when the key is absent.
 */
static StartupOption getStartupOption(Configuration conf) {
return StartupOption.valueOf(conf.get("dfs.namenode.startup",
StartupOption.REGULAR.toString()));
}
/**
 * Parse the command line and either run a one-shot administrative action
 * (format/finalize, which terminate the JVM via System.exit) or construct
 * and return a running NameNode.
 *
 * @param argv command-line arguments; may be null
 * @param conf configuration to use; a fresh Configuration is created when null
 * @return the NameNode instance, or null if the arguments were invalid
 *         (usage has already been printed)
 * @throws IOException if NameNode construction fails
 */
public static NameNode createNameNode(String argv[],
                                      Configuration conf) throws IOException {
  if (conf == null) {
    conf = new Configuration();
  }
  StartupOption startOpt = parseArguments(argv, conf);
  if (startOpt == null) {
    printUsage();
    return null;
  }
  // The original switch fell through from FORMAT into FINALIZE and reused
  // the same local across cases; it only behaved because System.exit()
  // never returns. Braced, self-contained cases make the intent explicit
  // and survive future edits that might remove the exit calls.
  switch (startOpt) {
    case FORMAT: {
      boolean aborted = format(conf, true);
      System.exit(aborted ? 1 : 0);
      break; // unreachable; keeps the case well-formed
    }
    case FINALIZE: {
      boolean aborted = finalize(conf, true);
      System.exit(aborted ? 1 : 0);
      break; // unreachable; keeps the case well-formed
    }
    default:
      break;
  }
  NameNode namenode = new NameNode(conf);
  return namenode;
}
/**
 * Command-line entry point: create a NameNode and block until it stops.
 * Any startup failure is logged and the process exits with status -1.
 */
public static void main(String argv[]) throws Exception {
try {
StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
// createNameNode returns null for bad arguments (usage already printed)
// and never returns at all for format/finalize (System.exit).
NameNode namenode = createNameNode(argv, null);
if (namenode != null)
namenode.join();
} catch (Throwable e) {
LOG.error(StringUtils.stringifyException(e));
System.exit(-1);
}
}
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tez.examples;
import java.io.IOException;
import java.util.StringTokenizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.ToolRunner;
import org.apache.tez.client.TezClient;
import org.apache.tez.dag.api.DAG;
import org.apache.tez.dag.api.DataSinkDescriptor;
import org.apache.tez.dag.api.DataSourceDescriptor;
import org.apache.tez.dag.api.Edge;
import org.apache.tez.dag.api.ProcessorDescriptor;
import org.apache.tez.dag.api.TezConfiguration;
import org.apache.tez.dag.api.Vertex;
import org.apache.tez.mapreduce.input.MRInput;
import org.apache.tez.mapreduce.output.MROutput;
import org.apache.tez.mapreduce.processor.SimpleMRProcessor;
import org.apache.tez.runtime.api.ProcessorContext;
import org.apache.tez.runtime.library.api.KeyValueReader;
import org.apache.tez.runtime.library.api.KeyValueWriter;
import org.apache.tez.runtime.library.api.KeyValuesReader;
import org.apache.tez.runtime.library.conf.OrderedPartitionedKVEdgeConfig;
import org.apache.tez.runtime.library.partitioner.HashPartitioner;
import org.apache.tez.runtime.library.processor.SimpleProcessor;
import com.google.common.base.Preconditions;
/**
 * Simple example to perform WordCount using Tez API's. WordCount is the
 * HelloWorld program of distributed data processing and counts the number
 * of occurrences of a word in a distributed text data set.
 */
public class WordCount extends TezExampleBase {
  // Logical names used to wire inputs, outputs and vertices into the DAG.
  // Declared final: they are constants and must never be reassigned.
  static final String INPUT = "Input";
  static final String OUTPUT = "Output";
  static final String TOKENIZER = "Tokenizer";
  static final String SUMMATION = "Summation";
  private static final Logger LOG = LoggerFactory.getLogger(WordCount.class);

  /*
   * Example code to write a processor in Tez.
   * Processors typically apply the main application logic to the data.
   * TokenProcessor tokenizes the input data.
   * It uses an input that provide a Key-Value reader and writes
   * output to a Key-Value writer. The processor inherits from SimpleProcessor
   * since it does not need to handle any advanced constructs for Processors.
   */
  public static class TokenProcessor extends SimpleProcessor {
    IntWritable one = new IntWritable(1);
    Text word = new Text();

    public TokenProcessor(ProcessorContext context) {
      super(context);
    }

    @Override
    public void run() throws Exception {
      Preconditions.checkArgument(getInputs().size() == 1);
      Preconditions.checkArgument(getOutputs().size() == 1);
      // the recommended approach is to cast the reader/writer to a specific type instead
      // of casting the input/output. This allows the actual input/output type to be replaced
      // without affecting the semantic guarantees of the data type that are represented by
      // the reader and writer.
      // The inputs/outputs are referenced via the names assigned in the DAG.
      KeyValueReader kvReader = (KeyValueReader) getInputs().get(INPUT).getReader();
      KeyValueWriter kvWriter = (KeyValueWriter) getOutputs().get(SUMMATION).getWriter();
      while (kvReader.next()) {
        StringTokenizer itr = new StringTokenizer(kvReader.getCurrentValue().toString());
        while (itr.hasMoreTokens()) {
          word.set(itr.nextToken());
          // Count 1 every time a word is observed. Word is the key a 1 is the value
          kvWriter.write(word, one);
        }
      }
    }
  }

  /*
   * Example code to write a processor that commits final output to a data sink
   * The SumProcessor aggregates the sum of individual word counts generated by
   * the TokenProcessor.
   * The SumProcessor is connected to a DataSink. In this case, its an Output that
   * writes the data via an OutputFormat to a data sink (typically HDFS). Thats why
   * it derives from SimpleMRProcessor that takes care of handling the necessary
   * output commit operations that makes the final output available for consumers.
   */
  public static class SumProcessor extends SimpleMRProcessor {
    public SumProcessor(ProcessorContext context) {
      super(context);
    }

    @Override
    public void run() throws Exception {
      Preconditions.checkArgument(getInputs().size() == 1);
      Preconditions.checkArgument(getOutputs().size() == 1);
      KeyValueWriter kvWriter = (KeyValueWriter) getOutputs().get(OUTPUT).getWriter();
      // The KeyValues reader provides all values for a given key. The aggregation of values per key
      // is done by the LogicalInput. Since the key is the word and the values are its counts in
      // the different TokenProcessors, summing all values per key provides the sum for that word.
      KeyValuesReader kvReader = (KeyValuesReader) getInputs().get(TOKENIZER).getReader();
      while (kvReader.next()) {
        Text word = (Text) kvReader.getCurrentKey();
        int sum = 0;
        for (Object value : kvReader.getCurrentValues()) {
          sum += ((IntWritable) value).get();
        }
        kvWriter.write(word, new IntWritable(sum));
      }
      // deriving from SimpleMRProcessor takes care of committing the output
      // It automatically invokes the commit logic for the OutputFormat if necessary.
    }
  }

  /**
   * Build the two-vertex WordCount DAG: Tokenizer (reads text via MRInput)
   * connected by an ordered-partitioned KV edge to Summation (writes via
   * MROutput).
   *
   * @param tezConf       Tez configuration used for the data source/sink and edge
   * @param inputPath     path of the text input data
   * @param outputPath    path where the word counts are written
   * @param numPartitions parallelism of the summation vertex
   * @throws IOException if the data source/sink descriptors cannot be built
   */
  private DAG createDAG(TezConfiguration tezConf, String inputPath, String outputPath,
      int numPartitions) throws IOException {
    // Create the descriptor that describes the input data to Tez. Using MRInput to read text
    // data from the given input path. The TextInputFormat is used to read the text data.
    DataSourceDescriptor dataSource = MRInput.createConfigBuilder(new Configuration(tezConf),
        TextInputFormat.class, inputPath).groupSplits(!isDisableSplitGrouping()).build();
    // Create a descriptor that describes the output data to Tez. Using MROoutput to write text
    // data to the given output path. The TextOutputFormat is used to write the text data.
    DataSinkDescriptor dataSink = MROutput.createConfigBuilder(new Configuration(tezConf),
        TextOutputFormat.class, outputPath).build();
    // Create a vertex that reads the data from the data source and tokenizes it using the
    // TokenProcessor. The number of tasks that will do the work for this vertex will be decided
    // using the information provided by the data source descriptor.
    Vertex tokenizerVertex = Vertex.create(TOKENIZER, ProcessorDescriptor.create(
        TokenProcessor.class.getName())).addDataSource(INPUT, dataSource);
    // Create the edge that represents the movement and semantics of data between the producer
    // Tokenizer vertex and the consumer Summation vertex. In order to perform the summation in
    // parallel the tokenized data will be partitioned by word such that a given word goes to the
    // same partition. The counts for the words should be grouped together per word. To achieve this
    // we can use an edge that contains an input/output pair that handles partitioning and grouping
    // of key value data. We use the helper OrderedPartitionedKVEdgeConfig to create such an
    // edge. Internally, it sets up matching Tez inputs and outputs that can perform this logic.
    // We specify the key, value and partitioner type. Here the key type is Text (for word), the
    // value type is IntWritable (for count) and we using a hash based partitioner. This is a helper
    // object. The edge can be configured by configuring the input, output etc individually without
    // using this helper. The setFromConfiguration call is optional and allows overriding the config
    // options with command line parameters.
    OrderedPartitionedKVEdgeConfig edgeConf = OrderedPartitionedKVEdgeConfig
        .newBuilder(Text.class.getName(), IntWritable.class.getName(),
            HashPartitioner.class.getName())
        .setFromConfiguration(tezConf)
        .build();
    // Create a vertex that reads the tokenized data and calculates the sum using the SumProcessor.
    // The number of tasks that do the work of this vertex depends on the number of partitions used
    // to distribute the sum processing. In this case, its been made configurable via the
    // numPartitions parameter.
    Vertex summationVertex = Vertex.create(SUMMATION,
        ProcessorDescriptor.create(SumProcessor.class.getName()), numPartitions)
        .addDataSink(OUTPUT, dataSink);
    // No need to add jar containing this class as assumed to be part of the Tez jars. Otherwise
    // we would have to add the jars for this code as local files to the vertices.
    // Create DAG and add the vertices. Connect the producer and consumer vertices via the edge
    DAG dag = DAG.create("WordCount");
    dag.addVertex(tokenizerVertex)
        .addVertex(summationVertex)
        .addEdge(
            Edge.create(tokenizerVertex, summationVertex, edgeConf.createDefaultEdgeProperty()));
    return dag;
  }

  @Override
  protected void printUsage() {
    System.err.println("Usage: " + " wordcount in out [numPartitions]");
  }

  @Override
  protected int validateArgs(String[] otherArgs) {
    // Exactly "in out" with an optional numPartitions is accepted.
    if (otherArgs.length < 2 || otherArgs.length > 3) {
      return 2;
    }
    return 0;
  }

  @Override
  protected int runJob(String[] args, TezConfiguration tezConf,
      TezClient tezClient) throws Exception {
    // numPartitions defaults to 1 when the third argument is omitted.
    DAG dag = createDAG(tezConf, args[0], args[1],
        args.length == 3 ? Integer.parseInt(args[2]) : 1);
    LOG.info("Running WordCount");
    return runDag(dag, isCountersLog(), LOG);
  }

  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(), new WordCount(), args);
    System.exit(res);
  }
}
|
|
/*
* Copyright Terracotta, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.terracotta.dynamic_config.cli.api.command;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.terracotta.common.struct.Measure;
import org.terracotta.common.struct.TimeUnit;
import org.terracotta.dynamic_config.api.model.Cluster;
import org.terracotta.dynamic_config.api.model.FailoverPriority;
import org.terracotta.dynamic_config.api.model.Identifier;
import org.terracotta.dynamic_config.api.model.Node;
import org.terracotta.dynamic_config.api.model.Node.Endpoint;
import org.terracotta.dynamic_config.api.model.PropertyHolder;
import org.terracotta.dynamic_config.api.model.Scope;
import org.terracotta.dynamic_config.api.model.Stripe;
import org.terracotta.dynamic_config.api.model.UID;
import org.terracotta.dynamic_config.api.model.nomad.NodeRemovalNomadChange;
import org.terracotta.dynamic_config.api.model.nomad.StripeRemovalNomadChange;
import org.terracotta.dynamic_config.api.model.nomad.TopologyNomadChange;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import static java.lang.System.lineSeparator;
import static org.terracotta.dynamic_config.api.model.FailoverPriority.Type.CONSISTENCY;
import static org.terracotta.dynamic_config.cli.api.converter.OperationType.NODE;
import static org.terracotta.dynamic_config.cli.api.converter.OperationType.STRIPE;
/**
 * Implements the "detach" topology operation: removes either a single node
 * or a whole stripe from the destination cluster, after a series of safety
 * validations, and optionally resets/stops the removed nodes.
 *
 * @author Mathieu Carbou
 */
public class DetachAction extends TopologyAction {
private static final Logger LOGGER = LoggerFactory.getLogger(DetachAction.class);
// Maximum time to wait for removed nodes to stop, and the polling delay
// between stop checks. Defaults: 120s wait, 2s delay.
protected Measure<TimeUnit> stopWaitTime = Measure.of(120, TimeUnit.SECONDS);
protected Measure<TimeUnit> stopDelay = Measure.of(2, TimeUnit.SECONDS);
// Identifier of the node or stripe to detach (interpreted per operationType).
protected Identifier sourceIdentifier;
// Online endpoints that will be removed; populated by markNodeForRemoval().
protected final Collection<Endpoint> onlineNodesToRemove = new ArrayList<>(1);
// Resolved source object (a Node or a Stripe, depending on operationType).
protected PropertyHolder source;
// Set only for stripe-level detach: the stripe being removed.
protected Stripe stripeToDetach;
public void setStopWaitTime(Measure<TimeUnit> stopWaitTime) {
this.stopWaitTime = stopWaitTime;
}
public void setStopDelay(Measure<TimeUnit> stopDelay) {
this.stopDelay = stopDelay;
}
public void setSourceIdentifier(Identifier sourceIdentifier) {
this.sourceIdentifier = sourceIdentifier;
}
/**
 * Validate the detach request against the destination cluster topology.
 * Resolves {@code sourceIdentifier}, enforces node/stripe-specific rules,
 * and marks the affected online nodes for removal.
 */
@Override
protected void validate() {
super.validate();
// A cluster must keep at least one node after the detach.
if (destinationCluster.getNodeCount() == 1) {
throw new IllegalStateException("Unable to detach since destination cluster contains only 1 node");
}
if (operationType == NODE) {
source = sourceIdentifier.findObject(destinationCluster, Scope.NODE)
.orElseThrow(() -> new IllegalStateException("Source: " + sourceIdentifier + " is not part of cluster: " + destinationCluster.toShapeString()));
// The node used as the command destination cannot be the one removed.
if (destination.getNodeUID().equals(source.getUID())) {
throw new IllegalArgumentException("The destination and the source nodes must not be the same");
}
// Node-level detach only works within the destination's own stripe.
if (!destinationCluster.inSameStripe(source.getUID(), destination.getNodeUID()).isPresent()) {
throw new IllegalStateException("Source node: " + sourceIdentifier + " is not present in the same stripe as destination: " + destination);
}
Stripe destinationStripe = destinationCluster.getStripeByNode(destination.getNodeUID()).get();
if (destinationStripe.getNodeCount() == 1) {
throw new IllegalStateException("Unable to detach since destination stripe contains only 1 node");
}
// With consistency-based failover, removing a node from an odd
// voters+nodes total leaves an even total, which is more prone to
// split-brain; warn (but do not fail).
FailoverPriority failoverPriority = destinationCluster.getFailoverPriority().orElse(null);
if (failoverPriority != null && failoverPriority.getType() == CONSISTENCY) {
int voterCount = failoverPriority.getVoters();
int nodeCount = destinationStripe.getNodes().size();
int sum = voterCount + nodeCount;
if (sum % 2 != 0) {
LOGGER.warn(lineSeparator() +
"===================================================================================" + lineSeparator() +
"IMPORTANT: The sum (" + sum + ") of voter count (" + voterCount + ") and number of nodes " +
"(" + nodeCount + ") in this stripe " + lineSeparator() +
"is an odd number, which will become even with the removal of node " + sourceIdentifier + "." + lineSeparator() +
"An even-numbered configuration is more likely to experience split-brain situations." + lineSeparator() +
"===================================================================================" + lineSeparator());
}
}
// we only prevent detaching nodes if some remaining nodes must be restarted
for (Endpoint endpoint : destinationOnlineNodes.keySet()) {
if (!endpoint.getNodeUID().equals(source.getUID())) {
// prevent any topology change if a configuration change has been made through Nomad, requiring a restart, but nodes were not restarted yet
// we only check the remaining nodes, not the departing nodes.
validateLogOrFail(
() -> !mustBeRestarted(endpoint),
"Impossible to do any topology change. Node: " + endpoint + " is waiting to be restarted to apply some pending changes. Please refer to the Troubleshooting Guide for more help.");
}
}
// when we want to detach a node
markNodeForRemoval(source.getUID());
} else {
source = sourceIdentifier.findObject(destinationCluster, Scope.STRIPE)
.orElseThrow(() -> new IllegalStateException("Source: " + sourceIdentifier + " is not part of cluster: " + destinationCluster.toShapeString()));
stripeToDetach = destinationCluster.getStripe(source.getUID()).get();
// The destination node must live outside the stripe being removed.
if (stripeToDetach.containsNode(destination.getNodeUID())) {
throw new IllegalStateException("Source: " + sourceIdentifier + " and destination: " + destination + " are part of the same stripe: " + stripeToDetach.toShapeString());
}
if (destinationClusterActivated) {
if (destinationCluster.getStripeId(source.getUID()).getAsInt() == 1) {
throw new IllegalStateException("Removing the leading stripe is not allowed");
}
}
// we only prevent detaching nodes if some remaining nodes must be restarted
for (Endpoint endpoint : destinationOnlineNodes.keySet()) {
if (!stripeToDetach.containsNode(endpoint.getNodeUID())) {
// prevent any topology change if a configuration change has been made through Nomad, requiring a restart, but nodes were not restarted yet
// we only check the remaining nodes, not the departing nodes.
validateLogOrFail(
() -> !mustBeRestarted(endpoint),
"Impossible to do any topology change. Node: " + endpoint + " is waiting to be restarted to apply some pending changes. Please refer to the Troubleshooting Guide for more help.");
}
}
// when we want to detach a stripe, we detach all the nodes of the stripe
stripeToDetach.getNodes().stream().map(Node::getUID).forEach(this::markNodeForRemoval);
}
// When the operation type is node, the nodes being detached should be stopped first manually
// But if the operation type is stripe, the stripes being detached are stopped automatically after they're removed
if (operationType == NODE) {
if (!onlineNodesToRemove.isEmpty() && areAllNodesActivated(onlineNodesToRemove)) {
validateLogOrFail(onlineNodesToRemove::isEmpty, "Nodes to be detached: " + toString(onlineNodesToRemove) + " are online. " +
"Nodes must be safely shutdown first. Please refer to the Troubleshooting Guide for more help.");
}
}
}
/**
 * Build the new cluster topology with the source node or stripe removed.
 */
@Override
protected Cluster updateTopology() {
Cluster cluster = destinationCluster.clone();
switch (operationType) {
case NODE: {
output.info("Detaching node: {} from cluster: {}", source.getName(), destinationCluster.getName());
cluster.removeNode(source.getUID());
break;
}
case STRIPE: {
output.info("Detaching stripe: {} from cluster: {}", source.getName(), destinationCluster.getName());
cluster.removeStripe(source.getUID());
break;
}
default: {
throw new UnsupportedOperationException(operationType.name());
}
}
return cluster;
}
/**
 * Wrap the updated topology in the matching Nomad change
 * (node removal or stripe removal).
 */
@Override
protected TopologyNomadChange buildNomadChange(Cluster result) {
switch (operationType) {
case NODE:
return new NodeRemovalNomadChange(
result,
destinationCluster.getStripeByNode(source.getUID()).get().getUID(),
destinationCluster.getNode(source.getUID()).get());
case STRIPE: {
return new StripeRemovalNomadChange(result, stripeToDetach);
}
default: {
throw new UnsupportedOperationException(operationType.name());
}
}
}
@Override
protected void onNomadChangeReady(TopologyNomadChange nomadChange) {
// When the operation type is node, the nodes being detached should be stopped first manually
// But if the operation type is stripe, the stripes being detached are stopped automatically after they're removed
if (operationType == NODE) {
resetAndStopNodesToRemove();
}
}
@Override
protected void onNomadChangeSuccess(TopologyNomadChange nomadChange) {
// When the operation type is node, the nodes being detached should be stopped first manually
// But if the operation type is stripe, the stripes being detached are stopped automatically after they're removed
if (operationType == STRIPE) {
resetAndStopNodesToRemove();
}
}
@Override
protected Collection<Endpoint> getAllOnlineSourceNodes() {
return onlineNodesToRemove;
}
/**
 * Reset each marked node (best-effort: reset failures are logged, not
 * fatal), stop them all, then refresh the view of online nodes and their
 * states in case a failover happened.
 */
private void resetAndStopNodesToRemove() {
if (!onlineNodesToRemove.isEmpty()) {
output.info("Reset nodes: {}", toString(onlineNodesToRemove));
for (Endpoint endpoint : onlineNodesToRemove) {
try {
reset(endpoint);
} catch (RuntimeException e) {
// Best-effort: keep going so the remaining nodes are still stopped.
LOGGER.warn("Error during reset of node: {}: {}", endpoint, e.getMessage(), e);
}
}
output.info("Stopping nodes: {}", toString(onlineNodesToRemove));
stopNodes(
onlineNodesToRemove,
Duration.ofMillis(stopWaitTime.getQuantity(TimeUnit.MILLISECONDS)),
Duration.ofMillis(stopDelay.getQuantity(TimeUnit.MILLISECONDS)));
// if we have stopped some nodes, we need to update the list of nodes online
destinationOnlineNodes.keySet().removeAll(onlineNodesToRemove);
// if a failover happened, make sure we get the new server states
destinationOnlineNodes.entrySet().forEach(e -> e.setValue(getState(e.getKey())));
}
}
private void markNodeForRemoval(UID nodeUID) {
// search if this node is online, if yes, mark it for removal
// "onlineNodesToRemove" keeps track of the nodes to connect to
// to update their topology
destinationOnlineNodes.keySet()
.stream()
.filter(endpoint -> endpoint.getNodeUID().equals(nodeUID))
.findAny()
.ifPresent(onlineNodesToRemove::add);
}
}
|
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.wss4j.dom.util;
import java.text.DateFormat;
import java.text.FieldPosition;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.text.ParseException;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;
/**
 * A {@link DateFormat} for the format of the dateTime simpleType as specified in the
 * XML Schema specification. See <a href="http://www.w3.org/TR/xmlschema-2/#dateTime">
 * XML Schema Part 2: Datatypes, W3C Recommendation 02 May 2001, Section 3.2.7.1</a>.
 */
public class XmlSchemaDateFormat extends DateFormat {
/**
 *
 */
private static final long serialVersionUID = 5152684993503882396L;
/**
 * Logger.
 */
private static final org.slf4j.Logger LOG =
org.slf4j.LoggerFactory.getLogger(XmlSchemaDateFormat.class);
/**
 * DateFormat for Zulu (UTC) form of an XML Schema dateTime string.
 * NOTE(review): this is a single shared mutable formatter; parse/format
 * synchronize on it, but setLenient() below mutates it without
 * synchronization — confirm concurrent usage expectations.
 */
private static final DateFormat DATEFORMAT_XSD_ZULU = new SimpleDateFormat(
"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", Locale.ENGLISH);
static {
DATEFORMAT_XSD_ZULU.setTimeZone(TimeZone.getTimeZone("UTC"));
}
// Mutates the shared static formatter, so this affects every instance of
// this class, not just the one setLenient() was called on.
@Override
public void setLenient(boolean lenient) {
DATEFORMAT_XSD_ZULU.setLenient(lenient);
}
/**
 * This method was snarfed from <tt>org.apache.axis.encoding.ser.CalendarDeserializer</tt>,
 * which was written by Sam Ruby ([email protected]) and Rich Scheuerle ([email protected]).
 * Better error reporting was added.
 *
 * Parses the fixed "yyyy-MM-ddTHH:mm:ss" prefix with the shared formatter,
 * then hand-parses the optional fractional seconds and "+hh:mm"/"-hh:mm"
 * timezone offset and applies them to the resulting Date.
 *
 * NOTE(review): a null {@code src} reaches DATEFORMAT_XSD_ZULU.parse(null),
 * which throws NullPointerException rather than signalling a parse failure
 * — confirm callers never pass null.
 *
 * @see DateFormat#parse(java.lang.String)
 */
public Date parse(String src, ParsePosition parsePos) {
Date date;
// validate fixed portion of format
int index = 0;
try {
if (src != null) {
// A leading sign (negative year per XML Schema) is stripped and
// otherwise ignored.
if (src.charAt(0) == '+' || src.charAt(0) == '-') {
src = src.substring(1);
}
if (src.length() < 19) {
parsePos.setIndex(src.length() - 1);
handleParseError(parsePos, "TOO_FEW_CHARS");
}
// Fixed separators of "yyyy-MM-ddTHH:mm:ss".
validateChar(src, parsePos, index = 4, '-', "EXPECTED_DASH");
validateChar(src, parsePos, index = 7, '-', "EXPECTED_DASH");
validateChar(src, parsePos, index = 10, 'T', "EXPECTED_CAPITAL_T");
validateChar(src, parsePos, index = 13, ':', "EXPECTED_COLON_IN_TIME");
validateChar(src, parsePos, index = 16, ':', "EXPECTED_COLON_IN_TIME");
}
// convert what we have validated so far
synchronized (DATEFORMAT_XSD_ZULU) {
date = DATEFORMAT_XSD_ZULU.parse(src == null ? null
: src.substring(0, 19) + ".000Z");
}
index = 19;
// parse optional milliseconds
if (src != null) {
if (index < src.length() && src.charAt(index) == '.') {
int milliseconds = 0;
int start = ++index;
while (index < src.length()
&& Character.isDigit(src.charAt(index))) {
index++;
}
String decimal = src.substring(start, index);
if (decimal.length() == 3) {
milliseconds = Integer.parseInt(decimal);
} else if (decimal.length() < 3) {
// Right-pad short fractions: ".5" means 500 ms.
String substring = decimal + "000";
milliseconds = Integer.parseInt(substring.substring(0, 3));
} else {
// Truncate to milliseconds, rounding up on the 4th digit.
milliseconds = Integer.parseInt(decimal.substring(0, 3));
if (decimal.charAt(3) >= '5') {
++milliseconds;
}
}
// add milliseconds to the current date
date.setTime(date.getTime() + milliseconds);
}
// parse optional timezone
if (index + 5 < src.length()
&& (src.charAt(index) == '+' || src.charAt(index) == '-')) {
validateCharIsDigit(src, parsePos, index + 1, "EXPECTED_NUMERAL");
validateCharIsDigit(src, parsePos, index + 2, "EXPECTED_NUMERAL");
validateChar(src, parsePos, index + 3, ':', "EXPECTED_COLON_IN_TIMEZONE");
validateCharIsDigit(src, parsePos, index + 4, "EXPECTED_NUMERAL");
validateCharIsDigit(src, parsePos, index + 5, "EXPECTED_NUMERAL");
final int hours = (src.charAt(index + 1) - '0') * 10
+ src.charAt(index + 2) - '0';
final int mins = (src.charAt(index + 4) - '0') * 10
+ src.charAt(index + 5) - '0';
int millisecs = (hours * 60 + mins) * 60 * 1000;
// subtract millisecs from current date to obtain GMT
if (src.charAt(index) == '+') {
millisecs = -millisecs;
}
date.setTime(date.getTime() + millisecs);
index += 6;
}
// Optional trailing 'Z' (UTC) marker.
if (index < src.length() && src.charAt(index) == 'Z') {
index++;
}
// Anything left over after the recognized grammar is an error.
if (index < src.length()) {
handleParseError(parsePos, "TOO_MANY_CHARS");
}
}
} catch (ParseException pe) {
LOG.error(pe.toString(), pe);
index = 0; // IMPORTANT: this tells DateFormat.parse() to throw a ParseException
parsePos.setErrorIndex(index);
date = null;
}
parsePos.setIndex(index);
return date;
}
/**
 * Formats the date in the Zulu (UTC) "yyyy-MM-ddTHH:mm:ss.SSSZ" form,
 * appending to {@code appendBuf} (a fresh buffer is created when null).
 *
 * @see DateFormat#format(java.util.Date)
 */
public StringBuffer format(Date date, StringBuffer appendBuf,
FieldPosition fieldPos) {
String str;
synchronized (DATEFORMAT_XSD_ZULU) {
str = DATEFORMAT_XSD_ZULU.format(date);
}
if (appendBuf == null) {
appendBuf = new StringBuffer();
}
appendBuf.append(str);
return appendBuf;
}
// Throws (via handleParseError) if str[index] is not the expected character.
private void validateChar(String str, ParsePosition parsePos, int index,
char expected, String errorReason) throws ParseException {
if (str.charAt(index) != expected) {
handleParseError(parsePos, errorReason);
}
}
// Throws (via handleParseError) if str[index] is not a digit.
private void validateCharIsDigit(String str, ParsePosition parsePos,
int index, String errorReason) throws ParseException {
if (!Character.isDigit(str.charAt(index))) {
handleParseError(parsePos, errorReason);
}
}
// Always throws; the reason string is embedded in the exception message.
// NOTE(review): parsePos.getErrorIndex() is typically still -1 here, so the
// reported offset carries little information — confirm before relying on it.
private void handleParseError(ParsePosition parsePos, String errorReason)
throws ParseException {
throw new ParseException(
"INVALID_XSD_DATETIME: " + errorReason,
parsePos.getErrorIndex()
);
}
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.catalina.connector;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.util.EnumSet;
import javax.servlet.RequestDispatcher;
import javax.servlet.SessionTrackingMode;
import org.apache.catalina.Context;
import org.apache.catalina.Host;
import org.apache.catalina.Wrapper;
import org.apache.catalina.comet.CometEvent;
import org.apache.catalina.comet.CometEvent.EventType;
import org.apache.catalina.core.AsyncContextImpl;
import org.apache.catalina.util.ServerInfo;
import org.apache.catalina.util.SessionConfig;
import org.apache.catalina.util.URLEncoder;
import org.apache.coyote.ActionCode;
import org.apache.coyote.Adapter;
import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;
import org.apache.tomcat.util.ExceptionUtils;
import org.apache.tomcat.util.buf.B2CConverter;
import org.apache.tomcat.util.buf.ByteChunk;
import org.apache.tomcat.util.buf.CharChunk;
import org.apache.tomcat.util.buf.MessageBytes;
import org.apache.tomcat.util.http.Cookies;
import org.apache.tomcat.util.http.ServerCookie;
import org.apache.tomcat.util.net.SSLSupport;
import org.apache.tomcat.util.net.SocketStatus;
import org.apache.tomcat.util.res.StringManager;
/**
* Implementation of a request processor which delegates the processing to a
* Coyote processor.
*
* @author Craig R. McClanahan
* @author Remy Maucherat
* @version $Id: CoyoteAdapter.java 1240861 2012-02-05 23:30:20Z markt $
*/
public class CoyoteAdapter implements Adapter {
    private static final Log log = LogFactory.getLog(CoyoteAdapter.class);
    // -------------------------------------------------------------- Constants
    // Value advertised via the X-Powered-By response header when the
    // connector has xpoweredBy enabled (see service()).
    private static final String POWERED_BY = "Servlet/3.0 JSP/2.2 " +
            "(" + ServerInfo.getServerInfo() + " Java/" +
            System.getProperty("java.vm.vendor") + "/" +
            System.getProperty("java.runtime.version") + ")";
    // Tracking-mode set used to detect "SSL is the only enabled session
    // tracking mode" in parseSessionSslId().
    private static final EnumSet<SessionTrackingMode> SSL_ONLY =
        EnumSet.of(SessionTrackingMode.SSL);
    // Index of the note under which the Catalina Request/Response wrappers
    // are cached on the Coyote request/response objects.
    public static final int ADAPTER_NOTES = 1;
    // When true (via system property), a '\' in the request URI is rewritten
    // to '/' during normalize(); when false the request is rejected.
    protected static final boolean ALLOW_BACKSLASH =
        Boolean.valueOf(System.getProperty("org.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH", "false")).booleanValue();
    // ----------------------------------------------------------- Constructors
    /**
     * Construct a new CoyoteProcessor associated with the specified connector.
     *
     * @param connector CoyoteConnector that owns this processor
     */
    public CoyoteAdapter(Connector connector) {
        super();
        this.connector = connector;
    }
    // ----------------------------------------------------- Instance Variables
    /**
     * The CoyoteConnector with which this processor is associated.
     */
    private Connector connector = null;
    /**
     * The string manager for this package.
     */
    protected static final StringManager sm =
        StringManager.getManager(Constants.Package);
    /**
     * Encoder for the Location URL in HTTP redirects.
     */
    protected static URLEncoder urlEncoder;
    // ----------------------------------------------------- Static Initializer
    /**
     * The safe character set. These characters are passed through unescaped
     * when a redirect path is encoded in postParseRequest().
     */
    static {
        urlEncoder = new URLEncoder();
        urlEncoder.addSafeCharacter('-');
        urlEncoder.addSafeCharacter('_');
        urlEncoder.addSafeCharacter('.');
        urlEncoder.addSafeCharacter('*');
        urlEncoder.addSafeCharacter('/');
    }
// -------------------------------------------------------- Adapter Methods
    /**
     * Event method. Processes a Comet event for an existing request: maps
     * the socket status onto a CometEvent type/subtype, fires the event down
     * the container pipeline, and recycles the wrapper objects once the
     * event ends the request.
     *
     * @return false to indicate an error, expected or not
     */
    @Override
    public boolean event(org.apache.coyote.Request req,
            org.apache.coyote.Response res, SocketStatus status) {
        Request request = (Request) req.getNote(ADAPTER_NOTES);
        Response response = (Response) res.getNote(ADAPTER_NOTES);
        // No wrapper means the request never reached a servlet; nothing to do.
        if (request.getWrapper() == null) {
            return false;
        }
        boolean error = false;
        boolean read = false;
        try {
            if (status == SocketStatus.OPEN) {
                if (response.isClosed()) {
                    // The event has been closed asynchronously, so call end instead of
                    // read to cleanup the pipeline
                    request.getEvent().setEventType(CometEvent.EventType.END);
                    request.getEvent().setEventSubType(null);
                } else {
                    try {
                        // Fill the read buffer of the servlet layer
                        if (request.read()) {
                            read = true;
                        }
                    } catch (IOException e) {
                        error = true;
                    }
                    // Choose event type based on what the read attempt produced:
                    // data -> READ, IO failure -> ERROR, end of stream -> END.
                    if (read) {
                        request.getEvent().setEventType(CometEvent.EventType.READ);
                        request.getEvent().setEventSubType(null);
                    } else if (error) {
                        request.getEvent().setEventType(CometEvent.EventType.ERROR);
                        request.getEvent().setEventSubType(CometEvent.EventSubType.CLIENT_DISCONNECT);
                    } else {
                        request.getEvent().setEventType(CometEvent.EventType.END);
                        request.getEvent().setEventSubType(null);
                    }
                }
            } else if (status == SocketStatus.DISCONNECT) {
                request.getEvent().setEventType(CometEvent.EventType.ERROR);
                request.getEvent().setEventSubType(CometEvent.EventSubType.CLIENT_DISCONNECT);
                error = true;
            } else if (status == SocketStatus.ERROR) {
                request.getEvent().setEventType(CometEvent.EventType.ERROR);
                request.getEvent().setEventSubType(CometEvent.EventSubType.IOEXCEPTION);
                error = true;
            } else if (status == SocketStatus.STOP) {
                request.getEvent().setEventType(CometEvent.EventType.END);
                request.getEvent().setEventSubType(CometEvent.EventSubType.SERVER_SHUTDOWN);
            } else if (status == SocketStatus.TIMEOUT) {
                if (response.isClosed()) {
                    // The event has been closed asynchronously, so call end instead of
                    // read to cleanup the pipeline
                    request.getEvent().setEventType(CometEvent.EventType.END);
                    request.getEvent().setEventSubType(null);
                } else {
                    request.getEvent().setEventType(CometEvent.EventType.ERROR);
                    request.getEvent().setEventSubType(CometEvent.EventSubType.TIMEOUT);
                }
            }
            // Expose the worker thread name for monitoring; cleared in finally.
            req.getRequestProcessor().setWorkerThreadName(Thread.currentThread().getName());
            // Calling the container
            connector.getService().getContainer().getPipeline().getFirst().event(request, response, request.getEvent());
            if (!error && !response.isClosed() && (request.getAttribute(
                    RequestDispatcher.ERROR_EXCEPTION) != null)) {
                // An unexpected exception occurred while processing the event, so
                // error should be called
                request.getEvent().setEventType(CometEvent.EventType.ERROR);
                request.getEvent().setEventSubType(null);
                error = true;
                connector.getService().getContainer().getPipeline().getFirst().event(request, response, request.getEvent());
            }
            if (response.isClosed() || !request.isComet()) {
                if (status==SocketStatus.OPEN &&
                        request.getEvent().getEventType() != EventType.END) {
                    //CometEvent.close was called during an event other than END
                    request.getEvent().setEventType(CometEvent.EventType.END);
                    request.getEvent().setEventSubType(null);
                    error = true;
                    connector.getService().getContainer().getPipeline().getFirst().event(request, response, request.getEvent());
                }
                res.action(ActionCode.COMET_END, null);
            } else if (!error && read && request.getAvailable()) {
                // If this was a read and not all bytes have been read, or if no data
                // was read from the connector, then it is an error
                request.getEvent().setEventType(CometEvent.EventType.ERROR);
                request.getEvent().setEventSubType(CometEvent.EventSubType.IOEXCEPTION);
                error = true;
                connector.getService().getContainer().getPipeline().getFirst().event(request, response, request.getEvent());
            }
            return (!error);
        } catch (Throwable t) {
            ExceptionUtils.handleThrowable(t);
            // IOExceptions are expected (client disconnects); only log others.
            if (!(t instanceof IOException)) {
                log.error(sm.getString("coyoteAdapter.service"), t);
            }
            error = true;
            return false;
        } finally {
            req.getRequestProcessor().setWorkerThreadName(null);
            // Recycle the wrapper request and response
            if (error || response.isClosed() || !request.isComet()) {
                ((Context) request.getMappingData().context).logAccess(
                        request, response,
                        System.currentTimeMillis() - req.getStartTime(),
                        false);
                request.recycle();
                request.setFilterChain(null);
                response.recycle();
            }
        }
    }
    /**
     * Dispatch an async-related event (timeout, error or dispatch) for a
     * request that is already associated with this adapter. May re-invoke
     * the container pipeline and may transition the request into Comet mode.
     *
     * @return true on success, false if an error occurred
     */
    @Override
    public boolean asyncDispatch(org.apache.coyote.Request req,
            org.apache.coyote.Response res, SocketStatus status) throws Exception {
        Request request = (Request) req.getNote(ADAPTER_NOTES);
        Response response = (Response) res.getNote(ADAPTER_NOTES);
        if (request == null) {
            throw new IllegalStateException(
                    "Dispatch may only happen on an existing request.");
        }
        boolean comet = false;
        boolean success = true;
        AsyncContextImpl asyncConImpl = (AsyncContextImpl)request.getAsyncContext();
        req.getRequestProcessor().setWorkerThreadName(Thread.currentThread().getName());
        try {
            // NOTE(review): comet is necessarily still false here; the !comet
            // term guards against future reordering of this method's logic.
            if (!request.isAsync() && !comet) {
                // Error or timeout - need to tell listeners the request is over
                // Have to test this first since state may change while in this
                // method and this is only required if entering this method in
                // this state
                Context ctxt = (Context) request.getMappingData().context;
                if (ctxt != null) {
                    ctxt.fireRequestDestroyEvent(request);
                }
                // Lift any suspension (e.g. if sendError() was used by an async
                // request
                response.setSuspended(false);
            }
            if (status==SocketStatus.TIMEOUT) {
                success = true;
                // If no listener handled the timeout, put the async context
                // into its error state.
                if (!asyncConImpl.timeout()) {
                    asyncConImpl.setErrorState(null);
                }
            }
            if (request.isAsyncDispatching()) {
                success = true;
                connector.getService().getContainer().getPipeline().getFirst().invoke(request, response);
                Throwable t = (Throwable) request.getAttribute(
                        RequestDispatcher.ERROR_EXCEPTION);
                if (t != null) {
                    asyncConImpl.setErrorState(t);
                }
            }
            if (request.isComet()) {
                if (!response.isClosed() && !response.isError()) {
                    if (request.getAvailable() || (request.getContentLength() > 0 && (!request.isParametersParsed()))) {
                        // Invoke a read event right away if there are available bytes
                        if (event(req, res, SocketStatus.OPEN)) {
                            comet = true;
                            res.action(ActionCode.COMET_BEGIN, null);
                        }
                    } else {
                        comet = true;
                        res.action(ActionCode.COMET_BEGIN, null);
                    }
                } else {
                    // Clear the filter chain, as otherwise it will not be reset elsewhere
                    // since this is a Comet request
                    request.setFilterChain(null);
                }
            }
            if (!request.isAsync() && !comet) {
                // Request fully handled here: flush, notify the protocol layer
                // and log the access at the context level.
                request.finishRequest();
                response.finishResponse();
                req.action(ActionCode.POST_REQUEST , null);
                ((Context) request.getMappingData().context).logAccess(
                        request, response,
                        System.currentTimeMillis() - req.getStartTime(),
                        false);
            }
        } catch (IOException e) {
            success = false;
            // Ignore
        } catch (Throwable t) {
            ExceptionUtils.handleThrowable(t);
            success = false;
            log.error(sm.getString("coyoteAdapter.service"), t);
        } finally {
            req.getRequestProcessor().setWorkerThreadName(null);
            // Recycle the wrapper request and response
            if (!success || (!comet && !request.isAsync())) {
                request.recycle();
                response.recycle();
            } else {
                // Clear converters so that the minimum amount of memory
                // is used by this processor
                request.clearEncoders();
                response.clearEncoders();
            }
        }
        return success;
    }
    /**
     * Service method. Entry point for normal request processing: wraps the
     * Coyote request/response in Catalina objects (creating and caching them
     * as notes on first use), parses Catalina-specific parameters, invokes
     * the container pipeline and then finishes or recycles the request
     * depending on whether Comet or async processing took over.
     */
    @Override
    public void service(org.apache.coyote.Request req,
                        org.apache.coyote.Response res)
        throws Exception {
        Request request = (Request) req.getNote(ADAPTER_NOTES);
        Response response = (Response) res.getNote(ADAPTER_NOTES);
        if (request == null) {
            // Create objects
            request = connector.createRequest();
            request.setCoyoteRequest(req);
            response = connector.createResponse();
            response.setCoyoteResponse(res);
            // Link objects
            request.setResponse(response);
            response.setRequest(request);
            // Set as notes
            req.setNote(ADAPTER_NOTES, request);
            res.setNote(ADAPTER_NOTES, response);
            // Set query string encoding
            req.getParameters().setQueryStringEncoding
                (connector.getURIEncoding());
        }
        if (connector.getXpoweredBy()) {
            response.addHeader("X-Powered-By", POWERED_BY);
        }
        boolean comet = false;
        boolean async = false;
        try {
            // Parse and set Catalina and configuration specific
            // request parameters
            req.getRequestProcessor().setWorkerThreadName(Thread.currentThread().getName());
            boolean postParseSuccess = postParseRequest(req, request, res, response);
            if (postParseSuccess) {
                //check valves if we support async
                request.setAsyncSupported(connector.getService().getContainer().getPipeline().isAsyncSupported());
                // Calling the container
                connector.getService().getContainer().getPipeline().getFirst().invoke(request, response);
                if (request.isComet()) {
                    if (!response.isClosed() && !response.isError()) {
                        if (request.getAvailable() || (request.getContentLength() > 0 && (!request.isParametersParsed()))) {
                            // Invoke a read event right away if there are available bytes
                            if (event(req, res, SocketStatus.OPEN)) {
                                comet = true;
                                res.action(ActionCode.COMET_BEGIN, null);
                            }
                        } else {
                            comet = true;
                            res.action(ActionCode.COMET_BEGIN, null);
                        }
                    } else {
                        // Clear the filter chain, as otherwise it will not be reset elsewhere
                        // since this is a Comet request
                        request.setFilterChain(null);
                    }
                }
            }
            AsyncContextImpl asyncConImpl = (AsyncContextImpl)request.getAsyncContext();
            if (asyncConImpl != null) {
                async = true;
            } else if (!comet) {
                request.finishRequest();
                response.finishResponse();
                if (postParseSuccess &&
                        request.getMappingData().context != null) {
                    // Log only if processing was invoked.
                    // If postParseRequest() failed, it has already logged it.
                    // If context is null this was the start of a comet request
                    // that failed and has already been logged.
                    ((Context) request.getMappingData().context).logAccess(
                            request, response,
                            System.currentTimeMillis() - req.getStartTime(),
                            false);
                }
                req.action(ActionCode.POST_REQUEST , null);
            }
        } catch (IOException e) {
            // Ignore
        } finally {
            req.getRequestProcessor().setWorkerThreadName(null);
            // Recycle the wrapper request and response
            if (!comet && !async) {
                request.recycle();
                response.recycle();
            } else {
                // Clear converters so that the minimum amount of memory
                // is used by this processor
                request.clearEncoders();
                response.clearEncoders();
            }
        }
    }
    /**
     * Log an access for a request that may never have reached the pipeline
     * (e.g. it was rejected during parsing). Creates the wrapper objects if
     * they do not exist yet, logs at the lowest container level available,
     * then always recycles the wrappers.
     */
    @Override
    public void log(org.apache.coyote.Request req,
            org.apache.coyote.Response res, long time) {
        Request request = (Request) req.getNote(ADAPTER_NOTES);
        Response response = (Response) res.getNote(ADAPTER_NOTES);
        if (request == null) {
            // Create objects
            request = connector.createRequest();
            request.setCoyoteRequest(req);
            response = connector.createResponse();
            response.setCoyoteResponse(res);
            // Link objects
            request.setResponse(response);
            response.setRequest(request);
            // Set as notes
            req.setNote(ADAPTER_NOTES, request);
            res.setNote(ADAPTER_NOTES, response);
            // Set query string encoding
            req.getParameters().setQueryStringEncoding
                (connector.getURIEncoding());
        }
        try {
            // Log at the lowest level available. logAccess() will be
            // automatically called on parent containers.
            boolean logged = false;
            if (request.mappingData != null) {
                if (request.mappingData.context != null) {
                    logged = true;
                    ((Context) request.mappingData.context).logAccess(
                            request, response, time, true);
                } else if (request.mappingData.host != null) {
                    logged = true;
                    ((Host) request.mappingData.host).logAccess(
                            request, response, time, true);
                }
            }
            // Fall back to the engine-level access log if no context or host
            // was ever mapped for this request.
            if (!logged) {
                connector.getService().getContainer().logAccess(
                        request, response, time, true);
            }
        } catch (Throwable t) {
            ExceptionUtils.handleThrowable(t);
            log.warn(sm.getString("coyoteAdapter.accesslogFail"), t);
        } finally {
            request.recycle();
            response.recycle();
        }
    }
    /**
     * @return the JMX domain of the associated connector
     */
    @Override
    public String getDomain() {
        return connector.getDomain();
    }
// ------------------------------------------------------ Protected Methods
    /**
     * Parse additional request parameters: scheme/secure flag, proxy
     * overrides, path-parameter stripping, URI decoding and normalization,
     * user principal and auth type, request mapping (with a possible second
     * pass when a session binds the client to a specific webapp version),
     * session id extraction from URL / cookies / SSL, mapper redirects and
     * the TRACE method filter.
     *
     * @return true if the request should be passed on to the container
     *         pipeline; false if it has already been fully handled (an
     *         error status or redirect has been written to the response)
     */
    protected boolean postParseRequest(org.apache.coyote.Request req,
                                       Request request,
                                       org.apache.coyote.Response res,
                                       Response response)
            throws Exception {
        // XXX the processor may have set a correct scheme and port prior to this point,
        // in ajp13 protocols dont make sense to get the port from the connector...
        // otherwise, use connector configuration
        if (! req.scheme().isNull()) {
            // use processor specified scheme to determine secure state
            request.setSecure(req.scheme().equals("https"));
        } else {
            // use connector scheme and secure configuration, (defaults to
            // "http" and false respectively)
            req.scheme().setString(connector.getScheme());
            request.setSecure(connector.getSecure());
        }
        // FIXME: the code below doesnt belongs to here,
        // this is only have sense
        // in Http11, not in ajp13..
        // At this point the Host header has been processed.
        // Override if the proxyPort/proxyHost are set
        String proxyName = connector.getProxyName();
        int proxyPort = connector.getProxyPort();
        if (proxyPort != 0) {
            req.setServerPort(proxyPort);
        }
        if (proxyName != null) {
            req.serverName().setString(proxyName);
        }
        // Copy the raw URI to the decodedURI
        MessageBytes decodedURI = req.decodedURI();
        decodedURI.duplicate(req.requestURI());
        // Parse the path parameters. This will:
        //   - strip out the path parameters
        //   - convert the decodedURI to bytes
        parsePathParameters(req, request);
        // URI decoding
        // %xx decoding of the URL
        try {
            req.getURLDecoder().convert(decodedURI, false);
        } catch (IOException ioe) {
            res.setStatus(400);
            res.setMessage("Invalid URI: " + ioe.getMessage());
            connector.getService().getContainer().logAccess(
                    request, response, 0, true);
            return false;
        }
        // Normalization
        if (!normalize(req.decodedURI())) {
            res.setStatus(400);
            res.setMessage("Invalid URI");
            connector.getService().getContainer().logAccess(
                    request, response, 0, true);
            return false;
        }
        // Character decoding
        convertURI(decodedURI, request);
        // Check that the URI is still normalized after character decoding
        // (rejects encoded traversal sequences such as %2e%2e).
        if (!checkNormalize(req.decodedURI())) {
            res.setStatus(400);
            res.setMessage("Invalid URI character encoding");
            connector.getService().getContainer().logAccess(
                    request, response, 0, true);
            return false;
        }
        // Set the remote principal
        String principal = req.getRemoteUser().toString();
        if (principal != null) {
            request.setUserPrincipal(new CoyotePrincipal(principal));
        }
        // Set the authorization type
        String authtype = req.getAuthType().toString();
        if (authtype != null) {
            request.setAuthType(authtype);
        }
        // Request mapping.
        MessageBytes serverName;
        if (connector.getUseIPVHosts()) {
            serverName = req.localName();
            if (serverName.isNull()) {
                // well, they did ask for it
                res.action(ActionCode.REQ_LOCAL_NAME_ATTRIBUTE, null);
            }
        } else {
            serverName = req.serverName();
        }
        if (request.isAsyncStarted()) {
            //TODO SERVLET3 - async
            //reset mapping data, should prolly be done elsewhere
            request.getMappingData().recycle();
        }
        // Map the request. The loop runs at most twice: a second pass is
        // only needed when the requested session id selects a different
        // version of a parallel-deployed web application.
        boolean mapRequired = true;
        String version = null;
        while (mapRequired) {
            if (version != null) {
                // Once we have a version - that is it
                mapRequired = false;
            }
            // This will map the the latest version by default
            connector.getMapper().map(serverName, decodedURI, version,
                                      request.getMappingData());
            request.setContext((Context) request.getMappingData().context);
            request.setWrapper((Wrapper) request.getMappingData().wrapper);
            // Single contextVersion therefore no possibility of remap
            if (request.getMappingData().contexts == null) {
                mapRequired = false;
            }
            // If there is no context at this point, it is likely no ROOT context
            // has been deployed
            if (request.getContext() == null) {
                res.setStatus(404);
                res.setMessage("Not found");
                // No context, so use host
                Host host = request.getHost();
                // Make sure there is a host (might not be during shutdown)
                if (host != null) {
                    host.logAccess(request, response, 0, true);
                }
                return false;
            }
            // Now we have the context, we can parse the session ID from the URL
            // (if any). Need to do this before we redirect in case we need to
            // include the session id in the redirect
            String sessionID = null;
            if (request.getServletContext().getEffectiveSessionTrackingModes()
                    .contains(SessionTrackingMode.URL)) {
                // Get the session ID if there was one
                sessionID = request.getPathParameter(
                        SessionConfig.getSessionUriParamName(
                                request.getContext()));
                if (sessionID != null) {
                    request.setRequestedSessionId(sessionID);
                    request.setRequestedSessionURL(true);
                }
            }
            // Look for session ID in cookies and SSL session
            parseSessionCookiesId(req, request);
            parseSessionSslId(request);
            sessionID = request.getRequestedSessionId();
            if (mapRequired) {
                if (sessionID == null) {
                    // No session means no possibility of needing to remap
                    mapRequired = false;
                } else {
                    // Find the context associated with the session
                    Object[] objs = request.getMappingData().contexts;
                    for (int i = (objs.length); i > 0; i--) {
                        Context ctxt = (Context) objs[i - 1];
                        if (ctxt.getManager().findSession(sessionID) != null) {
                            // Was the correct context already mapped?
                            if (ctxt.equals(request.getMappingData().context)) {
                                mapRequired = false;
                            } else {
                                // Set version so second time through mapping the
                                // correct context is found
                                version = ctxt.getWebappVersion();
                                // Reset mapping
                                request.getMappingData().recycle();
                                break;
                            }
                        }
                    }
                    if (version == null) {
                        // No matching context found. No need to re-map
                        mapRequired = false;
                    }
                }
            }
        }
        // Possible redirect
        MessageBytes redirectPathMB = request.getMappingData().redirectPath;
        if (!redirectPathMB.isNull()) {
            String redirectPath = urlEncoder.encode(redirectPathMB.toString());
            String query = request.getQueryString();
            if (request.isRequestedSessionIdFromURL()) {
                // This is not optimal, but as this is not very common, it
                // shouldn't matter
                redirectPath = redirectPath + ";" +
                        SessionConfig.getSessionUriParamName(
                            request.getContext()) +
                    "=" + request.getRequestedSessionId();
            }
            if (query != null) {
                // This is not optimal, but as this is not very common, it
                // shouldn't matter
                redirectPath = redirectPath + "?" + query;
            }
            response.sendRedirect(redirectPath);
            request.getContext().logAccess(request, response, 0, true);
            return false;
        }
        // Filter trace method
        if (!connector.getAllowTrace()
                && req.method().equalsIgnoreCase("TRACE")) {
            Wrapper wrapper = request.getWrapper();
            String header = null;
            if (wrapper != null) {
                // Build the Allow header from the servlet's supported
                // methods, excluding TRACE itself.
                String[] methods = wrapper.getServletMethods();
                if (methods != null) {
                    for (int i=0; i<methods.length; i++) {
                        if ("TRACE".equals(methods[i])) {
                            continue;
                        }
                        if (header == null) {
                            header = methods[i];
                        } else {
                            header += ", " + methods[i];
                        }
                    }
                }
            }
            res.setStatus(405);
            res.addHeader("Allow", header);
            res.setMessage("TRACE method is not allowed");
            request.getContext().logAccess(request, response, 0, true);
            return false;
        }
        return true;
    }
    /**
     * Extract the path parameters from the request. This assumes parameters are
     * of the form /path;name=value;name2=value2/ etc. Currently only really
     * interested in the session ID that will be in this form. Other parameters
     * can safely be ignored.
     * <p>
     * The parameters are removed in place from the decoded URI byte buffer,
     * and each name=value pair found is recorded on the Catalina request via
     * {@code addPathParameter}.
     *
     * @param req     the Coyote request whose decoded URI is rewritten
     * @param request the Catalina request that receives the parameters
     */
    protected void parsePathParameters(org.apache.coyote.Request req,
            Request request) {
        // Process in bytes (this is default format so this is normally a NO-OP
        req.decodedURI().toBytes();
        ByteChunk uriBC = req.decodedURI().getByteChunk();
        int semicolon = uriBC.indexOf(';', 0);
        // What encoding to use? Some platforms, eg z/os, use a default
        // encoding that doesn't give the expected result so be explicit
        String enc = connector.getURIEncoding();
        if (enc == null) {
            enc = "ISO-8859-1";
        }
        Charset charset = null;
        try {
            charset = B2CConverter.getCharset(enc);
        } catch (UnsupportedEncodingException e1) {
            // charset stays null: parameters will still be stripped from the
            // URI below, but their values cannot be decoded or recorded.
            log.warn(sm.getString("coyoteAdapter.parsePathParam",
                    enc));
        }
        if (log.isDebugEnabled()) {
            log.debug(sm.getString("coyoteAdapter.debug", "uriBC",
                    uriBC.toString()));
            log.debug(sm.getString("coyoteAdapter.debug", "semicolon",
                    String.valueOf(semicolon)));
            log.debug(sm.getString("coyoteAdapter.debug", "enc", enc));
        }
        while (semicolon > -1) {
            // Parse path param, and extract it from the decoded request URI
            int start = uriBC.getStart();
            int end = uriBC.getEnd();
            int pathParamStart = semicolon + 1;
            int pathParamEnd = ByteChunk.findBytes(uriBC.getBuffer(),
                    start + pathParamStart, end,
                    new byte[] {';', '/'});
            String pv = null;
            if (pathParamEnd >= 0) {
                if (charset != null) {
                    pv = new String(uriBC.getBuffer(), start + pathParamStart,
                                pathParamEnd - pathParamStart, charset);
                }
                // Extract path param from decoded request URI: shift the
                // remainder of the URI left over the parameter bytes.
                byte[] buf = uriBC.getBuffer();
                for (int i = 0; i < end - start - pathParamEnd; i++) {
                    buf[start + semicolon + i]
                        = buf[start + i + pathParamEnd];
                }
                uriBC.setBytes(buf, start,
                        end - start - pathParamEnd + semicolon);
            } else {
                // Parameter runs to the end of the URI: just truncate.
                if (charset != null) {
                    pv = new String(uriBC.getBuffer(), start + pathParamStart,
                                (end - start) - pathParamStart, charset);
                }
                uriBC.setEnd(start + semicolon);
            }
            if (log.isDebugEnabled()) {
                log.debug(sm.getString("coyoteAdapter.debug", "pathParamStart",
                        String.valueOf(pathParamStart)));
                log.debug(sm.getString("coyoteAdapter.debug", "pathParamEnd",
                        String.valueOf(pathParamEnd)));
                log.debug(sm.getString("coyoteAdapter.debug", "pv", pv));
            }
            if (pv != null) {
                int equals = pv.indexOf('=');
                if (equals > -1) {
                    String name = pv.substring(0, equals);
                    String value = pv.substring(equals + 1);
                    request.addPathParameter(name, value);
                    if (log.isDebugEnabled()) {
                        log.debug(sm.getString("coyoteAdapter.debug", "equals",
                                String.valueOf(equals)));
                        log.debug(sm.getString("coyoteAdapter.debug", "name",
                                name));
                        log.debug(sm.getString("coyoteAdapter.debug", "value",
                                value));
                    }
                }
            }
            semicolon = uriBC.indexOf(';', semicolon);
        }
    }
    /**
     * Look for SSL session ID if required. Only look for SSL Session ID if it
     * is the only tracking method enabled, and no session id has already been
     * found in the URL or cookies.
     */
    protected void parseSessionSslId(Request request) {
        if (request.getRequestedSessionId() == null &&
                SSL_ONLY.equals(request.getServletContext()
                        .getEffectiveSessionTrackingModes()) &&
                        request.connector.secure) {
            // TODO Is there a better way to map SSL sessions to our sesison ID?
            // TODO The request.getAttribute() will cause a number of other SSL
            //      attribute to be populated. Is this a performance concern?
            request.setRequestedSessionId(
                    request.getAttribute(SSLSupport.SESSION_ID_KEY).toString());
            request.setRequestedSessionSSL(true);
        }
    }
    /**
     * Parse the session id from the session cookie, if present. A cookie
     * session id overrides one supplied in the URL, and when several session
     * cookies are present the first valid one wins.
     */
    protected void parseSessionCookiesId(org.apache.coyote.Request req, Request request) {
        // If session tracking via cookies has been disabled for the current
        // context, don't go looking for a session ID in a cookie as a cookie
        // from a parent context with a session ID may be present which would
        // overwrite the valid session ID encoded in the URL
        Context context = (Context) request.getMappingData().context;
        if (context != null && !context.getServletContext()
                .getEffectiveSessionTrackingModes().contains(
                        SessionTrackingMode.COOKIE)) {
            return;
        }
        // Parse session id from cookies
        Cookies serverCookies = req.getCookies();
        int count = serverCookies.getCookieCount();
        if (count <= 0) {
            return;
        }
        String sessionCookieName = SessionConfig.getSessionCookieName(context);
        for (int i = 0; i < count; i++) {
            ServerCookie scookie = serverCookies.getCookie(i);
            if (scookie.getName().equals(sessionCookieName)) {
                // Override anything requested in the URL
                if (!request.isRequestedSessionIdFromCookie()) {
                    // Accept only the first session id cookie
                    convertMB(scookie.getValue());
                    request.setRequestedSessionId
                        (scookie.getValue().toString());
                    request.setRequestedSessionCookie(true);
                    request.setRequestedSessionURL(false);
                    if (log.isDebugEnabled()) {
                        log.debug(" Requested cookie session id is " +
                            request.getRequestedSessionId());
                    }
                } else {
                    if (!request.isRequestedSessionIdValid()) {
                        // Replace the session id until one is valid
                        convertMB(scookie.getValue());
                        request.setRequestedSessionId
                            (scookie.getValue().toString());
                    }
                }
            }
        }
    }
    /**
     * Character conversion of the URI. Converts the URI's bytes to chars
     * using the connector's configured URI encoding; falls back to a fast
     * byte-to-char cast (ISO-8859-1 semantics) if no encoding is configured
     * or the conversion fails.
     */
    protected void convertURI(MessageBytes uri, Request request)
        throws Exception {
        ByteChunk bc = uri.getByteChunk();
        int length = bc.getLength();
        CharChunk cc = uri.getCharChunk();
        cc.allocate(length, -1);
        String enc = connector.getURIEncoding();
        if (enc != null) {
            // Reuse the cached per-request converter if one exists.
            B2CConverter conv = request.getURIConverter();
            try {
                if (conv == null) {
                    conv = new B2CConverter(enc);
                    request.setURIConverter(conv);
                }
            } catch (IOException e) {
                // Ignore
                log.error("Invalid URI encoding; using HTTP default");
                connector.setURIEncoding(null);
            }
            if (conv != null) {
                try {
                    conv.convert(bc, cc, cc.getBuffer().length - cc.getEnd());
                    uri.setChars(cc.getBuffer(), cc.getStart(),
                            cc.getLength());
                    return;
                } catch (IOException e) {
                    log.error("Invalid URI character encoding; trying ascii");
                    cc.recycle();
                }
            }
        }
        // Default encoding: fast conversion (each byte becomes one char)
        byte[] bbuf = bc.getBuffer();
        char[] cbuf = cc.getBuffer();
        int start = bc.getStart();
        for (int i = 0; i < length; i++) {
            cbuf[i] = (char) (bbuf[i + start] & 0xff);
        }
        uri.setChars(cbuf, 0, length);
    }
/**
* Character conversion of the a US-ASCII MessageBytes.
*/
protected void convertMB(MessageBytes mb) {
// This is of course only meaningful for bytes
if (mb.getType() != MessageBytes.T_BYTES) {
return;
}
ByteChunk bc = mb.getByteChunk();
CharChunk cc = mb.getCharChunk();
int length = bc.getLength();
cc.allocate(length, -1);
// Default encoding: fast conversion
byte[] bbuf = bc.getBuffer();
char[] cbuf = cc.getBuffer();
int start = bc.getStart();
for (int i = 0; i < length; i++) {
cbuf[i] = (char) (bbuf[i + start] & 0xff);
}
mb.setChars(cbuf, 0, length);
}
    /**
     * Normalize URI.
     * <p>
     * This method normalizes "\", "//", "/./" and "/../" in place on the
     * URI's byte buffer. This method will return false when trying to go
     * above the root, or if the URI contains a null byte.
     *
     * @param uriMB URI to be normalized
     * @return true if the URI is acceptable after normalization
     */
    public static boolean normalize(MessageBytes uriMB) {
        ByteChunk uriBC = uriMB.getByteChunk();
        final byte[] b = uriBC.getBytes();
        final int start = uriBC.getStart();
        int end = uriBC.getEnd();
        // An empty URL is not acceptable
        if (start == end) {
            return false;
        }
        // URL * is acceptable
        if ((end - start == 1) && b[start] == (byte) '*') {
            return true;
        }
        int pos = 0;
        int index = 0;
        // Replace '\' with '/' (only when ALLOW_BACKSLASH is set)
        // Check for null byte
        for (pos = start; pos < end; pos++) {
            if (b[pos] == (byte) '\\') {
                if (ALLOW_BACKSLASH) {
                    b[pos] = (byte) '/';
                } else {
                    return false;
                }
            }
            if (b[pos] == (byte) 0) {
                return false;
            }
        }
        // The URL must start with '/'
        if (b[start] != (byte) '/') {
            return false;
        }
        // Replace "//" with "/"
        for (pos = start; pos < (end - 1); pos++) {
            if (b[pos] == (byte) '/') {
                while ((pos + 1 < end) && (b[pos + 1] == (byte) '/')) {
                    copyBytes(b, pos, pos + 1, end - pos - 1);
                    end--;
                }
            }
        }
        // If the URI ends with "/." or "/..", then we append an extra "/"
        // Note: It is possible to extend the URI by 1 without any side effect
        // as the next character is a non-significant WS.
        if (((end - start) >= 2) && (b[end - 1] == (byte) '.')) {
            if ((b[end - 2] == (byte) '/')
                    || ((b[end - 2] == (byte) '.')
                            && (b[end - 3] == (byte) '/'))) {
                b[end] = (byte) '/';
                end++;
            }
        }
        uriBC.setEnd(end);
        index = 0;
        // Resolve occurrences of "/./" in the normalized path
        while (true) {
            index = uriBC.indexOf("/./", 0, 3, index);
            if (index < 0) {
                break;
            }
            copyBytes(b, start + index, start + index + 2,
                    end - start - index - 2);
            end = end - 2;
            uriBC.setEnd(end);
        }
        index = 0;
        // Resolve occurrences of "/../" in the normalized path by removing
        // the preceding segment as well.
        while (true) {
            index = uriBC.indexOf("/../", 0, 4, index);
            if (index < 0) {
                break;
            }
            // Prevent from going outside our context
            if (index == 0) {
                return false;
            }
            // Find the '/' that starts the segment being backed out of.
            int index2 = -1;
            for (pos = start + index - 1; (pos >= 0) && (index2 < 0); pos --) {
                if (b[pos] == (byte) '/') {
                    index2 = pos;
                }
            }
            copyBytes(b, start + index2, start + index + 3,
                    end - start - index - 3);
            end = end + index2 - index - 3;
            uriBC.setEnd(end);
            index = index2;
        }
        return true;
    }
/**
* Check that the URI is normalized following character decoding.
* <p>
* This method checks for "\", 0, "//", "/./" and "/../". This method will
* return false if sequences that are supposed to be normalized are still
* present in the URI.
*
* @param uriMB URI to be checked (should be chars)
*/
public static boolean checkNormalize(MessageBytes uriMB) {
CharChunk uriCC = uriMB.getCharChunk();
char[] c = uriCC.getChars();
int start = uriCC.getStart();
int end = uriCC.getEnd();
int pos = 0;
// Check for '\' and 0
for (pos = start; pos < end; pos++) {
if (c[pos] == '\\') {
return false;
}
if (c[pos] == 0) {
return false;
}
}
// Check for "//"
for (pos = start; pos < (end - 1); pos++) {
if (c[pos] == '/') {
if (c[pos + 1] == '/') {
return false;
}
}
}
// Check for ending with "/." or "/.."
if (((end - start) >= 2) && (c[end - 1] == '.')) {
if ((c[end - 2] == '/')
|| ((c[end - 2] == '.')
&& (c[end - 3] == '/'))) {
return false;
}
}
// Check for "/./"
if (uriCC.indexOf("/./", 0, 3, 0) >= 0) {
return false;
}
// Check for "/../"
if (uriCC.indexOf("/../", 0, 4, 0) >= 0) {
return false;
}
return true;
}
// ------------------------------------------------------ Protected Methods
/**
* Copy an array of bytes to a different position. Used during
* normalization.
*/
protected static void copyBytes(byte[] b, int dest, int src, int len) {
for (int pos = 0; pos < len; pos++) {
b[pos + dest] = b[pos + src];
}
}
}
|
|
/*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.progress.util;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.TaskInfo;
import com.intellij.openapi.wm.ex.ProgressIndicatorEx;
import com.intellij.util.ArrayUtil;
import com.intellij.util.containers.WeakList;
import consulo.localize.LocalizeValue;
import javax.annotation.Nonnull;
import java.util.Collection;
public class AbstractProgressIndicatorExBase extends AbstractProgressIndicatorBase implements ProgressIndicatorEx {
  // Whether this indicator may be restarted after being stopped.
  private final boolean myReusable;
  // Delegate indicators that mirror this indicator's state changes.
  private volatile ProgressIndicatorEx[] myStateDelegates;
  // Tasks already finished via finish(); lazily created, weakly referenced.
  private volatile WeakList<TaskInfo> myFinished;
  // Set once start() has been called at least once.
  private volatile boolean myWasStarted;
  // Task auto-finished by processFinish(), if any.
  private TaskInfo myOwnerTask;
  /**
   * @param reusable whether the indicator may be restarted after stopping
   */
  public AbstractProgressIndicatorExBase(boolean reusable) {
    myReusable = reusable;
  }
  /** Creates a non-reusable indicator. */
  public AbstractProgressIndicatorExBase() {
    this(false);
  }
@Override
public void start() {
synchronized (getLock()) {
super.start();
delegateRunningChange(ProgressIndicator::start);
myWasStarted = true;
}
}
@Override
public void stop() {
super.stop();
delegateRunningChange(ProgressIndicator::stop);
}
@Override
public void cancel() {
super.cancel();
delegateRunningChange(ProgressIndicator::cancel);
}
@Override
public void finish(@Nonnull final TaskInfo task) {
WeakList<TaskInfo> finished = myFinished;
if (finished == null) {
synchronized (getLock()) {
finished = myFinished;
if (finished == null) {
myFinished = finished = new WeakList<>();
}
}
}
if (!finished.addIfAbsent(task)) return;
delegateRunningChange(each -> each.finish(task));
}
@Override
public boolean isFinished(@Nonnull final TaskInfo task) {
Collection<TaskInfo> list = myFinished;
return list != null && list.contains(task);
}
protected void setOwnerTask(TaskInfo owner) {
myOwnerTask = owner;
}
@Override
public void processFinish() {
if (myOwnerTask != null) {
finish(myOwnerTask);
myOwnerTask = null;
}
}
@Override
public final void checkCanceled() {
super.checkCanceled();
delegate(ProgressIndicator::checkCanceled);
}
@Override
public void setTextValue(final LocalizeValue text) {
super.setTextValue(text);
delegateProgressChange(each -> each.setTextValue(text));
}
@Override
public void setText2Value(final LocalizeValue text) {
super.setText2Value(text);
delegateProgressChange(each -> each.setText2Value(text));
}
@Override
public void setFraction(final double fraction) {
super.setFraction(fraction);
delegateProgressChange(each -> each.setFraction(fraction));
}
@Override
public void pushState() {
synchronized (getLock()) {
super.pushState();
delegateProgressChange(ProgressIndicator::pushState);
}
}
@Override
public void popState() {
synchronized (getLock()) {
super.popState();
delegateProgressChange(ProgressIndicator::popState);
}
}
@Override
protected boolean isReuseable() {
return myReusable;
}
@Override
public void setIndeterminate(final boolean indeterminate) {
super.setIndeterminate(indeterminate);
delegateProgressChange(each -> each.setIndeterminate(indeterminate));
}
@Override
public final void addStateDelegate(@Nonnull ProgressIndicatorEx delegate) {
synchronized (getLock()) {
delegate.initStateFrom(this);
ProgressIndicatorEx[] stateDelegates = myStateDelegates;
if (stateDelegates == null) {
myStateDelegates = stateDelegates = new ProgressIndicatorEx[1];
stateDelegates[0] = delegate;
}
else {
// hard throw is essential for avoiding deadlocks
if (ArrayUtil.contains(delegate, stateDelegates)) {
throw new IllegalArgumentException("Already registered: " + delegate);
}
myStateDelegates = ArrayUtil.append(stateDelegates, delegate, ProgressIndicatorEx.class);
}
}
}
protected void delegateProgressChange(@Nonnull IndicatorAction action) {
delegate(action);
onProgressChange();
}
protected void delegateRunningChange(@Nonnull IndicatorAction action) {
delegate(action);
onRunningChange();
}
private void delegate(@Nonnull IndicatorAction action) {
ProgressIndicatorEx[] list = myStateDelegates;
if (list != null) {
for (ProgressIndicatorEx each : list) {
action.execute(each);
}
}
}
protected void onProgressChange() {
}
protected void onRunningChange() {
}
@Override
public boolean wasStarted() {
return myWasStarted;
}
@FunctionalInterface
protected interface IndicatorAction {
void execute(@Nonnull ProgressIndicatorEx each);
}
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.distributed.dht;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.CacheObject;
import org.apache.ignite.internal.processors.cache.EntryGetResult;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;
import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.ReaderArguments;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.apache.ignite.internal.util.future.GridCompoundFuture;
import org.apache.ignite.internal.util.future.GridCompoundIdentityFuture;
import org.apache.ignite.internal.util.future.GridEmbeddedFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.lang.GridClosureException;
import org.apache.ignite.internal.util.typedef.C2;
import org.apache.ignite.internal.util.typedef.CI1;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteUuid;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.LOST;
import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING;
/**
*
*/
/**
 * DHT get future: resolves a batch of keys on this (primary/backup) node,
 * reserving the partitions involved for the duration of the future.
 */
public final class GridDhtGetFuture<K, V> extends GridCompoundIdentityFuture<Collection<GridCacheEntryInfo>>
    implements GridDhtFuture<Collection<GridCacheEntryInfo>> {
    /** */
    private static final long serialVersionUID = 0L;

    /** Logger reference. */
    private static final AtomicReference<IgniteLogger> logRef = new AtomicReference<>();

    /** Logger. */
    private static IgniteLogger log;

    /** Message ID. */
    private long msgId;

    /** Reader node ID (the near node requesting the values). */
    private UUID reader;

    /** Read through flag. */
    private boolean readThrough;

    /** Context. */
    private GridCacheContext<K, V> cctx;

    /** Keys mapped to a flag telling whether a near reader should be added. */
    private Map<KeyCacheObject, Boolean> keys;

    /** Reserved partitions; released when the future completes. */
    private int[] parts;

    /** Future ID. */
    private IgniteUuid futId;

    /** Version. */
    private GridCacheVersion ver;

    /** Topology version. */
    private AffinityTopologyVersion topVer;

    /** Retries because ownership changed. */
    private Collection<Integer> retries;

    /** Task name hash. */
    private int taskNameHash;

    /** Expiry policy. */
    private IgniteCacheExpiryPolicy expiryPlc;

    /** Skip values flag. */
    private boolean skipVals;

    /** Recovery flag. */
    private final boolean recovery;

    /** Whether near readers should be registered for the requested entries. */
    private final boolean addReaders;

    /** Transaction label. */
    private final String txLbl;

    /** MVCC snapshot. */
    private final MvccSnapshot mvccSnapshot;

    /**
     * @param cctx Context.
     * @param msgId Message ID.
     * @param reader Reader.
     * @param keys Keys.
     * @param readThrough Read through flag.
     * @param topVer Topology version.
     * @param taskNameHash Task name hash code.
     * @param expiryPlc Expiry policy.
     * @param skipVals Skip values flag.
     * @param recovery Recovery flag.
     * @param addReaders Whether to register near readers.
     * @param txLbl Transaction label.
     * @param mvccSnapshot MVCC snapshot.
     */
    public GridDhtGetFuture(
        GridCacheContext<K, V> cctx,
        long msgId,
        UUID reader,
        Map<KeyCacheObject, Boolean> keys,
        boolean readThrough,
        @NotNull AffinityTopologyVersion topVer,
        int taskNameHash,
        @Nullable IgniteCacheExpiryPolicy expiryPlc,
        boolean skipVals,
        boolean recovery,
        boolean addReaders,
        @Nullable String txLbl,
        MvccSnapshot mvccSnapshot
    ) {
        super(CU.<GridCacheEntryInfo>collectionsReducer(keys.size()));

        assert reader != null;
        assert !F.isEmpty(keys);

        this.reader = reader;
        this.cctx = cctx;
        this.msgId = msgId;
        this.keys = keys;
        this.readThrough = readThrough;
        this.topVer = topVer;
        this.taskNameHash = taskNameHash;
        this.expiryPlc = expiryPlc;
        this.skipVals = skipVals;
        this.recovery = recovery;
        this.addReaders = addReaders;
        this.txLbl = txLbl;
        this.mvccSnapshot = mvccSnapshot;

        futId = IgniteUuid.randomUuid();

        ver = cctx.cache().nextVersion();

        if (log == null)
            log = U.logger(cctx.kernalContext(), logRef, GridDhtGetFuture.class);
    }

    /**
     * Initializes future: optionally waits for a force-keys preload request,
     * then maps the keys and marks the compound future initialized.
     */
    void init() {
        // TODO get rid of force keys request https://issues.apache.org/jira/browse/IGNITE-10251
        GridDhtFuture<Object> fut = cctx.group().preloader().request(cctx, keys.keySet(), topVer);

        assert !cctx.mvccEnabled() || fut == null; // Should not happen with MVCC enabled.

        if (fut != null) {
            if (!F.isEmpty(fut.invalidPartitions())) {
                if (retries == null)
                    retries = new HashSet<>();

                retries.addAll(fut.invalidPartitions());
            }

            fut.listen(new CI1<IgniteInternalFuture<Object>>() {
                @Override public void apply(IgniteInternalFuture<Object> fut) {
                    try {
                        fut.get();
                    }
                    catch (IgniteCheckedException e) {
                        if (log.isDebugEnabled())
                            log.debug("Failed to request keys from preloader [keys=" + keys + ", err=" + e + ']');

                        onDone(e);

                        return;
                    }

                    map0(keys, true);

                    markInitialized();
                }
            });
        }
        else {
            map0(keys, false);

            markInitialized();
        }
    }

    /** {@inheritDoc} */
    @Override public Collection<Integer> invalidPartitions() {
        return retries == null ? Collections.<Integer>emptyList() : retries;
    }

    /**
     * @return Future ID.
     */
    public IgniteUuid futureId() {
        return futId;
    }

    /**
     * @return Future version.
     */
    public GridCacheVersion version() {
        return ver;
    }

    /** {@inheritDoc} */
    @Override public boolean onDone(Collection<GridCacheEntryInfo> res, Throwable err) {
        if (super.onDone(res, err)) {
            // Release all partitions reserved by this future.
            if (parts != null)
                cctx.topology().releasePartitions(parts);

            return true;
        }

        return false;
    }

    /**
     * Maps keys to reserved local partitions; keys whose partitions cannot be
     * reserved are recorded in {@link #retries} and excluded from the get.
     *
     * @param keys Keys to map.
     * @param forceKeys Whether a force-keys request already completed.
     */
    private void map0(Map<KeyCacheObject, Boolean> keys, boolean forceKeys) {
        // Lazily created copy of 'keys' containing only successfully mapped entries;
        // stays null while every key so far has mapped, to avoid the copy.
        Map<KeyCacheObject, Boolean> mappedKeys = null;

        // Assign keys to primary nodes.
        for (Map.Entry<KeyCacheObject, Boolean> key : keys.entrySet()) {
            int part = cctx.affinity().partition(key.getKey());

            if (retries == null || !retries.contains(part)) {
                if (!map(key.getKey(), forceKeys)) {
                    if (retries == null)
                        retries = new HashSet<>();

                    retries.add(part);

                    if (mappedKeys == null) {
                        mappedKeys = U.newLinkedHashMap(keys.size());

                        // Copy all entries that preceded the failing key.
                        for (Map.Entry<KeyCacheObject, Boolean> key1 : keys.entrySet()) {
                            if (key1.getKey() == key.getKey())
                                break;

                            // Fix: copy the already-mapped entry (key1); previously the
                            // current failing key was put repeatedly, corrupting the map.
                            mappedKeys.put(key1.getKey(), key1.getValue());
                        }
                    }
                }
                else if (mappedKeys != null)
                    mappedKeys.put(key.getKey(), key.getValue());
            }
        }

        // Add new future.
        IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut = getAsync(mappedKeys == null ? keys : mappedKeys);

        // Optimization to avoid going through compound future,
        // if getAsync() has been completed and no other futures added to this
        // compound future.
        if (fut.isDone() && !hasFutures()) {
            if (fut.error() != null)
                onDone(fut.error());
            else
                onDone(fut.result());

            return;
        }

        add(fut);
    }

    /**
     * Reserves the local partition for the given key so it is not evicted
     * while this future is in flight.
     *
     * @param key Key.
     * @param forceKeys Whether a force-keys request already completed.
     * @return {@code True} if mapped.
     */
    private boolean map(KeyCacheObject key, boolean forceKeys) {
        try {
            int keyPart = cctx.affinity().partition(key);

            if (cctx.mvccEnabled()) {
                boolean noOwners = cctx.topology().owners(keyPart, topVer).isEmpty();

                // Force key request is disabled for MVCC. So if there are no partition owners for the given key
                // (we have a not strict partition loss policy if we've got here) we need to set flag forceKeys to true
                // to avoid useless remapping to other non-owning partitions. For non-mvcc caches the force key request
                // is also useless in the such situations, so the same flow is here: allegedly we've made a force key
                // request with no results and therefore forceKeys flag may be set to true here.
                if (noOwners)
                    forceKeys = true;
            }

            GridDhtLocalPartition part = topVer.topologyVersion() > 0 ?
                cache().topology().localPartition(keyPart, topVer, true) :
                cache().topology().localPartition(keyPart);

            if (part == null)
                return false;

            if (parts == null || !F.contains(parts, part.id())) {
                // By reserving, we make sure that partition won't be unloaded while processed.
                if (part.reserve()) {
                    if (forceKeys || (part.state() == OWNING || part.state() == LOST)) {
                        parts = parts == null ? new int[1] : Arrays.copyOf(parts, parts.length + 1);

                        parts[parts.length - 1] = part.id();

                        return true;
                    }
                    else {
                        part.release();

                        return false;
                    }
                }
                else
                    return false;
            }
            else
                return true;
        }
        catch (GridDhtInvalidPartitionException e) {
            if (log.isDebugEnabled())
                log.debug("Attempted to create a partition which does not belong to local node, will remap " +
                    "[key=" + key + ", part=" + e.partition() + ']');

            return false;
        }
    }

    /**
     * Performs the actual local get, optionally registering near readers and
     * waiting for conflicting transactions first.
     *
     * @param keys Keys to get.
     * @return Future for local get.
     */
    private IgniteInternalFuture<Collection<GridCacheEntryInfo>> getAsync(
        final Map<KeyCacheObject, Boolean> keys
    ) {
        if (F.isEmpty(keys))
            return new GridFinishedFuture<Collection<GridCacheEntryInfo>>(
                Collections.<GridCacheEntryInfo>emptyList());

        String taskName0 = cctx.kernalContext().job().currentTaskName();

        if (taskName0 == null)
            taskName0 = cctx.kernalContext().task().resolveTaskName(taskNameHash);

        final String taskName = taskName0;

        GridCompoundFuture<Boolean, Boolean> txFut = null;

        ReaderArguments readerArgs = null;

        if (addReaders && !skipVals && !cctx.localNodeId().equals(reader)) {
            for (Map.Entry<KeyCacheObject, Boolean> k : keys.entrySet()) {
                if (!k.getValue())
                    continue;

                // Loop until the entry is no longer obsolete.
                while (true) {
                    GridDhtCacheEntry e = cache().entryExx(k.getKey(), topVer);

                    try {
                        if (e.obsolete())
                            continue;

                        boolean addReader = !e.deleted();

                        if (addReader) {
                            e.unswap(false);

                            // Entry will be removed on touch() if no data in cache,
                            // but they could be loaded from store,
                            // we have to add reader again later.
                            if (readerArgs == null)
                                readerArgs = new ReaderArguments(reader, msgId, topVer);
                        }

                        // Register reader. If there are active transactions for this entry,
                        // then will wait for their completion before proceeding.
                        // TODO: IGNITE-3498:
                        // TODO: What if any transaction we wait for actually removes this entry?
                        // TODO: In this case seems like we will be stuck with untracked near entry.
                        // TODO: To fix, check that reader is contained in the list of readers once
                        // TODO: again after the returned future completes - if not, try again.
                        IgniteInternalFuture<Boolean> f = addReader ? e.addReader(reader, msgId, topVer) : null;

                        if (f != null) {
                            if (txFut == null)
                                txFut = new GridCompoundFuture<>(CU.boolReducer());

                            txFut.add(f);
                        }

                        break;
                    }
                    catch (IgniteCheckedException err) {
                        return new GridFinishedFuture<>(err);
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry when getting a DHT value: " + e);
                    }
                    finally {
                        e.touch();
                    }
                }
            }

            if (txFut != null)
                txFut.markInitialized();
        }

        IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> fut;

        if (txFut == null || txFut.isDone()) {
            fut = cache().getDhtAllAsync(
                keys.keySet(),
                readerArgs,
                readThrough,
                taskName,
                expiryPlc,
                skipVals,
                recovery,
                txLbl,
                mvccSnapshot);
        }
        else {
            final ReaderArguments args = readerArgs;

            // If we are here, then there were active transactions for some entries
            // when we were adding the reader. In that case we must wait for those
            // transactions to complete.
            fut = new GridEmbeddedFuture<>(
                txFut,
                new C2<Boolean, Exception, IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>>>() {
                    @Override public IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> apply(Boolean b, Exception e) {
                        if (e != null)
                            throw new GridClosureException(e);

                        return cache().getDhtAllAsync(
                            keys.keySet(),
                            args,
                            readThrough,
                            taskName,
                            expiryPlc,
                            skipVals,
                            recovery,
                            txLbl,
                            mvccSnapshot);
                    }
                }
            );
        }

        if (fut.isDone()) {
            if (fut.error() != null)
                onDone(fut.error());
            else
                return new GridFinishedFuture<>(toEntryInfos(fut.result()));
        }

        return new GridEmbeddedFuture<>(
            new C2<Map<KeyCacheObject, EntryGetResult>, Exception, Collection<GridCacheEntryInfo>>() {
                @Override public Collection<GridCacheEntryInfo> apply(
                    Map<KeyCacheObject, EntryGetResult> map, Exception e
                ) {
                    if (e != null) {
                        onDone(e);

                        return Collections.emptyList();
                    }
                    else
                        return toEntryInfos(map);
                }
            },
            fut);
    }

    /**
     * @param map Map to convert.
     * @return List of infos.
     */
    private Collection<GridCacheEntryInfo> toEntryInfos(Map<KeyCacheObject, EntryGetResult> map) {
        if (map.isEmpty())
            return Collections.emptyList();

        Collection<GridCacheEntryInfo> infos = new ArrayList<>(map.size());

        for (Map.Entry<KeyCacheObject, EntryGetResult> entry : map.entrySet()) {
            EntryGetResult val = entry.getValue();

            assert val != null;

            GridCacheEntryInfo info = new GridCacheEntryInfo();

            info.cacheId(cctx.cacheId());
            info.key(entry.getKey());
            info.value(skipVals ? null : (CacheObject)val.value());
            info.version(val.version());
            info.expireTime(val.expireTime());
            info.ttl(val.ttl());

            infos.add(info);
        }

        return infos;
    }

    /**
     * @return DHT cache.
     */
    private GridDhtCacheAdapter<K, V> cache() {
        return (GridDhtCacheAdapter<K, V>)cctx.cache();
    }
}
|
|
package com.battlelancer.seriesguide.appwidget;
import android.app.IntentService;
import android.app.PendingIntent;
import android.appwidget.AppWidgetManager;
import android.appwidget.AppWidgetProvider;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.support.v4.app.TaskStackBuilder;
import android.text.TextUtils;
import android.widget.RemoteViews;
import com.battlelancer.seriesguide.R;
import com.battlelancer.seriesguide.adapters.CalendarAdapter;
import com.battlelancer.seriesguide.settings.CalendarSettings;
import com.battlelancer.seriesguide.settings.DisplaySettings;
import com.battlelancer.seriesguide.thetvdbapi.TvdbTools;
import com.battlelancer.seriesguide.ui.ShowsActivity;
import com.battlelancer.seriesguide.util.DBUtils;
import com.battlelancer.seriesguide.util.EpisodeTools;
import com.battlelancer.seriesguide.util.ServiceUtils;
import com.battlelancer.seriesguide.util.TextTools;
import com.battlelancer.seriesguide.util.TimeTools;
import java.io.IOException;
import java.util.Date;
import timber.log.Timber;
/**
 * Home-screen widget showing the next upcoming episode(s). Updates are done by
 * the nested {@link UpdateService}, triggered either by the system
 * ({@link #onUpdate}) or by the widget's refresh button ({@link #REFRESH}).
 */
public class AppWidget extends AppWidgetProvider {
    public static final String REFRESH = "com.battlelancer.seriesguide.appwidget.REFRESH";
    /** Maximum number of episode rows rendered into the widget. */
    private static final String LIMIT = "1";
    private static final int LAYOUT = R.layout.appwidget;
    private static final int ITEMLAYOUT = R.layout.appwidget_big_item;

    @Override
    public void onReceive(Context context, Intent intent) {
        if (context == null || intent == null) {
            // check for null as super.onReceive does not
            // we only need to guard here as other methods are only called by super.onReceive
            return;
        }
        if (REFRESH.equals(intent.getAction())) {
            context.startService(createUpdateIntent(context));
        } else {
            super.onReceive(context, intent);
        }
    }

    @Override
    public void onUpdate(Context context, AppWidgetManager appWidgetManager, int[] appWidgetIds) {
        context.startService(createUpdateIntent(context));
    }

    /**
     * Creates an intent which must include the update service class to start.
     */
    public Intent createUpdateIntent(Context context) {
        return new Intent(context, UpdateService.class);
    }

    /** Background service that (re)builds the widget's RemoteViews. */
    public static class UpdateService extends IntentService {

        public UpdateService() {
            super("appwidget.AppWidget$UpdateService");
        }

        @Override
        public void onHandleIntent(Intent intent) {
            ComponentName me = new ComponentName(this, AppWidget.class);
            AppWidgetManager mgr = AppWidgetManager.getInstance(this);
            Intent i = new Intent(this, AppWidget.class);
            mgr.updateAppWidget(me, buildUpdate(this, LIMIT, LAYOUT, ITEMLAYOUT, i));
        }

        /**
         * Builds the widget layout: one item row per upcoming episode (up to
         * {@code limit}), plus click intents for opening the app and refreshing.
         *
         * @param limit        maximum number of rows, as a decimal string
         * @param layout       widget container layout resource
         * @param itemLayout   per-episode row layout resource
         * @param updateIntent intent broadcast when the refresh button is tapped
         */
        protected RemoteViews buildUpdate(Context context, String limit, int layout,
                int itemLayout, Intent updateIntent) {
            // Get the layout for the App Widget, remove existing views
            final RemoteViews views = new RemoteViews(context.getPackageName(), layout);
            views.removeAllViews(R.id.LinearLayoutWidget);

            // get upcoming shows (name and next episode text)
            final Cursor upcomingEpisodes = DBUtils.getUpcomingEpisodes(context,
                    CalendarSettings.isOnlyFavorites(context), false);
            // Fix: close the cursor in finally so it is not leaked if building
            // a row throws (originally it was only closed on the happy path).
            try {
                if (upcomingEpisodes == null || upcomingEpisodes.getCount() == 0) {
                    // no next episodes exist
                    RemoteViews item = new RemoteViews(context.getPackageName(), itemLayout);
                    item.setTextViewText(R.id.textViewWidgetShow,
                            context.getString(R.string.no_nextepisode));
                    item.setTextViewText(R.id.textViewWidgetEpisode, "");
                    item.setTextViewText(R.id.widgetAirtime, "");
                    item.setTextViewText(R.id.widgetNetwork, "");
                    views.addView(R.id.LinearLayoutWidget, item);
                } else {
                    boolean displayExactDate = DisplaySettings.isDisplayExactDate(context);
                    boolean preventSpoilers = DisplaySettings.preventSpoilers(context);
                    // parseInt avoids the needless Integer boxing of valueOf
                    int viewsToAdd = Integer.parseInt(limit);
                    while (upcomingEpisodes.moveToNext() && viewsToAdd != 0) {
                        viewsToAdd--;

                        RemoteViews item = new RemoteViews(context.getPackageName(), itemLayout);

                        // upcoming episode
                        int seasonNumber = upcomingEpisodes.getInt(CalendarAdapter.Query.SEASON);
                        int episodeNumber = upcomingEpisodes.getInt(CalendarAdapter.Query.NUMBER);
                        String title = upcomingEpisodes.getString(CalendarAdapter.Query.TITLE);
                        int watchedFlag = upcomingEpisodes.getInt(CalendarAdapter.Query.WATCHED);
                        String nextEpisodeString;
                        if (EpisodeTools.isUnwatched(watchedFlag) && preventSpoilers) {
                            // just display the episode number
                            nextEpisodeString = TextTools.getEpisodeNumber(context, seasonNumber,
                                    episodeNumber);
                        } else {
                            // display episode number and title
                            nextEpisodeString = TextTools.getNextEpisodeString(context, seasonNumber,
                                    episodeNumber, title);
                        }
                        item.setTextViewText(R.id.textViewWidgetEpisode, nextEpisodeString);

                        Date actualRelease = TimeTools.applyUserOffset(context,
                                upcomingEpisodes.getLong(CalendarAdapter.Query.RELEASE_TIME_MS)
                        );
                        // "Oct 31 (Fri)" or "in 13 mins (Fri)"
                        String dateTime = displayExactDate ?
                                TimeTools.formatToLocalDateShort(context, actualRelease)
                                : TimeTools.formatToLocalRelativeTime(context, actualRelease);
                        item.setTextViewText(R.id.widgetAirtime,
                                getString(R.string.release_date_and_day,
                                        dateTime,
                                        TimeTools.formatToLocalDay(actualRelease))
                        );

                        // absolute release time and network (if any)
                        String releaseTime = TimeTools.formatToLocalTime(context, actualRelease);
                        String network = upcomingEpisodes.getString(
                                CalendarAdapter.Query.SHOW_NETWORK);
                        if (!TextUtils.isEmpty(network)) {
                            releaseTime += " " + network;
                        }
                        item.setTextViewText(R.id.widgetNetwork, releaseTime);

                        // show title
                        item.setTextViewText(R.id.textViewWidgetShow,
                                upcomingEpisodes.getString(CalendarAdapter.Query.SHOW_TITLE));

                        // show poster
                        String posterPath = upcomingEpisodes.getString(
                                CalendarAdapter.Query.SHOW_POSTER);
                        maybeSetPoster(item, posterPath);

                        views.addView(R.id.LinearLayoutWidget, item);
                    }
                }
            } finally {
                if (upcomingEpisodes != null) {
                    upcomingEpisodes.close();
                }
            }

            // Create an Intent to launch Upcoming
            Intent activityIntent = new Intent(context, ShowsActivity.class);
            activityIntent.putExtra(ShowsActivity.InitBundle.SELECTED_TAB,
                    ShowsActivity.InitBundle.INDEX_TAB_UPCOMING);
            PendingIntent activityPendingIntent = TaskStackBuilder
                    .create(context)
                    .addNextIntent(activityIntent)
                    .getPendingIntent(0, 0);
            views.setOnClickPendingIntent(R.id.LinearLayoutWidget, activityPendingIntent);

            // Create an intent to update the widget
            updateIntent.setAction(REFRESH);
            PendingIntent pi = PendingIntent.getBroadcast(context, 0, updateIntent, 0);
            views.setOnClickPendingIntent(R.id.ImageButtonWidget, pi);

            return views;
        }

        /** Loads the show poster synchronously and sets it on the row, if possible. */
        private void maybeSetPoster(RemoteViews item, String posterPath) {
            try {
                Bitmap poster = ServiceUtils.loadWithPicasso(this,
                        TvdbTools.buildPosterUrl(posterPath))
                        .centerCrop()
                        .resizeDimen(R.dimen.show_poster_width, R.dimen.show_poster_height)
                        .get();
                item.setImageViewBitmap(R.id.widgetPoster, poster);
            } catch (IOException e) {
                Timber.e(e, "maybeSetPoster: failed.");
            }
        }
    }
}
|
|
/*
* $Id: OValValidationInterceptor.java 1099157 2011-05-03 17:53:55Z jogep $
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.struts2.oval.interceptor;
import com.opensymphony.xwork2.ActionInvocation;
import com.opensymphony.xwork2.ActionProxy;
import com.opensymphony.xwork2.ModelDriven;
import com.opensymphony.xwork2.Validateable;
import com.opensymphony.xwork2.inject.Inject;
import com.opensymphony.xwork2.interceptor.MethodFilterInterceptor;
import com.opensymphony.xwork2.interceptor.PrefixMethodInvocationUtil;
import com.opensymphony.xwork2.util.ValueStack;
import com.opensymphony.xwork2.util.logging.Logger;
import com.opensymphony.xwork2.util.logging.LoggerFactory;
import com.opensymphony.xwork2.validator.DelegatingValidatorContext;
import com.opensymphony.xwork2.validator.ValidatorContext;
import net.sf.oval.ConstraintViolation;
import net.sf.oval.Validator;
import net.sf.oval.configuration.Configurer;
import net.sf.oval.context.FieldContext;
import net.sf.oval.context.MethodReturnValueContext;
import net.sf.oval.context.OValContext;
import org.apache.commons.lang.StringUtils;
import org.apache.struts2.oval.annotation.Profiles;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.List;
/*
This interceptor provides validation using the OVal validation framework
*/
public class OValValidationInterceptor extends MethodFilterInterceptor {
private static final Logger LOG = LoggerFactory.getLogger(OValValidationInterceptor.class);
protected final static String VALIDATE_PREFIX = "validate";
protected final static String ALT_VALIDATE_PREFIX = "validateDo";
protected boolean alwaysInvokeValidate = true;
protected boolean programmatic = true;
protected OValValidationManager validationManager;
private boolean validateJPAAnnotations;
/**
 * Injected by the container; supplies the manager that resolves OVal
 * configurers (XML and annotation based) per action class.
 */
@Inject
public void setValidationManager(OValValidationManager validationManager) {
    this.validationManager = validationManager;
}
/**
 * Enable OVal support for JPA annotations.
 *
 * @param validateJPAAnnotations <tt>true</tt> to also derive constraints from JPA annotations.
 */
public void setValidateJPAAnnotations(boolean validateJPAAnnotations) {
    this.validateJPAAnnotations = validateJPAAnnotations;
}
/**
 * Determines if {@link com.opensymphony.xwork2.Validateable}'s <code>validate()</code> should be called,
 * as well as methods whose name that start with "validate". Defaults to "true".
 *
 * @param programmatic <tt>true</tt> then <code>validate()</code> is invoked.
 */
public void setProgrammatic(boolean programmatic) {
    this.programmatic = programmatic;
}
/**
 * Determines if {@link com.opensymphony.xwork2.Validateable}'s <code>validate()</code> should always
 * be invoked. Default to "true".
 *
 * <p>Takes a String (not boolean) so it can be set directly from interceptor
 * params in XML configuration.
 *
 * @param alwaysInvokeValidate <tt>true</tt> then <code>validate()</code> is always invoked.
 */
public void setAlwaysInvokeValidate(String alwaysInvokeValidate) {
    this.alwaysInvokeValidate = Boolean.parseBoolean(alwaysInvokeValidate);
}
/**
 * Runs OVal validation and then programmatic validation before invoking the
 * action. Validation errors are collected as action/field errors; the
 * invocation itself always proceeds (result mapping decides what happens on
 * errors).
 */
// NOTE(review): consider adding @Override, this implements MethodFilterInterceptor.doIntercept.
protected String doIntercept(ActionInvocation invocation) throws Exception {
    Object action = invocation.getAction();
    ActionProxy proxy = invocation.getProxy();
    ValueStack valueStack = invocation.getStack();
    String methodName = proxy.getMethod();
    // Action config name is used as the validation context (matches XML lookup).
    String context = proxy.getConfig().getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Validating [#0/#1] with method [#2]", invocation.getProxy().getNamespace(), invocation.getProxy().getActionName(), methodName);
    }
    // OVal validation (no XML yet)
    performOValValidation(action, valueStack, methodName, context);
    // Validateable.validate() and validateX()
    performProgrammaticValidation(invocation, action);
    return invocation.invoke();
}
/**
 * Invokes the action's programmatic validation: first any
 * {@code validateX()} / {@code validateDoX()} prefix methods, then
 * {@code validate()} itself (if {@code alwaysInvokeValidate} is set).
 * An exception from a prefix method is deferred so {@code validate()}
 * still runs, and is rethrown afterwards.
 */
private void performProgrammaticValidation(ActionInvocation invocation, Object action) throws Exception {
    // Nothing to do unless programmatic validation is enabled and supported.
    if (!programmatic || !(action instanceof Validateable)) {
        return;
    }

    Validateable validateable = (Validateable) action;
    if (LOG.isDebugEnabled()) {
        LOG.debug("Invoking validate() on action [#0]", validateable.toString());
    }

    // Remember a failure from validateXXX / validateDoXXX to rethrow later.
    Exception deferred = null;
    try {
        PrefixMethodInvocationUtil.invokePrefixMethod(
                invocation,
                new String[]{VALIDATE_PREFIX, ALT_VALIDATE_PREFIX});
    } catch (Exception e) {
        // If any exception occurred while doing reflection, we want
        // validate() to be executed
        if (LOG.isWarnEnabled()) {
            LOG.warn("An exception occured while executing the prefix method", e);
        }
        deferred = e;
    }

    if (alwaysInvokeValidate) {
        validateable.validate();
    }

    if (deferred != null) {
        throw deferred;
    }
}
/**
 * Runs OVal validation on the action and records any constraint violations
 * as action/field errors.
 *
 * @param action     action instance to validate
 * @param valueStack current value stack (used when translating messages)
 * @param methodName action method being executed; if it carries a
 *                   {@link Profiles} annotation, only those profiles are enabled
 * @param context    action config name, used to look up XML-based configurers
 * @throws NoSuchMethodException if the action method cannot be resolved
 */
protected void performOValValidation(Object action, ValueStack valueStack, String methodName, String context) throws NoSuchMethodException {
    // Class<?> instead of the raw type; no behavior change.
    Class<?> clazz = action.getClass();
    //read validation from xmls
    List<Configurer> configurers = validationManager.getConfigurers(clazz, context, validateJPAAnnotations);
    Validator validator = configurers.isEmpty() ? new Validator() : new Validator(configurers);
    //if the method is annotated with a @Profiles annotation, use those profiles
    // Class.getMethod never returns null (it throws NoSuchMethodException),
    // so the former null check was dead code and has been removed.
    Method method = clazz.getMethod(methodName);
    Profiles profiles = method.getAnnotation(Profiles.class);
    if (profiles != null) {
        String[] profileNames = profiles.value();
        if (profileNames != null && profileNames.length > 0) {
            validator.disableAllProfiles();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Enabling profiles [#0]", StringUtils.join(profileNames, ","));
            }
            for (String profileName : profileNames)
                validator.enableProfile(profileName);
        }
    }
    //perform validation
    List<ConstraintViolation> violations = validator.validate(action);
    addValidationErrors(violations.toArray(new ConstraintViolation[0]), action, valueStack, null);
}
/**
 * Recursively translates OVal constraint violations into XWork action/field
 * errors. Nested violations ({@code violation.getCauses()}) are reported with
 * a dotted parent prefix (e.g. {@code address.street}).
 *
 * @param violations      violations to report (may be null)
 * @param action          action the errors are attached to
 * @param valueStack      stack used to resolve message texts against the violation context
 * @param parentFieldname prefix for nested field names; null at the top level
 */
private void addValidationErrors(ConstraintViolation[] violations, Object action, ValueStack valueStack, String parentFieldname) {
    if (violations != null) {
        ValidatorContext validatorContext = new DelegatingValidatorContext(action);
        for (ConstraintViolation violation : violations) {
            //translate message
            String key = violation.getMessage();
            //push the validator into the stack
            valueStack.push(violation.getContext());
            String message = key;
            try {
                message = validatorContext.getText(key);
            } finally {
                // Always pop, even if text resolution throws, to keep the stack balanced.
                valueStack.pop();
            }
            if (isActionError(violation)) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Adding action error '#0'", message);
                }
                validatorContext.addActionError(message);
            } else {
                ValidationError validationError = buildValidationError(violation, message);
                // build field name
                String fieldName = validationError.getFieldName();
                if (parentFieldname != null) {
                    fieldName = parentFieldname + "." + fieldName;
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Adding field error [#0] with message '#1'", fieldName, validationError.getMessage());
                }
                validatorContext.addFieldError(fieldName, validationError.getMessage());
                // don't add "model." prefix to fields of model in model driven action
                if ((action instanceof ModelDriven) && "model".equals(fieldName)) {
                    fieldName = null;
                }
                // add violations of member object fields
                addValidationErrors(violation.getCauses(), action, valueStack, fieldName);
            }
        }
    }
}
/**
 * Gets the field name and message from a violation, used to add the validation error
 * to fieldErrors.
 * <p>
 * For field-level violations the name comes straight from the {@code Field}. For getter-level
 * violations ({@code getXxx()}/{@code isXxx()}) the field name is derived from the method
 * name, and occurrences of the full method call (e.g. {@code "getName()"}) in the message are
 * replaced by the derived field name. The default OVal message prefixes the field with the
 * fully-qualified class name ({@code ActionClass.fieldName}); that prefix is stripped.
 * Violations with any other context fall back to the check name.
 * </p>
 */
protected ValidationError buildValidationError(ConstraintViolation violation, String message) {
    OValContext context = violation.getContext();
    if (context instanceof FieldContext) {
        Field field = ((FieldContext) context).getField();
        String className = field.getDeclaringClass().getName();
        // The default OVal message shows the field name as ActionClass.fieldName; strip the prefix.
        String finalMessage = StringUtils.removeStart(message, className + ".");
        return new ValidationError(field.getName(), finalMessage);
    } else if (context instanceof MethodReturnValueContext) {
        Method method = ((MethodReturnValueContext) context).getMethod();
        String className = method.getDeclaringClass().getName();
        String methodName = method.getName();
        // The default OVal message shows the field name as ActionClass.fieldName; strip the prefix.
        String finalMessage = StringUtils.removeStart(message, className + ".");
        String fieldName = null;
        if (methodName.startsWith("get")) {
            fieldName = StringUtils.uncapitalize(StringUtils.removeStart(methodName, "get"));
        } else if (methodName.startsWith("is")) {
            fieldName = StringUtils.uncapitalize(StringUtils.removeStart(methodName, "is"));
        }
        // The message contains the full method name, like "getName()"; replace it by "name"
        // (only if the method maps to a field).
        if (fieldName != null)
            finalMessage = finalMessage.replaceAll(methodName + "\\(.*?\\)", fieldName);
        return new ValidationError(StringUtils.defaultString(fieldName, methodName), finalMessage);
    }
    return new ValidationError(violation.getCheckName(), message);
}
/**
 * Decides whether a violation should be added to actionErrors instead of fieldErrors.
 * <p>
 * The default implementation always returns {@code false}, so every violation becomes a
 * field error; subclasses may override this to route selected violations to actionErrors.
 * </p>
 *
 * @param violation the violation being reported
 * @return {@code true} to report as an action error, {@code false} for a field error
 */
protected boolean isActionError(ConstraintViolation violation) {
    return false;
}
/**
 * Immutable holder for a field name / message pair derived from an OVal violation.
 * <p>
 * Both fields are now {@code final}: the class is a pure value carrier and was never
 * mutated after construction.
 * </p>
 */
class ValidationError {
    private final String fieldName;
    private final String message;

    ValidationError(String fieldName, String message) {
        this.fieldName = fieldName;
        this.message = message;
    }

    /** @return the (possibly derived) name of the field the violation applies to */
    public String getFieldName() {
        return fieldName;
    }

    /** @return the resolved validation message */
    public String getMessage() {
        return message;
    }
}
}
|
|
package net.kencochrane.raven.log4j;
import com.google.common.base.Splitter;
import net.kencochrane.raven.Raven;
import net.kencochrane.raven.RavenFactory;
import net.kencochrane.raven.dsn.Dsn;
import net.kencochrane.raven.dsn.InvalidDsnException;
import net.kencochrane.raven.event.Event;
import net.kencochrane.raven.event.EventBuilder;
import net.kencochrane.raven.event.interfaces.ExceptionInterface;
import net.kencochrane.raven.event.interfaces.StackTraceInterface;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.spi.ErrorCode;
import org.apache.log4j.spi.LocationInfo;
import org.apache.log4j.spi.LoggingEvent;
import java.io.IOException;
import java.util.Collections;
import java.util.Date;
import java.util.Map;
/**
 * Appender for log4j in charge of sending the logged events to a Sentry server.
 * <p>
 * Events are built from the log4j {@link LoggingEvent} and forwarded through a {@link Raven}
 * instance, which is either injected via the constructor or created from the configured DSN
 * when {@link #activateOptions()} runs.
 * </p>
 */
public class SentryAppender extends AppenderSkeleton {
    /**
     * Name of the {@link Event#extra} property containing NDC details.
     */
    public static final String LOG4J_NDC = "log4J-NDC";
    /**
     * Name of the {@link Event#extra} property containing the Thread name.
     */
    public static final String THREAD_NAME = "Raven-Threadname";
    /**
     * Current instance of {@link Raven}.
     *
     * @see #initRaven()
     */
    protected Raven raven;
    /**
     * DSN property of the appender.
     * <p>
     * Might be null in which case the DSN should be detected automatically.
     * </p>
     */
    protected String dsn;
    /**
     * Name of the {@link RavenFactory} being used.
     * <p>
     * Might be null in which case the factory should be defined automatically.
     * </p>
     */
    protected String ravenFactory;
    /**
     * Additional tags to be sent to sentry.
     * <p>
     * Might be empty in which case no tags are sent.
     * </p>
     */
    protected Map<String, String> tags = Collections.emptyMap();
    /**
     * Creates an instance of SentryAppender.
     * <p>
     * The Raven instance is created later, in {@link #activateOptions()}.
     * </p>
     */
    public SentryAppender() {
    }
    /**
     * Creates an instance of SentryAppender.
     *
     * @param raven instance of Raven to use with this appender.
     */
    public SentryAppender(Raven raven) {
        this.raven = raven;
    }
    /**
     * Transforms a {@link Level} into an {@link Event.Level}.
     * <p>
     * NOTE(review): {@code Level.ALL} has the lowest possible rank, so the final
     * {@code isGreaterOrEqual(Level.ALL)} branch matches every remaining level (DEBUG and
     * TRACE both map to {@code Event.Level.DEBUG}) and the {@code null} fallback is
     * effectively unreachable — confirm this is intended (a stricter mapping would test
     * against {@code Level.DEBUG}).
     * </p>
     *
     * @param level original level as defined in log4j.
     * @return log level used within raven.
     */
    protected static Event.Level formatLevel(Level level) {
        if (level.isGreaterOrEqual(Level.FATAL)) {
            return Event.Level.FATAL;
        } else if (level.isGreaterOrEqual(Level.ERROR)) {
            return Event.Level.ERROR;
        } else if (level.isGreaterOrEqual(Level.WARN)) {
            return Event.Level.WARNING;
        } else if (level.isGreaterOrEqual(Level.INFO)) {
            return Event.Level.INFO;
        } else if (level.isGreaterOrEqual(Level.ALL)) {
            return Event.Level.DEBUG;
        } else return null;
    }
    /**
     * Transforms the location info of a log into a stacktrace element (stackframe).
     * <p>
     * log4j reports unknown values as the sentinel {@code LocationInfo.NA}; those are mapped
     * to {@code null} (file name) and {@code -1} (line number) respectively.
     * </p>
     *
     * @param location details on the location of the log.
     * @return a stackframe.
     */
    protected static StackTraceElement asStackTraceElement(LocationInfo location) {
        String fileName = (LocationInfo.NA.equals(location.getFileName())) ? null : location.getFileName();
        int line = (LocationInfo.NA.equals(location.getLineNumber())) ? -1 : Integer.parseInt(location.getLineNumber());
        return new StackTraceElement(location.getClassName(), location.getMethodName(), fileName, line);
    }
    @Override
    public void activateOptions() {
        super.activateOptions();
        // Build a Raven instance from the configured options unless one was injected directly.
        if (raven == null)
            initRaven();
    }
    /**
     * Initialises the Raven instance.
     * <p>
     * Errors are reported through the log4j {@link org.apache.log4j.spi.ErrorHandler} rather
     * than thrown, so a misconfigured DSN does not break the logging pipeline.
     * </p>
     */
    protected void initRaven() {
        try {
            // Fall back to automatic DSN detection when none was configured explicitly.
            if (dsn == null)
                dsn = Dsn.dsnLookup();
            raven = RavenFactory.ravenInstance(new Dsn(dsn), ravenFactory);
        } catch (InvalidDsnException e) {
            getErrorHandler().error("An exception occurred during the retrieval of the DSN for Raven", e,
                    ErrorCode.ADDRESS_PARSE_FAILURE);
        } catch (Exception e) {
            getErrorHandler().error("An exception occurred during the creation of a Raven instance", e,
                    ErrorCode.FILE_OPEN_FAILURE);
        }
    }
    @Override
    protected void append(LoggingEvent loggingEvent) {
        // Do not log the event if the current thread is managed by raven
        // (presumably to avoid re-entrant logging from Raven itself — confirm).
        if (Raven.isManagingThread())
            return;
        try {
            Raven.startManagingThread();
            Event event = buildEvent(loggingEvent);
            raven.sendEvent(event);
        } catch (Exception e) {
            getErrorHandler().error("An exception occurred while creating a new event in Raven", e,
                    ErrorCode.WRITE_FAILURE);
        } finally {
            // Always release the thread, even when building or sending the event failed.
            Raven.stopManagingThread();
        }
    }
    /**
     * Builds an Event based on the logging event.
     *
     * @param loggingEvent Log generated.
     * @return Event containing details provided by the logging system.
     */
    protected Event buildEvent(LoggingEvent loggingEvent) {
        EventBuilder eventBuilder = new EventBuilder()
                .setTimestamp(new Date(loggingEvent.getTimeStamp()))
                .setMessage(loggingEvent.getRenderedMessage())
                .setLogger(loggingEvent.getLoggerName())
                .setLevel(formatLevel(loggingEvent.getLevel()))
                .addExtra(THREAD_NAME, loggingEvent.getThreadName());
        // Attach the throwable if present; otherwise fall back to a single-frame stack trace
        // built from the log statement's source location (when that location is known).
        if (loggingEvent.getThrowableInformation() != null) {
            Throwable throwable = loggingEvent.getThrowableInformation().getThrowable();
            eventBuilder.addSentryInterface(new ExceptionInterface(throwable));
        } else if (loggingEvent.getLocationInformation().fullInfo != null) {
            LocationInfo location = loggingEvent.getLocationInformation();
            if (!LocationInfo.NA.equals(location.getFileName()) && !LocationInfo.NA.equals(location.getLineNumber())) {
                StackTraceElement[] stackTrace = {asStackTraceElement(location)};
                eventBuilder.addSentryInterface(new StackTraceInterface(stackTrace));
            }
        }
        // Set culprit: prefer the precise source location, fall back to the logger name.
        if (loggingEvent.getLocationInformation().fullInfo != null) {
            eventBuilder.setCulprit(asStackTraceElement(loggingEvent.getLocationInformation()));
        } else {
            eventBuilder.setCulprit(loggingEvent.getLoggerName());
        }
        if (loggingEvent.getNDC() != null)
            eventBuilder.addExtra(LOG4J_NDC, loggingEvent.getNDC());
        // Copy per-event properties (typically the MDC) into the event's extra data.
        @SuppressWarnings("unchecked")
        Map<String, Object> properties = (Map<String, Object>) loggingEvent.getProperties();
        for (Map.Entry<String, Object> mdcEntry : properties.entrySet())
            eventBuilder.addExtra(mdcEntry.getKey(), mdcEntry.getValue());
        // Apply the statically configured tags (see setTags).
        for (Map.Entry<String, String> tagEntry : tags.entrySet())
            eventBuilder.addTag(tagEntry.getKey(), tagEntry.getValue());
        raven.runBuilderHelpers(eventBuilder);
        return eventBuilder.build();
    }
    public void setRavenFactory(String ravenFactory) {
        this.ravenFactory = ravenFactory;
    }
    public void setDsn(String dsn) {
        this.dsn = dsn;
    }
    /**
     * Set the tags that should be sent along with the events.
     *
     * @param tags A String of tags. key/values are separated by colon(:) and tags are separated by commas(,).
     */
    public void setTags(String tags) {
        this.tags = Splitter.on(",").withKeyValueSeparator(":").split(tags);
    }
    @Override
    public void close() {
        // Idempotent: the inherited 'closed' flag ensures the connection is closed only once.
        if (this.closed)
            return;
        this.closed = true;
        try {
            if (raven != null)
                raven.getConnection().close();
        } catch (IOException e) {
            getErrorHandler().error("An exception occurred while closing the Raven connection", e,
                    ErrorCode.CLOSE_FAILURE);
        }
    }
    @Override
    public boolean requiresLayout() {
        // Events are built directly from the LoggingEvent; no log4j Layout is involved.
        return false;
    }
}
|
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.dlp.v2.model;
/**
 * Options defining BigQuery table and row identifiers.
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the Cloud Data Loss Prevention (DLP) API. For a detailed
 * explanation see:
 * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
 * </p>
 *
 * <p>NOTE: generated code — do not hand-edit beyond comments; changes are lost on regeneration.</p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class GooglePrivacyDlpV2BigQueryOptions extends com.google.api.client.json.GenericJson {
  /**
   * References to fields excluded from scanning. This allows you to skip inspection of entire
   * columns which you know have no findings.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<GooglePrivacyDlpV2FieldId> excludedFields;
  /**
   * Table fields that may uniquely identify a row within the table. When
   * `actions.saveFindings.outputConfig.table` is specified, the values of columns specified here
   * are available in the output table under
   * `location.content_locations.record_location.record_key.id_values`. Nested fields such as
   * `person.birthdate.year` are allowed.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<GooglePrivacyDlpV2FieldId> identifyingFields;
  /**
   * Limit scanning only to these fields.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<GooglePrivacyDlpV2FieldId> includedFields;
  /**
   * Max number of rows to scan. If the table has more rows than this value, the rest of the rows
   * are omitted. If not set, or if set to 0, all rows will be scanned. Only one of rows_limit and
   * rows_limit_percent can be specified. Cannot be used in conjunction with TimespanConfig.
   * ({@code @JsonString}: the 64-bit value is transmitted as a JSON string.)
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key @com.google.api.client.json.JsonString
  private java.lang.Long rowsLimit;
  /**
   * Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded
   * down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0.
   * Only one of rows_limit and rows_limit_percent can be specified. Cannot be used in conjunction
   * with TimespanConfig.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.Integer rowsLimitPercent;
  /**
   * NOTE(review): undocumented in the discovery document this class was generated from;
   * presumably selects the row-sampling strategy — verify against the current DLP API reference.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String sampleMethod;
  /**
   * Complete BigQuery table reference.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private GooglePrivacyDlpV2BigQueryTable tableReference;
  /**
   * References to fields excluded from scanning. This allows you to skip inspection of entire
   * columns which you know have no findings.
   * @return value or {@code null} for none
   */
  public java.util.List<GooglePrivacyDlpV2FieldId> getExcludedFields() {
    return excludedFields;
  }
  /**
   * References to fields excluded from scanning. This allows you to skip inspection of entire
   * columns which you know have no findings.
   * @param excludedFields excludedFields or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryOptions setExcludedFields(java.util.List<GooglePrivacyDlpV2FieldId> excludedFields) {
    this.excludedFields = excludedFields;
    return this;
  }
  /**
   * Table fields that may uniquely identify a row within the table. When
   * `actions.saveFindings.outputConfig.table` is specified, the values of columns specified here
   * are available in the output table under
   * `location.content_locations.record_location.record_key.id_values`. Nested fields such as
   * `person.birthdate.year` are allowed.
   * @return value or {@code null} for none
   */
  public java.util.List<GooglePrivacyDlpV2FieldId> getIdentifyingFields() {
    return identifyingFields;
  }
  /**
   * Table fields that may uniquely identify a row within the table. When
   * `actions.saveFindings.outputConfig.table` is specified, the values of columns specified here
   * are available in the output table under
   * `location.content_locations.record_location.record_key.id_values`. Nested fields such as
   * `person.birthdate.year` are allowed.
   * @param identifyingFields identifyingFields or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryOptions setIdentifyingFields(java.util.List<GooglePrivacyDlpV2FieldId> identifyingFields) {
    this.identifyingFields = identifyingFields;
    return this;
  }
  /**
   * Limit scanning only to these fields.
   * @return value or {@code null} for none
   */
  public java.util.List<GooglePrivacyDlpV2FieldId> getIncludedFields() {
    return includedFields;
  }
  /**
   * Limit scanning only to these fields.
   * @param includedFields includedFields or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryOptions setIncludedFields(java.util.List<GooglePrivacyDlpV2FieldId> includedFields) {
    this.includedFields = includedFields;
    return this;
  }
  /**
   * Max number of rows to scan. If the table has more rows than this value, the rest of the rows
   * are omitted. If not set, or if set to 0, all rows will be scanned. Only one of rows_limit and
   * rows_limit_percent can be specified. Cannot be used in conjunction with TimespanConfig.
   * @return value or {@code null} for none
   */
  public java.lang.Long getRowsLimit() {
    return rowsLimit;
  }
  /**
   * Max number of rows to scan. If the table has more rows than this value, the rest of the rows
   * are omitted. If not set, or if set to 0, all rows will be scanned. Only one of rows_limit and
   * rows_limit_percent can be specified. Cannot be used in conjunction with TimespanConfig.
   * @param rowsLimit rowsLimit or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryOptions setRowsLimit(java.lang.Long rowsLimit) {
    this.rowsLimit = rowsLimit;
    return this;
  }
  /**
   * Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded
   * down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0.
   * Only one of rows_limit and rows_limit_percent can be specified. Cannot be used in conjunction
   * with TimespanConfig.
   * @return value or {@code null} for none
   */
  public java.lang.Integer getRowsLimitPercent() {
    return rowsLimitPercent;
  }
  /**
   * Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded
   * down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0.
   * Only one of rows_limit and rows_limit_percent can be specified. Cannot be used in conjunction
   * with TimespanConfig.
   * @param rowsLimitPercent rowsLimitPercent or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryOptions setRowsLimitPercent(java.lang.Integer rowsLimitPercent) {
    this.rowsLimitPercent = rowsLimitPercent;
    return this;
  }
  /**
   * See the field-level note on {@code sampleMethod}.
   * @return value or {@code null} for none
   */
  public java.lang.String getSampleMethod() {
    return sampleMethod;
  }
  /**
   * See the field-level note on {@code sampleMethod}.
   * @param sampleMethod sampleMethod or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryOptions setSampleMethod(java.lang.String sampleMethod) {
    this.sampleMethod = sampleMethod;
    return this;
  }
  /**
   * Complete BigQuery table reference.
   * @return value or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryTable getTableReference() {
    return tableReference;
  }
  /**
   * Complete BigQuery table reference.
   * @param tableReference tableReference or {@code null} for none
   */
  public GooglePrivacyDlpV2BigQueryOptions setTableReference(GooglePrivacyDlpV2BigQueryTable tableReference) {
    this.tableReference = tableReference;
    return this;
  }
  @Override
  public GooglePrivacyDlpV2BigQueryOptions set(String fieldName, Object value) {
    return (GooglePrivacyDlpV2BigQueryOptions) super.set(fieldName, value);
  }
  @Override
  public GooglePrivacyDlpV2BigQueryOptions clone() {
    return (GooglePrivacyDlpV2BigQueryOptions) super.clone();
  }
}
|
|
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.testsuite.federation.storage.ldap;
import org.junit.Assert;
import org.keycloak.common.util.MultivaluedHashMap;
import org.keycloak.component.ComponentModel;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.LDAPConstants;
import org.keycloak.models.RealmModel;
import org.keycloak.models.UserCredentialModel;
import org.keycloak.models.UserModel;
import org.keycloak.models.UserProvider;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.models.utils.UserModelDelegate;
import org.keycloak.representations.idm.CredentialRepresentation;
import org.keycloak.representations.idm.SynchronizationResultRepresentation;
import org.keycloak.storage.UserStorageProvider;
import org.keycloak.storage.ldap.LDAPStorageProvider;
import org.keycloak.storage.ldap.LDAPConfig;
import org.keycloak.storage.ldap.LDAPUtils;
import org.keycloak.storage.ldap.idm.model.LDAPObject;
import org.keycloak.storage.ldap.idm.query.internal.LDAPQuery;
import org.keycloak.storage.ldap.idm.store.ldap.LDAPIdentityStore;
import org.keycloak.storage.ldap.mappers.LDAPStorageMapper;
import org.keycloak.storage.ldap.mappers.UserAttributeLDAPStorageMapper;
import org.keycloak.storage.ldap.mappers.UserAttributeLDAPStorageMapperFactory;
import org.keycloak.storage.ldap.mappers.membership.LDAPGroupMapperMode;
import org.keycloak.storage.ldap.mappers.membership.group.GroupLDAPStorageMapper;
import org.keycloak.storage.ldap.mappers.membership.group.GroupLDAPStorageMapperFactory;
import org.keycloak.storage.ldap.mappers.membership.group.GroupMapperConfig;
import org.keycloak.storage.ldap.mappers.membership.role.RoleLDAPStorageMapper;
import org.keycloak.storage.ldap.mappers.membership.role.RoleLDAPStorageMapperFactory;
import org.keycloak.storage.ldap.mappers.membership.role.RoleMapperConfig;
import org.keycloak.storage.user.SynchronizationResult;
import org.keycloak.testsuite.rule.LDAPRule;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* @author <a href="mailto:[email protected]">Marek Posolda</a>
*/
public class LDAPTestUtils {
public static MultivaluedHashMap<String, String> getLdapRuleConfig(LDAPRule ldapRule) {
Map<String,String> ldapConfig = ldapRule.getConfig();
return toComponentConfig(ldapConfig);
}
public static MultivaluedHashMap<String, String> toComponentConfig(Map<String, String> ldapConfig) {
MultivaluedHashMap<String, String> config = new MultivaluedHashMap<>();
for (Map.Entry<String, String> entry : ldapConfig.entrySet()) {
config.add(entry.getKey(), entry.getValue());
}
return config;
}
public static UserModel addLocalUser(KeycloakSession session, RealmModel realm, String username, String email, String password) {
UserModel user = session.userLocalStorage().addUser(realm, username);
user.setEmail(email);
user.setEnabled(true);
UserCredentialModel creds = new UserCredentialModel();
creds.setType(CredentialRepresentation.PASSWORD);
creds.setValue(password);
session.userCredentialManager().updateCredential(realm, user, creds);
return user;
}
public static LDAPObject addLDAPUser(LDAPStorageProvider ldapProvider, RealmModel realm, final String username,
final String firstName, final String lastName, final String email, final String street, final String... postalCode) {
UserModel helperUser = new UserModelDelegate(null) {
@Override
public String getUsername() {
return username;
}
@Override
public String getFirstName() {
return firstName;
}
@Override
public String getLastName() {
return lastName;
}
@Override
public String getEmail() {
return email;
}
@Override
public List<String> getAttribute(String name) {
if ("postal_code".equals(name) && postalCode != null && postalCode.length > 0) {
return Arrays.asList(postalCode);
} else if ("street".equals(name) && street != null) {
return Collections.singletonList(street);
} else {
return Collections.emptyList();
}
}
};
return LDAPUtils.addUserToLDAP(ldapProvider, realm, helperUser);
}
public static void updateLDAPPassword(LDAPStorageProvider ldapProvider, LDAPObject ldapUser, String password) {
ldapProvider.getLdapIdentityStore().updatePassword(ldapUser, password, null);
// Enable MSAD user through userAccountControls
if (ldapProvider.getLdapIdentityStore().getConfig().isActiveDirectory()) {
ldapUser.setSingleAttribute(LDAPConstants.USER_ACCOUNT_CONTROL, "512");
ldapProvider.getLdapIdentityStore().update(ldapUser);
}
}
public static LDAPStorageProvider getLdapProvider(KeycloakSession keycloakSession, ComponentModel ldapFedModel) {
return (LDAPStorageProvider)keycloakSession.getProvider(UserStorageProvider.class, ldapFedModel);
}
public static UserModel assertUserImported(UserProvider userProvider, RealmModel realm, String username, String expectedFirstName, String expectedLastName, String expectedEmail, String expectedPostalCode) {
UserModel user = userProvider.getUserByUsername(username, realm);
Assert.assertNotNull(user);
Assert.assertEquals(expectedFirstName, user.getFirstName());
Assert.assertEquals(expectedLastName, user.getLastName());
Assert.assertEquals(expectedEmail, user.getEmail());
Assert.assertEquals(expectedPostalCode, user.getFirstAttribute("postal_code"));
return user;
}
public static void assertLoaded(UserModel user, String username, String expectedFirstName, String expectedLastName, String expectedEmail, String expectedPostalCode) {
Assert.assertNotNull(user);
Assert.assertEquals(expectedFirstName, user.getFirstName());
Assert.assertEquals(expectedLastName, user.getLastName());
Assert.assertEquals(expectedEmail, user.getEmail());
Assert.assertEquals(expectedPostalCode, user.getFirstAttribute("postal_code"));
}
// CRUD model mappers
public static void addZipCodeLDAPMapper(RealmModel realm, ComponentModel providerModel) {
addUserAttributeMapper(realm, providerModel, "zipCodeMapper", "postal_code", LDAPConstants.POSTAL_CODE);
}
public static ComponentModel addUserAttributeMapper(RealmModel realm, ComponentModel providerModel, String mapperName, String userModelAttributeName, String ldapAttributeName) {
ComponentModel mapperModel = KeycloakModelUtils.createComponentModel(mapperName, providerModel.getId(), UserAttributeLDAPStorageMapperFactory.PROVIDER_ID, LDAPStorageMapper.class.getName(),
UserAttributeLDAPStorageMapper.USER_MODEL_ATTRIBUTE, userModelAttributeName,
UserAttributeLDAPStorageMapper.LDAP_ATTRIBUTE, ldapAttributeName,
UserAttributeLDAPStorageMapper.READ_ONLY, "false",
UserAttributeLDAPStorageMapper.ALWAYS_READ_VALUE_FROM_LDAP, "false",
UserAttributeLDAPStorageMapper.IS_MANDATORY_IN_LDAP, "false");
return realm.addComponentModel(mapperModel);
}
public static void addOrUpdateRoleLDAPMappers(RealmModel realm, ComponentModel providerModel, LDAPGroupMapperMode mode) {
ComponentModel mapperModel = getSubcomponentByName(realm, providerModel, "realmRolesMapper");
if (mapperModel != null) {
mapperModel.getConfig().putSingle(RoleMapperConfig.MODE, mode.toString());
realm.updateComponent(mapperModel);
} else {
String baseDn = providerModel.getConfig().getFirst(LDAPConstants.BASE_DN);
mapperModel = KeycloakModelUtils.createComponentModel("realmRolesMapper", providerModel.getId(), RoleLDAPStorageMapperFactory.PROVIDER_ID, LDAPStorageMapper.class.getName(),
RoleMapperConfig.ROLES_DN, "ou=RealmRoles," + baseDn,
RoleMapperConfig.USE_REALM_ROLES_MAPPING, "true",
RoleMapperConfig.MODE, mode.toString());
realm.addComponentModel(mapperModel);
}
mapperModel = getSubcomponentByName(realm, providerModel, "financeRolesMapper");
if (mapperModel != null) {
mapperModel.getConfig().putSingle(RoleMapperConfig.MODE, mode.toString());
realm.updateComponent(mapperModel);
} else {
String baseDn = providerModel.getConfig().getFirst(LDAPConstants.BASE_DN);
mapperModel = KeycloakModelUtils.createComponentModel("financeRolesMapper", providerModel.getId(), RoleLDAPStorageMapperFactory.PROVIDER_ID, LDAPStorageMapper.class.getName(),
RoleMapperConfig.ROLES_DN, "ou=FinanceRoles," + baseDn,
RoleMapperConfig.USE_REALM_ROLES_MAPPING, "false",
RoleMapperConfig.CLIENT_ID, "finance",
RoleMapperConfig.MODE, mode.toString());
realm.addComponentModel(mapperModel);
}
}
public static ComponentModel getSubcomponentByName(RealmModel realm, ComponentModel providerModel, String name) {
List<ComponentModel> components = realm.getComponents(providerModel.getId(), LDAPStorageMapper.class.getName());
for (ComponentModel component : components) {
if (component.getName().equals(name)) {
return component;
}
}
return null;
}
public static void addOrUpdateGroupMapper(RealmModel realm, ComponentModel providerModel, LDAPGroupMapperMode mode, String descriptionAttrName, String... otherConfigOptions) {
ComponentModel mapperModel = getSubcomponentByName(realm, providerModel, "groupsMapper");
if (mapperModel != null) {
mapperModel.getConfig().putSingle(GroupMapperConfig.MODE, mode.toString());
updateGroupMapperConfigOptions(mapperModel, otherConfigOptions);
realm.updateComponent(mapperModel);
} else {
String baseDn = providerModel.getConfig().getFirst(LDAPConstants.BASE_DN);
mapperModel = KeycloakModelUtils.createComponentModel("groupsMapper", providerModel.getId(), GroupLDAPStorageMapperFactory.PROVIDER_ID, LDAPStorageMapper.class.getName(),
GroupMapperConfig.GROUPS_DN, "ou=Groups," + baseDn,
GroupMapperConfig.MAPPED_GROUP_ATTRIBUTES, descriptionAttrName,
GroupMapperConfig.PRESERVE_GROUP_INHERITANCE, "true",
GroupMapperConfig.MODE, mode.toString());
updateGroupMapperConfigOptions(mapperModel, otherConfigOptions);
realm.addComponentModel(mapperModel);
}
}
public static void updateGroupMapperConfigOptions(ComponentModel mapperModel, String... configOptions) {
for (int i=0 ; i<configOptions.length ; i+=2) {
String cfgName = configOptions[i];
String cfgValue = configOptions[i+1];
mapperModel.getConfig().putSingle(cfgName, cfgValue);
}
}
// End CRUD model mappers
public static void syncRolesFromLDAP(RealmModel realm, LDAPStorageProvider ldapProvider, ComponentModel providerModel) {
ComponentModel mapperModel = getSubcomponentByName(realm, providerModel, "realmRolesMapper");
RoleLDAPStorageMapper roleMapper = getRoleMapper(mapperModel, ldapProvider, realm);
roleMapper.syncDataFromFederationProviderToKeycloak(realm);
mapperModel = getSubcomponentByName(realm, providerModel, "financeRolesMapper");
roleMapper = getRoleMapper(mapperModel, ldapProvider, realm);
roleMapper.syncDataFromFederationProviderToKeycloak(realm);
}
public static void removeAllLDAPUsers(LDAPStorageProvider ldapProvider, RealmModel realm) {
LDAPIdentityStore ldapStore = ldapProvider.getLdapIdentityStore();
LDAPQuery ldapQuery = LDAPUtils.createQueryForUserSearch(ldapProvider, realm);
List<LDAPObject> allUsers = ldapQuery.getResultList();
for (LDAPObject ldapUser : allUsers) {
ldapStore.remove(ldapUser);
}
}
public static void removeLDAPUserByUsername(LDAPStorageProvider ldapProvider, RealmModel realm, LDAPConfig config, String username) {
LDAPIdentityStore ldapStore = ldapProvider.getLdapIdentityStore();
LDAPQuery ldapQuery = LDAPUtils.createQueryForUserSearch(ldapProvider, realm);
List<LDAPObject> allUsers = ldapQuery.getResultList();
// This is ugly, we are iterating over the entire set of ldap users and deleting the one where the username matches. TODO: Find a better way!
for (LDAPObject ldapUser : allUsers) {
if (username.equals(LDAPUtils.getUsername(ldapUser, config))) {
ldapStore.remove(ldapUser);
}
}
}
public static void removeAllLDAPRoles(KeycloakSession session, RealmModel appRealm, ComponentModel ldapModel, String mapperName) {
ComponentModel mapperModel = getSubcomponentByName(appRealm, ldapModel, mapperName);
LDAPStorageProvider ldapProvider = LDAPTestUtils.getLdapProvider(session, ldapModel);
LDAPQuery roleQuery = getRoleMapper(mapperModel, ldapProvider, appRealm).createRoleQuery();
List<LDAPObject> ldapRoles = roleQuery.getResultList();
for (LDAPObject ldapRole : ldapRoles) {
ldapProvider.getLdapIdentityStore().remove(ldapRole);
}
}
public static void removeAllLDAPGroups(KeycloakSession session, RealmModel appRealm, ComponentModel ldapModel, String mapperName) {
ComponentModel mapperModel = getSubcomponentByName(appRealm, ldapModel, mapperName);
LDAPStorageProvider ldapProvider = LDAPTestUtils.getLdapProvider(session, ldapModel);
LDAPQuery roleQuery = getGroupMapper(mapperModel, ldapProvider, appRealm).createGroupQuery();
List<LDAPObject> ldapRoles = roleQuery.getResultList();
for (LDAPObject ldapRole : ldapRoles) {
ldapProvider.getLdapIdentityStore().remove(ldapRole);
}
}
public static void createLDAPRole(KeycloakSession session, RealmModel appRealm, ComponentModel ldapModel, String mapperName, String roleName) {
ComponentModel mapperModel = getSubcomponentByName(appRealm, ldapModel, mapperName);
LDAPStorageProvider ldapProvider = LDAPTestUtils.getLdapProvider(session, ldapModel);
getRoleMapper(mapperModel, ldapProvider, appRealm).createLDAPRole(roleName);
}
public static LDAPObject createLDAPGroup(KeycloakSession session, RealmModel appRealm, ComponentModel ldapModel, String groupName, String... additionalAttrs) {
ComponentModel mapperModel = getSubcomponentByName(appRealm, ldapModel, "groupsMapper");
LDAPStorageProvider ldapProvider = LDAPTestUtils.getLdapProvider(session, ldapModel);
Map<String, Set<String>> additAttrs = new HashMap<>();
for (int i=0 ; i<additionalAttrs.length ; i+=2) {
String attrName = additionalAttrs[i];
String attrValue = additionalAttrs[i+1];
additAttrs.put(attrName, Collections.singleton(attrValue));
}
return getGroupMapper(mapperModel, ldapProvider, appRealm).createLDAPGroup(groupName, additAttrs);
}
/**
 * Builds a group mapper instance for the given mapper configuration.
 * The {@code realm} parameter is currently unused but kept for API compatibility.
 */
public static GroupLDAPStorageMapper getGroupMapper(ComponentModel mapperModel, LDAPStorageProvider ldapProvider, RealmModel realm) {
    GroupLDAPStorageMapperFactory factory = new GroupLDAPStorageMapperFactory();
    return new GroupLDAPStorageMapper(mapperModel, ldapProvider, factory);
}
/**
 * Builds a role mapper instance for the given mapper configuration.
 * The {@code realm} parameter is currently unused but kept for API compatibility.
 */
public static RoleLDAPStorageMapper getRoleMapper(ComponentModel mapperModel, LDAPStorageProvider ldapProvider, RealmModel realm) {
    RoleLDAPStorageMapperFactory factory = new RoleLDAPStorageMapperFactory();
    return new RoleLDAPStorageMapper(mapperModel, ldapProvider, factory);
}
/**
 * Asserts that the given synchronization result reports exactly the expected
 * number of added, updated, removed and failed entries.
 */
public static void assertSyncEquals(SynchronizationResult syncResult, int expectedAdded, int expectedUpdated, int expectedRemoved, int expectedFailed) {
    Assert.assertEquals(expectedAdded, syncResult.getAdded());
    Assert.assertEquals(expectedUpdated, syncResult.getUpdated());
    Assert.assertEquals(expectedRemoved, syncResult.getRemoved());
    Assert.assertEquals(expectedFailed, syncResult.getFailed());
}
/**
 * Asserts that the given synchronization result representation (REST DTO)
 * reports exactly the expected added/updated/removed/failed counts.
 */
public static void assertSyncEquals(SynchronizationResultRepresentation syncResult, int expectedAdded, int expectedUpdated, int expectedRemoved, int expectedFailed) {
    Assert.assertEquals(expectedAdded, syncResult.getAdded());
    Assert.assertEquals(expectedUpdated, syncResult.getUpdated());
    Assert.assertEquals(expectedRemoved, syncResult.getRemoved());
    Assert.assertEquals(expectedFailed, syncResult.getFailed());
}
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.ControlGroup.MemoryControlGroup;
import org.apache.hadoop.syscall.LinuxSystemCall;
import java.io.BufferedReader;
import java.io.FileReader;
/**
* Limits memory usages of a TaskTracker and its Task through a Linux memory
* control group.
*
* Requirement:
* <ul>
* <li>A memory sub-system control group is available</li>
* <li>A path to a target control group is configured
* (mapred.tasktracker.cgroup.mem.root) or the default path will be used
* (/cgroup/memory/tasktrackers)</li>
* <li>A user launching Task has a permission to modify a control group (i.e.
* using chown)</li>
* <li>A memory limit property is set at the target control group</li>
* </ul>
*
* Current limitation of this implementation:
* <ul>
* <li>Support only a single TaskTracker per server</li>
* </ul>
*/
public class TaskTrackerMemoryControlGroup {
    private static final Log LOG = LogFactory.getLog(TaskTrackerMemoryControlGroup.class);

    /** Configuration key / default for the per-task container cgroup root. */
    public static final String CGROUP_MEM_ROOT_PROPERTY = "mapred.container.cgroup.mem.root";
    public static final String DEFAULT_CGROUP_MEM_ROOT = "/cgroup/memory/task_container";
    /** Configuration key / default for the JobTracker cgroup root. */
    public static final String CGROUP_MEM_JT_ROOT = "mapred.jobtracker.cgroup.mem.root";
    public static final String DEFAULT_JT_ROOT = "/cgroup/memory/jobtrackers";
    /** Configuration key / default for the TaskTracker cgroup root. */
    public static final String CGROUP_MEM_TT_ROOT = "mapred.tasktracker.cgroup.mem.root";
    public static final String DEFAULT_TT_ROOT = "/cgroup/memory/tasktrackers";
    // conf to control if we shall setup CGroup memory limit for individual tasks.
    // The default is false
    public static final String CGROUP_MEM_TASK_LIMIT = "mapred.tasktracker.cgroup.mem.tasklimit";
    /** Sub-group where threads of a removed task are parked before its group is deleted. */
    public static final String CGROUP_TRASH_GROUP_NAME = "trash";

    /** True only when every availability/permission check in the constructor passed. */
    private boolean isAvailable;
    private MemoryControlGroup ttcgp;
    private MemoryControlGroup jtcgp;
    private MemoryControlGroup containercgp;
    private MemoryControlGroup trashcgp;
    private boolean isTaskLimitOn = false;
    private String rootpath;
    /** Counts addTask() calls rejected because the control group is unavailable. */
    private AtomicInteger numFailedToAddTask = new AtomicInteger();

    /**
     * Sets up the JobTracker, TaskTracker and task-container memory control
     * groups from the given configuration, then cleans up container groups
     * left over from a previous TaskTracker run. If any precondition fails
     * (cgroup sub-system missing, insufficient permissions, JVM_PID unset,
     * no memory limit configured) the instance is marked unavailable and all
     * later operations become no-ops.
     *
     * @param conf job/tasktracker configuration supplying cgroup paths
     */
    public TaskTrackerMemoryControlGroup(Configuration conf) {
        if (!MemoryControlGroup.isAvailable()) {
            LOG.warn("TaskMemoryControlGroup is disabled because a memory sub-system is not available");
            isAvailable = false;
            return;
        }
        String jtRootpath = conf.get(CGROUP_MEM_JT_ROOT, DEFAULT_JT_ROOT);
        jtcgp = new MemoryControlGroup(jtRootpath);
        jtcgp.enableMoveChargeAtImmigrate();
        if (!jtcgp.canControl()) {
            LOG.warn("TaskMemoryControlGroup is disabled because jtgroup doesn't have appropriate permission for "
                + jtRootpath);
            isAvailable = false;
            return;
        }
        String ttRootpath = conf.get(CGROUP_MEM_TT_ROOT, DEFAULT_TT_ROOT);
        ttcgp = new MemoryControlGroup(ttRootpath);
        ttcgp.enableMoveChargeAtImmigrate();
        if (!ttcgp.canControl()) {
            LOG.warn("TaskMemoryControlGroup is disabled because ttgroup doesn't have appropriate permission for "
                + ttRootpath);
            isAvailable = false;
            return;
        }
        // Bug fix: System.getenv() returns null (not "") for an unset variable,
        // so the previous getPID().equals("") check could throw an NPE.
        String pid = getPID();
        if (pid == null || pid.equals("")) {
            LOG.warn("TaskMemoryControlGroup is disabled because JVM_PID is not set for TaskTracker");
            isAvailable = false;
            return;
        }
        ttcgp.addToGroup(pid);
        rootpath = conf.get(CGROUP_MEM_ROOT_PROPERTY, DEFAULT_CGROUP_MEM_ROOT);
        containercgp = new MemoryControlGroup(rootpath);
        if (!containercgp.canControl()) {
            LOG.warn("TaskMemoryControlGroup is disabled because TaskTracker does not have appropriate permission for "
                + rootpath);
            isAvailable = false;
            return;
        }
        if (containercgp.getMemoryUsageLimit() <= 0) {
            LOG.warn("TaskMemoryControlGroup is disabled because memory.limit_in_bytes is not set up");
            isAvailable = false;
            return;
        }
        containercgp.enableMoveChargeAtImmigrate();
        containercgp.enableUseHierarchy();
        isAvailable = true;
        isTaskLimitOn = conf.getBoolean(CGROUP_MEM_TASK_LIMIT, false);
        trashcgp = containercgp.createSubGroup(CGROUP_TRASH_GROUP_NAME);
        trashcgp.disableMoveChargeAtImmigrate();
        // delete the old container group. Some tasks are failed to be removed when
        // the task tracker exited.
        File containDir = new File(rootpath);
        // Bug fix: File.list() returns null when the path is not a readable
        // directory; the previous for-each loop would NPE in that case.
        String[] children = containDir.list();
        if (children == null) {
            LOG.warn("Unable to list container cgroup directory " + rootpath);
        } else {
            for (String child : children) {
                if (child.startsWith("attempt")) {
                    LOG.info("Remove " + child);
                    killTasksInGroup(child);
                    removeTask(child);
                }
            }
        }
        LOG.info("TaskTrackerMemoryControlGroup is created with memory = " +
            containercgp.getMemoryUsageLimit());
    }

    /**
     * Kills every process listed in the "tasks" file of the given leftover
     * container sub-group. Best effort: failures are logged, never thrown.
     */
    private void killTasksInGroup(String child) {
        BufferedReader reader = null;
        try {
            reader = new BufferedReader(new FileReader(
                rootpath + "/" + child + "/tasks"));
            String thread;
            while ((thread = reader.readLine()) != null) {
                LOG.info(" kill " + thread);
                LinuxSystemCall.killProcessGroup(Integer.parseInt(thread));
            }
        } catch (java.io.IOException e) {
            // Keep the exception so the failure is diagnosable (was swallowed before).
            LOG.info("Exception in killing tasks", e);
        } catch (NumberFormatException e) {
            // Bug fix: a malformed pid line previously propagated out of the
            // constructor and aborted TaskTracker startup.
            LOG.warn("Malformed pid entry in " + child + "/tasks", e);
        } finally {
            // Bug fix: the reader used to leak when readLine()/kill threw.
            if (reader != null) {
                try {
                    reader.close();
                } catch (java.io.IOException ignored) {
                    // best-effort cleanup only
                }
            }
        }
    }

    /** Returns and resets the count of addTask() calls that were rejected. */
    public int getAndResetNumFailedToAddTask() {
        return this.numFailedToAddTask.getAndSet(0);
    }

    /**
     * Creates a sub-group for the given task and moves its pid into it,
     * optionally applying a per-task memory limit.
     *
     * @param taskname    attempt id used as the sub-group name
     * @param pid         process id to place into the group
     * @param memoryLimit per-task limit in bytes (applied only when task limits are on)
     */
    public void addTask(String taskname, String pid, long memoryLimit) {
        if (!isAvailable) {
            this.numFailedToAddTask.incrementAndGet();
            return;
        }
        MemoryControlGroup taskcgp = containercgp.createSubGroup(taskname);
        taskcgp.enableMoveChargeAtImmigrate();
        if (isTaskLimitOn) {
            taskcgp.setMemoryUsageLimit(memoryLimit);
            LOG.info("Task " + taskname + " is added to control group with memory = " +
                memoryLimit);
        } else {
            LOG.info("Task " + taskname + " is added to control group without limit");
        }
        taskcgp.addToGroup(pid);
    }

    /**
     * Removes the task's sub-group, first parking any threads still in it
     * into the trash group so the group can be deleted.
     */
    public void removeTask(String taskname) {
        if (!isAvailable)
            return;
        MemoryControlGroup taskcgp = containercgp.getSubGroup(taskname);
        trashcgp.addToGroup(taskcgp.getThreadGroupList());
        taskcgp.deleteGroup();
    }

    /** Reads the TaskTracker pid from the JVM_PID env var; null when unset. */
    private static String getPID() {
        return System.getenv().get("JVM_PID");
    }

    public boolean getTaskLimitOn() {
        return isTaskLimitOn;
    }

    public String getRootPath() {
        return rootpath;
    }

    public MemoryControlGroup getContainerMemoryControlGroup() {
        return containercgp;
    }

    public MemoryControlGroup getJTMemoryControlGroup() {
        return jtcgp;
    }

    public MemoryControlGroup getTTMemoryControlGroup() {
        return ttcgp;
    }

    /** @return true when all cgroup preconditions passed at construction time */
    public boolean checkAvailable() {
        return isAvailable;
    }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.taskexecutor;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.highavailability.HighAvailabilityServices;
import org.apache.flink.runtime.jobmaster.JMTMRegistrationSuccess;
import org.apache.flink.runtime.jobmaster.JobMasterGateway;
import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalListener;
import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService;
import org.apache.flink.runtime.registration.RegisteredRpcConnection;
import org.apache.flink.runtime.registration.RegistrationResponse;
import org.apache.flink.runtime.registration.RetryingRegistration;
import org.apache.flink.runtime.rpc.RpcService;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
/**
* This service has the responsibility to monitor the job leaders (the job manager which is leader
* for a given job) for all registered jobs. Upon gaining leadership for a job and detection by the
* job leader service, the service tries to establish a connection to the job leader. After
* successfully establishing a connection, the job leader listener is notified about the new job
* leader and its connection. In case that a job leader loses leadership, the job leader listener
* is notified as well.
*/
public class JobLeaderService {

    private static final Logger LOG = LoggerFactory.getLogger(JobLeaderService.class);

    /** Self's location, used for the job manager connection */
    private final TaskManagerLocation ownLocation;

    /** The leader retrieval service and listener for each registered job */
    private final Map<JobID, Tuple2<LeaderRetrievalService, JobLeaderService.JobManagerLeaderListener>> jobLeaderServices;

    /** Internal state of the service */
    private volatile JobLeaderService.State state;

    /** Address of the owner of this service. This address is used for the job manager connection */
    private String ownerAddress;

    /** Rpc service to use for establishing connections */
    private RpcService rpcService;

    /** High availability services to create the leader retrieval services from */
    private HighAvailabilityServices highAvailabilityServices;

    /** Job leader listener listening for job leader changes */
    private JobLeaderListener jobLeaderListener;

    public JobLeaderService(TaskManagerLocation location) {
        this.ownLocation = Preconditions.checkNotNull(location);
        // A task manager typically tracks only a handful of jobs; start small.
        jobLeaderServices = new HashMap<>(4);
        // Remaining collaborators are injected later via start().
        state = JobLeaderService.State.CREATED;
        ownerAddress = null;
        rpcService = null;
        highAvailabilityServices = null;
        jobLeaderListener = null;
    }

    // -------------------------------------------------------------------------------
    // Methods
    // -------------------------------------------------------------------------------

    /**
     * Start the job leader service with the given services.
     *
     * @param initialOwnerAddress to be used for establishing connections (source address)
     * @param initialRpcService to be used to create rpc connections
     * @param initialHighAvailabilityServices to create leader retrieval services for the different jobs
     * @param initialJobLeaderListener listening for job leader changes
     */
    public void start(
            final String initialOwnerAddress,
            final RpcService initialRpcService,
            final HighAvailabilityServices initialHighAvailabilityServices,
            final JobLeaderListener initialJobLeaderListener) {
        // Single CREATED -> STARTED transition; restarting is not supported.
        if (JobLeaderService.State.CREATED != state) {
            throw new IllegalStateException("The service has already been started.");
        } else {
            LOG.info("Start job leader service.");
            this.ownerAddress = Preconditions.checkNotNull(initialOwnerAddress);
            this.rpcService = Preconditions.checkNotNull(initialRpcService);
            this.highAvailabilityServices = Preconditions.checkNotNull(initialHighAvailabilityServices);
            this.jobLeaderListener = Preconditions.checkNotNull(initialJobLeaderListener);
            state = JobLeaderService.State.STARTED;
        }
    }

    /**
     * Stop the job leader services. This implies stopping all leader retrieval services for the
     * different jobs and their leader retrieval listeners.
     *
     * @throws Exception if an error occurs while stopping the service
     */
    public void stop() throws Exception {
        LOG.info("Stop job leader service.");
        if (JobLeaderService.State.STARTED == state) {
            for (Tuple2<LeaderRetrievalService, JobLeaderService.JobManagerLeaderListener> leaderRetrievalServiceEntry: jobLeaderServices.values()) {
                LeaderRetrievalService leaderRetrievalService = leaderRetrievalServiceEntry.f0;
                JobLeaderService.JobManagerLeaderListener jobManagerLeaderListener = leaderRetrievalServiceEntry.f1;
                // Stop the listener first so late leader notifications are ignored.
                jobManagerLeaderListener.stop();
                leaderRetrievalService.stop();
            }
            jobLeaderServices.clear();
        }
        // NOTE(review): state is set to STOPPED even from CREATED, making stop()
        // before start() a legal no-op transition — confirm this is intended.
        state = JobLeaderService.State.STOPPED;
    }

    /**
     * Check whether the service monitors the given job.
     *
     * @param jobId identifying the job
     * @return True if the given job is monitored; otherwise false
     */
    public boolean containsJob(JobID jobId) {
        Preconditions.checkState(JobLeaderService.State.STARTED == state, "The service is currently not running.");
        return jobLeaderServices.containsKey(jobId);
    }

    /**
     * Remove the given job from being monitored by the job leader service.
     *
     * @param jobId identifying the job to remove from monitoring
     * @throws Exception if an error occurred while stopping the leader retrieval service and listener
     */
    public void removeJob(JobID jobId) throws Exception {
        Preconditions.checkState(JobLeaderService.State.STARTED == state, "The service is currently not running.");
        Tuple2<LeaderRetrievalService, JobLeaderService.JobManagerLeaderListener> entry = jobLeaderServices.remove(jobId);
        if (entry != null) {
            LOG.info("Remove job {} from job leader monitoring.", jobId);
            LeaderRetrievalService leaderRetrievalService = entry.f0;
            JobLeaderService.JobManagerLeaderListener jobManagerLeaderListener = entry.f1;
            leaderRetrievalService.stop();
            jobManagerLeaderListener.stop();
        }
    }

    /**
     * Add the given job to be monitored. This means that the service tries to detect leaders for
     * this job and then tries to establish a connection to it.
     *
     * @param jobId identifying the job to monitor
     * @param defaultTargetAddress of the job leader
     * @throws Exception if an error occurs while starting the leader retrieval service
     */
    public void addJob(final JobID jobId, final String defaultTargetAddress) throws Exception {
        Preconditions.checkState(JobLeaderService.State.STARTED == state, "The service is currently not running.");
        LOG.info("Add job {} for job leader monitoring.", jobId);
        final LeaderRetrievalService leaderRetrievalService = highAvailabilityServices.getJobManagerLeaderRetriever(
            jobId,
            defaultTargetAddress);
        JobLeaderService.JobManagerLeaderListener jobManagerLeaderListener = new JobManagerLeaderListener(jobId);
        // Starting the retrieval service may trigger notifyLeaderAddress immediately.
        leaderRetrievalService.start(jobManagerLeaderListener);
        jobLeaderServices.put(jobId, Tuple2.of(leaderRetrievalService, jobManagerLeaderListener));
    }

    /**
     * Leader listener which tries to establish a connection to a newly detected job leader.
     */
    private final class JobManagerLeaderListener implements LeaderRetrievalListener {

        /** Job id identifying the job to look for a leader */
        private final JobID jobId;

        /** Rpc connection to the job leader */
        private RegisteredRpcConnection<JobMasterGateway, JMTMRegistrationSuccess> rpcConnection;

        /** State of the listener */
        private volatile boolean stopped;

        /** Leader id of the current job leader */
        private volatile UUID currentLeaderId;

        private JobManagerLeaderListener(JobID jobId) {
            this.jobId = Preconditions.checkNotNull(jobId);
            stopped = false;
            rpcConnection = null;
            currentLeaderId = null;
        }

        public void stop() {
            stopped = true;
            if (rpcConnection != null) {
                rpcConnection.close();
            }
        }

        @Override
        public void notifyLeaderAddress(final String leaderAddress, final UUID leaderId) {
            if (stopped) {
                LOG.debug("{}'s leader retrieval listener reported a new leader for job {}. " +
                    "However, the service is no longer running.", JobLeaderService.class.getSimpleName(), jobId);
            } else {
                LOG.debug("New leader information for job {}. Address: {}, leader id: {}.",
                    jobId, leaderAddress, leaderId);
                if (leaderAddress == null || leaderAddress.isEmpty()) {
                    // the leader lost leadership but there is no other leader yet.
                    if (rpcConnection != null) {
                        rpcConnection.close();
                    }
                    // NOTE(review): rpcConnection is not reset to null here; a later
                    // leader carrying the same leader id would be compared against the
                    // closed connection below and not reconnected — confirm intended.
                    jobLeaderListener.jobManagerLostLeadership(jobId, currentLeaderId);
                    currentLeaderId = leaderId;
                } else {
                    currentLeaderId = leaderId;
                    if (rpcConnection != null) {
                        // check if we are already trying to connect to this leader
                        if (!leaderId.equals(rpcConnection.getTargetLeaderId())) {
                            rpcConnection.close();
                            rpcConnection = new JobManagerRegisteredRpcConnection(
                                LOG,
                                leaderAddress,
                                leaderId,
                                rpcService.getExecutor());
                        }
                    } else {
                        rpcConnection = new JobManagerRegisteredRpcConnection(
                            LOG,
                            leaderAddress,
                            leaderId,
                            rpcService.getExecutor());
                    }
                    // double check for a concurrent stop operation
                    if (stopped) {
                        rpcConnection.close();
                    } else {
                        LOG.info("Try to register at job manager {} with leader id {}.", leaderAddress, leaderId);
                        rpcConnection.start();
                    }
                }
            }
        }

        @Override
        public void handleError(Exception exception) {
            if (stopped) {
                LOG.debug("{}'s leader retrieval listener reported an exception for job {}. " +
                    "However, the service is no longer running.", JobLeaderService.class.getSimpleName(),
                    jobId, exception);
            } else {
                // Forward retrieval errors to the owner while the service is live.
                jobLeaderListener.handleError(exception);
            }
        }

        /**
         * Rpc connection for the job manager <--> task manager connection.
         */
        private final class JobManagerRegisteredRpcConnection extends RegisteredRpcConnection<JobMasterGateway, JMTMRegistrationSuccess> {

            JobManagerRegisteredRpcConnection(
                    Logger log,
                    String targetAddress,
                    UUID targetLeaderId,
                    Executor executor) {
                super(log, targetAddress, targetLeaderId, executor);
            }

            @Override
            protected RetryingRegistration<JobMasterGateway, JMTMRegistrationSuccess> generateRegistration() {
                return new JobLeaderService.JobManagerRetryingRegistration(
                    LOG,
                    rpcService,
                    "JobManager",
                    JobMasterGateway.class,
                    getTargetAddress(),
                    getTargetLeaderId(),
                    ownerAddress,
                    ownLocation);
            }

            @Override
            protected void onRegistrationSuccess(JMTMRegistrationSuccess success) {
                // filter out old registration attempts
                if (getTargetLeaderId().equals(currentLeaderId)) {
                    log.info("Successful registration at job manager {} for job {}.", getTargetAddress(), jobId);
                    jobLeaderListener.jobManagerGainedLeadership(jobId, getTargetGateway(), getTargetLeaderId(), success);
                } else {
                    log.debug("Encountered obsolete JobManager registration success from {} with leader session ID {}.", getTargetAddress(), getTargetLeaderId());
                }
            }

            @Override
            protected void onRegistrationFailure(Throwable failure) {
                // filter out old registration attempts
                if (getTargetLeaderId().equals(currentLeaderId)) {
                    log.info("Failed to register at job manager {} for job {}.", getTargetAddress(), jobId);
                    jobLeaderListener.handleError(failure);
                } else {
                    log.debug("Obsolete JobManager registration failure from {} with leader session ID {}.", getTargetAddress(), getTargetLeaderId(), failure);
                }
            }
        }
    }

    /**
     * Retrying registration for the job manager <--> task manager connection.
     */
    private static final class JobManagerRetryingRegistration
            extends RetryingRegistration<JobMasterGateway, JMTMRegistrationSuccess>
    {
        private final String taskManagerRpcAddress;

        private final TaskManagerLocation taskManagerLocation;

        JobManagerRetryingRegistration(
                Logger log,
                RpcService rpcService,
                String targetName,
                Class<JobMasterGateway> targetType,
                String targetAddress,
                UUID leaderId,
                String taskManagerRpcAddress,
                TaskManagerLocation taskManagerLocation)
        {
            super(log, rpcService, targetName, targetType, targetAddress, leaderId);
            this.taskManagerRpcAddress = taskManagerRpcAddress;
            this.taskManagerLocation = Preconditions.checkNotNull(taskManagerLocation);
        }

        @Override
        protected CompletableFuture<RegistrationResponse> invokeRegistration(
                JobMasterGateway gateway, UUID leaderId, long timeoutMillis) throws Exception
        {
            return gateway.registerTaskManager(taskManagerRpcAddress, taskManagerLocation,
                leaderId, Time.milliseconds(timeoutMillis));
        }
    }

    /**
     * Internal state of the service
     */
    private enum State {
        CREATED, STARTED, STOPPED
    }
}
|
|
package apollo.gui.genomemap;
import java.awt.*;
import java.util.*;
import javax.swing.*;
import apollo.gui.Transformer;
import apollo.gui.event.*;
import apollo.gui.Controller;
import org.apache.log4j.*;
/**
* A basic, non useful, implementation of the LinearViewI interface.
*/
public abstract class LinearView implements ViewI {

  // -----------------------------------------------------------------------
  // Class/static variables
  // -----------------------------------------------------------------------
  protected final static Logger logger = LogManager.getLogger(LinearView.class);

  // -----------------------------------------------------------------------
  // Instance variables
  // -----------------------------------------------------------------------

  //needed to get the Graphics to draw on
  protected JComponent apollo_panel;
  protected Transformer transformer;
  protected Graphics graphics;
  private String name;
  private boolean invalid;
  // Typed listener list (was a raw Vector requiring casts on dispatch).
  private Vector<ViewListener> viewListeners = new Vector<ViewListener>();
  protected Color backgroundColour = Color.white;
  protected Color foregroundColour = Color.black;
  protected Rectangle viewBounds;
  protected boolean visible = true;
  protected boolean debug = false;
  protected boolean limitsSet = false;
  protected boolean transparent = false;
  /** Not sure if LinearView should know strand. Defaults to forward */
  private int strand = 1;
  protected java.util.List<VisibilityListener> visibilityListeners =
    new LinkedList<VisibilityListener>();

  public LinearView(JComponent ap, String name, boolean visible) {
    init(ap, name, visible);
  }

  /** Shared constructor logic: wires the panel, transformer and default ranges. */
  protected void init(JComponent ap, String name, boolean visible) {
    setVisible(visible);
    setComponent(ap);
    viewBounds = new Rectangle(1,1,1,1);
    transformer = new Transformer(viewBounds);
    // Default Y user range is symmetric around 0, centred at the origin.
    transformer.setYRange(new int [] {-10000,10000});
    transformer.setXCentre(0);
    transformer.setYCentre(0);
    setName(name);
  }

  // LinearViewI methods
  // 1. ViewI
  public void setComponent(JComponent ap) {
    this.apollo_panel = ap;
  }

  /** LinearViews components that are part of an ApolloPanel
      which is a JComponent */
  public JComponent getComponent() {
    return this.apollo_panel;
  }

  public void setInvalidity(boolean state) {
    invalid = state;
  }

  public boolean isInvalid() {
    return this.invalid;
  }

  /** Sets the outer bounds and mirrors them into the draw (pixel) bounds. */
  public void setBounds(Rectangle rect) {
    viewBounds = new Rectangle(rect);
    setDrawBounds(viewBounds);
  }

  public Rectangle getBounds() {
    return viewBounds;
  }

  public Rectangle getDrawBounds() {
    return transformer.getPixelBounds();
  }

  public void setDrawBounds(Rectangle rect) {
    transformer.setPixelBounds(rect);
  }

  public void setName(String name) {
    // Fixed: dropped the needless defensive `new String(name)` copy —
    // String is immutable, so aliasing the argument is safe.
    this.name = name;
  }

  public String getName() {
    return name;
  }

  public void setGraphics(Graphics graphics) {
    this.graphics = graphics;
  }

  public Graphics getGraphics() {
    /* What happens if we don't use all of this rigamarole
       and simply use the graphics we have in the view
    */
    if (graphics == null) {
      // Lazily borrow the panel's back buffer when no graphics was injected.
      Image image_buffer = ((ApolloPanelI) getComponent()).getBackBuffer();
      if (image_buffer != null) {
        graphics = image_buffer.getGraphics();
      } else {
        logger.error ("Oh oh, now what??");
      }
    }
    return graphics;
  }

  /** paintView draws a cross in the centre of the View
      and a small cross in the upper left quadrant.
      Placeholder diagnostic rendering — subclasses are expected to override. */
  public void paintView() {
    logger.error (this.getClass().getName() +
                  " needs to implement method for paintView()");
    graphics.setColor(Color.white);
    if (!transparent) {
      graphics.fillRect(transformer.getPixelBounds().x,
                        transformer.getPixelBounds().y,
                        transformer.getPixelBounds().width,
                        transformer.getPixelBounds().height);
    } else {
      graphics.drawRect(transformer.getPixelBounds().x,
                        transformer.getPixelBounds().y,
                        transformer.getPixelBounds().width,
                        transformer.getPixelBounds().height);
    }
    graphics.setColor(Color.red);
    // Horizontal axis line, inset from the X extremes.
    Point start = new Point((int)transformer.getXMinimum()+11000,0);
    Point end = new Point((int)transformer.getXMaximum()-11000,0);
    Point pixstart = transformer.toPixel(start);
    Point pixend = transformer.toPixel(end);
    graphics.drawLine(pixstart.x,pixstart.y,pixend.x,pixend.y);
    // Label the view at the centre of the visible range.
    Point centre = new Point((int)(transformer.getXVisibleRange()[0]+
                                   (transformer.getXVisibleRange()[1]-
                                    transformer.getXVisibleRange()[0])/2),
                             transformer.getYVisibleRange()[0]+
                             (transformer.getYVisibleRange()[1]-
                              transformer.getYVisibleRange()[0])/2);
    Point pixcentre = transformer.toPixel(centre);
    logger.debug("Centre = " + centre);
    logger.debug("PixCentre = " + pixcentre);
    logger.debug("name = " + name);
    graphics.drawString(name,pixcentre.x,pixcentre.y);
    // Vertical axis line, inset from the Y extremes.
    start = new Point(0,(int)transformer.getYMinimum()+1000);
    end = new Point(0,(int)transformer.getYMaximum()-1000);
    pixstart = transformer.toPixel(start);
    pixend = transformer.toPixel(end);
    graphics.drawLine(pixstart.x,pixstart.y,pixend.x,pixend.y);
    // Small cross in the upper-left quadrant.
    start = new Point(-1000,1250);
    end = new Point(-1500,1250);
    pixstart = transformer.toPixel(start);
    pixend = transformer.toPixel(end);
    graphics.drawLine(pixstart.x,pixstart.y,pixend.x,pixend.y);
    start = new Point(-1250,1000);
    end = new Point(-1250,1500);
    pixstart = transformer.toPixel(start);
    pixend = transformer.toPixel(end);
    graphics.drawLine(pixstart.x,pixstart.y,pixend.x,pixend.y);
  }

  public Transformer getTransform() {
    return this.transformer;
  }

  public void setTransform(Transformer transformer) {
    this.transformer = transformer;
  }

  // 2. LinearViewI
  public void setLimits(int [] limits) {
    transformer.setXRange(limits);
    limitsSet = true;
  }

  public boolean areLimitsSet() {
    return limitsSet;
  }

  public void setLimitsSet(boolean state) {
    limitsSet = state;
  }

  public void setMinimum(int min) {
    transformer.setXMinimum(min);
  }

  public void setMaximum(int max) {
    transformer.setXMaximum(max);
  }

  /** Limits not necasarily equal to seq start and end,
   * Component.syncViewLimits pads out the limits beyond sequence
   */
  public int [] getLimits() {
    return transformer.getXRange();
  }

  public int getMaximum() {
    return transformer.getXMaximum();
  }

  public int getMinimum() {
    return transformer.getXMinimum();
  }

  public void setCentre(int centre) {
    transformer.setXCentre(centre);
  }

  public int getCentre() {
    return transformer.getXCentre();
  }

  public Rectangle getPreferredSize() {
    return getBounds();
  }

  public void setZoomFactor(double factor) {
    transformer.setXZoomFactor(factor);
  }

  /** visible range in base pairs */
  public int [] getVisibleRange() {
    return transformer.getXVisibleRange();
  }

  /**
   * Changes visibility, forces the enclosing ApolloPanel to re-layout, and
   * notifies visibility listeners.
   *
   * @param state  new visibility
   * @param remove propagated to listeners via the VisibilityEvent
   */
  public void setVisible(boolean state, boolean remove)
  {
    visible = state;
    if (isVisible())
      setInvalidity(true);
    if (getComponent() != null) {
      ((ApolloPanel)getComponent()).setInvalidity(true);
      getComponent().doLayout();
      ((ApolloPanel)getComponent()).setInvalidity(false);
    }
    for (VisibilityListener l : visibilityListeners) {
      l.visibilityChanged(new VisibilityEvent(this, remove));
    }
  }

  public void setVisible(boolean state) {
    setVisible(state, false);
  }

  public boolean isVisible() {
    return visible;
  }

  public void setDebug(boolean state) {
    debug = state;
  }

  // Event routines
  public void addViewListener(ViewListener l) {
    viewListeners.addElement(l);
  }

  /** Dispatches the event to all registered view listeners in order. */
  public void fireViewEvent(ViewEvent evt) {
    // Indexed loop (not iterator) tolerates listeners registered mid-dispatch.
    for (int i=0;i<viewListeners.size();i++) {
      viewListeners.elementAt(i).handleViewEvent(evt);
    }
  }

  public void setBackgroundColour(Color colour) {
    this.backgroundColour = colour;
  }

  public Color getBackgroundColour() {
    return backgroundColour;
  }

  public void setForegroundColour(Color colour) {
    this.foregroundColour = colour;
  }

  public Color getForegroundColour() {
    return foregroundColour;
  }

  public void setTransparent(boolean state) {
    transparent = state;
  }

  public boolean isTransparent() {
    return transparent;
  }

  public void clear() {
    logger.error (this.getClass().getName() +
                  " needs to implement clear method");
  }

  /**
   * I moved strand from FeatureView to here, because scrolling is different on
   * reverse strand than forward strand. But this is funny for subclasses like
   * SequenceView that are not stranded.
   */
  public void setStrand(int strand) {
    this.strand = strand;
  }

  public int getStrand() {
    return strand;
  }

  boolean isReverseStrand() {
    return getStrand() == -1;
  }

  boolean isForwardStrand() {
    return getStrand() == 1;
  }

  /** Selection hit-box around a point; offset depends on orientation vs strand. */
  protected Rectangle getSelectionRectangle(Point pnt) {
    if ((getTransform().getXOrientation() == Transformer.LEFT &&
         getStrand() == 1) ||
        (getTransform().getXOrientation() == Transformer.RIGHT &&
         getStrand() == -1)) {
      return new Rectangle(pnt.x-3,pnt.y-1,4,1);
    } else {
      return new Rectangle(pnt.x,pnt.y,4,1);
    }
  }

  public void addVisibilityListener(VisibilityListener l)
  {
    visibilityListeners.add(l);
  }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.local;
import org.apache.ignite.internal.processors.cache.*;
import org.apache.ignite.internal.processors.cache.transactions.*;
import org.apache.ignite.internal.processors.cache.version.*;
import org.jetbrains.annotations.*;
import static org.apache.ignite.events.EventType.*;
/**
* Cache entry for local caches.
*/
@SuppressWarnings({"NonPrivateFieldAccessedInSynchronizedContext", "TooBroadScope"})
public class GridLocalCacheEntry extends GridCacheMapEntry {
    /** Off-heap value pointer (0 when the value is not stored off-heap). */
    private long valPtr;

    /**
     * Creates a cache map entry for a local (non-distributed) cache.
     *
     * @param ctx Cache registry.
     * @param key Cache key.
     * @param hash Key hash value.
     * @param val Entry value.
     * @param next Next entry in the linked list.
     * @param hdrId Header id.
     */
    public GridLocalCacheEntry(GridCacheContext ctx,
        KeyCacheObject key,
        int hash,
        CacheObject val,
        GridCacheMapEntry next,
        int hdrId)
    {
        // No local-specific state to initialize; defer entirely to the base entry.
        super(ctx, key, hash, val, next, hdrId);
    }
    /** {@inheritDoc} */
    @Override public boolean isLocal() {
        // Entries of a local cache are local by definition.
        return true;
    }
    /**
     * Add local candidate.
     *
     * @param threadId Owning thread ID.
     * @param ver Lock version.
     * @param timeout Timeout to acquire lock.
     * @param reenter Reentry flag.
     * @param tx Transaction flag.
     * @param implicitSingle Implicit transaction flag.
     * @return New candidate.
     * @throws GridCacheEntryRemovedException If entry has been removed.
     */
    @Nullable public GridCacheMvccCandidate addLocal(
        long threadId,
        GridCacheVersion ver,
        long timeout,
        boolean reenter,
        boolean tx,
        boolean implicitSingle) throws GridCacheEntryRemovedException {
        GridCacheMvccCandidate prev;
        GridCacheMvccCandidate cand;
        GridCacheMvccCandidate owner;
        CacheObject val;
        boolean hasVal;
        // All MVCC mutation happens under the entry monitor; the snapshot of
        // prev/owner/val taken here is used lock-free below.
        synchronized (this) {
            checkObsolete();
            GridCacheMvcc mvcc = mvccExtras();
            // Lazily attach an MVCC structure on first lock attempt.
            if (mvcc == null) {
                mvcc = new GridCacheMvcc(cctx);
                mvccExtras(mvcc);
            }
            prev = mvcc.localOwner();
            cand = mvcc.addLocal(
                this,
                threadId,
                ver,
                timeout,
                reenter,
                tx,
                implicitSingle
            );
            owner = mvcc.localOwner();
            val = this.val;
            hasVal = hasValueUnlocked();
            // Drop the MVCC structure again if no candidates remain.
            if (mvcc.isEmpty())
                mvccExtras(null);
        }
        // Notifications are performed outside the monitor to avoid calling
        // alien code while holding the entry lock.
        if (cand != null) {
            if (!cand.reentry())
                cctx.mvcc().addNext(cctx, cand);
            // Event notification.
            if (cctx.events().isRecordable(EVT_CACHE_OBJECT_LOCKED))
                cctx.events().addEvent(partition(), key, cand.nodeId(), cand, EVT_CACHE_OBJECT_LOCKED, val, hasVal,
                    val, hasVal, null, null, null);
        }
        checkOwnerChanged(prev, owner);
        return cand;
    }
/**
*
* @param cand Candidate.
* @return Current owner.
*/
@Nullable public GridCacheMvccCandidate readyLocal(GridCacheMvccCandidate cand) {
GridCacheMvccCandidate prev = null;
GridCacheMvccCandidate owner = null;
synchronized (this) {
GridCacheMvcc mvcc = mvccExtras();
if (mvcc != null) {
prev = mvcc.localOwner();
owner = mvcc.readyLocal(cand);
if (mvcc.isEmpty())
mvccExtras(null);
}
}
checkOwnerChanged(prev, owner);
return owner;
}
/**
*
* @param ver Candidate version.
* @return Current owner.
*/
@Nullable public GridCacheMvccCandidate readyLocal(GridCacheVersion ver) {
GridCacheMvccCandidate prev = null;
GridCacheMvccCandidate owner = null;
synchronized (this) {
GridCacheMvcc mvcc = mvccExtras();
if (mvcc != null) {
prev = mvcc.localOwner();
owner = mvcc.readyLocal(ver);
if (mvcc.isEmpty())
mvccExtras(null);
}
}
checkOwnerChanged(prev, owner);
return owner;
}
/** {@inheritDoc} */
@Override public boolean tmLock(IgniteInternalTx tx, long timeout) throws GridCacheEntryRemovedException {
GridCacheMvccCandidate cand = addLocal(
tx.threadId(),
tx.xidVersion(),
timeout,
/*reenter*/false,
/*tx*/true,
tx.implicitSingle()
);
if (cand != null) {
readyLocal(cand);
return true;
}
return false;
}
/**
* Rechecks if lock should be reassigned.
*
* @return Current owner.
*/
@Nullable public GridCacheMvccCandidate recheck() {
GridCacheMvccCandidate prev = null;
GridCacheMvccCandidate owner = null;
synchronized (this) {
GridCacheMvcc mvcc = mvccExtras();
if (mvcc != null) {
prev = mvcc.localOwner();
owner = mvcc.recheck();
if (mvcc.isEmpty())
mvccExtras(null);
}
}
checkOwnerChanged(prev, owner);
return owner;
}
/**
* @param prev Previous owner.
* @param owner Current owner.
*/
private void checkOwnerChanged(GridCacheMvccCandidate prev, GridCacheMvccCandidate owner) {
assert !Thread.holdsLock(this);
if (owner != prev) {
cctx.mvcc().callback().onOwnerChanged(this, prev, owner);
if (owner != null)
checkThreadChain(owner);
}
}
/**
* @param owner Starting candidate in the chain.
*/
private void checkThreadChain(GridCacheMvccCandidate owner) {
assert !Thread.holdsLock(this);
assert owner != null;
assert owner.owner() || owner.used() : "Neither owner or used flags are set on ready local candidate: " +
owner;
if (owner.next() != null) {
for (GridCacheMvccCandidate cand = owner.next(); cand != null; cand = cand.next()) {
assert cand.local();
// Allow next lock in the thread to proceed.
if (!cand.used()) {
GridLocalCacheEntry e = (GridLocalCacheEntry)cctx.cache().peekEx(cand.key());
// At this point candidate may have been removed and entry destroyed,
// so we check for null.
if (e != null)
e.recheck();
break;
}
}
}
}
/**
* Unlocks lock if it is currently owned.
*
* @param tx Transaction to unlock.
*/
@Override public void txUnlock(IgniteInternalTx tx) throws GridCacheEntryRemovedException {
removeLock(tx.xidVersion());
}
/**
* Releases local lock.
*/
void releaseLocal() {
releaseLocal(Thread.currentThread().getId());
}
/**
* Releases local lock.
*
* @param threadId Thread ID.
*/
void releaseLocal(long threadId) {
GridCacheMvccCandidate prev = null;
GridCacheMvccCandidate owner = null;
CacheObject val;
boolean hasVal;
synchronized (this) {
GridCacheMvcc mvcc = mvccExtras();
if (mvcc != null) {
prev = mvcc.localOwner();
owner = mvcc.releaseLocal(threadId);
if (mvcc.isEmpty())
mvccExtras(null);
}
val = this.val;
hasVal = hasValueUnlocked();
}
if (prev != null && owner != prev) {
checkThreadChain(prev);
// Event notification.
if (cctx.events().isRecordable(EVT_CACHE_OBJECT_UNLOCKED))
cctx.events().addEvent(partition(), key, prev.nodeId(), prev, EVT_CACHE_OBJECT_UNLOCKED, val, hasVal,
val, hasVal, null, null, null);
}
checkOwnerChanged(prev, owner);
}
/**
* Removes candidate regardless if it is owner or not.
*
* @param cand Candidate to remove.
* @throws GridCacheEntryRemovedException If the entry was removed by version other
* than one passed in.
*/
void removeLock(GridCacheMvccCandidate cand) throws GridCacheEntryRemovedException {
removeLock(cand.version());
}
/** {@inheritDoc} */
@Override public boolean removeLock(GridCacheVersion ver) throws GridCacheEntryRemovedException {
GridCacheMvccCandidate prev = null;
GridCacheMvccCandidate owner = null;
GridCacheMvccCandidate doomed;
CacheObject val;
boolean hasVal;
synchronized (this) {
GridCacheVersion obsoleteVer = obsoleteVersionExtras();
if (obsoleteVer != null && !obsoleteVer.equals(ver))
checkObsolete();
GridCacheMvcc mvcc = mvccExtras();
doomed = mvcc == null ? null : mvcc.candidate(ver);
if (doomed != null) {
prev = mvcc.localOwner();
owner = mvcc.remove(ver);
if (mvcc.isEmpty())
mvccExtras(null);
}
val = this.val;
hasVal = hasValueUnlocked();
}
if (doomed != null) {
checkThreadChain(doomed);
// Event notification.
if (cctx.events().isRecordable(EVT_CACHE_OBJECT_UNLOCKED))
cctx.events().addEvent(partition(), key, doomed.nodeId(), doomed, EVT_CACHE_OBJECT_UNLOCKED,
val, hasVal, val, hasVal, null, null, null);
}
checkOwnerChanged(prev, owner);
return doomed != null;
}
/** {@inheritDoc} */
@Override protected boolean hasOffHeapPointer() {
return valPtr != 0;
}
/** {@inheritDoc} */
@Override protected long offHeapPointer() {
return valPtr;
}
/** {@inheritDoc} */
@Override protected void offHeapPointer(long valPtr) {
this.valPtr = valPtr;
}
}
|
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.common.xcontent.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
/**
 * Abstract base class for allocating an unassigned shard to a node.
 * <p>
 * Holds the three arguments shared by every allocate command ({@code index},
 * {@code shard}, {@code node}), their wire and x-content serialization, and
 * helper methods concrete commands use to explain or reject an allocation.
 */
public abstract class AbstractAllocateAllocationCommand implements AllocationCommand {

    private static final String INDEX_FIELD = "index";
    private static final String SHARD_FIELD = "shard";
    private static final String NODE_FIELD = "node";

    /**
     * Creates an {@link ObjectParser} pre-wired with the three fields common to
     * all allocate commands.
     *
     * @param command command name, used as the parser's name in error messages
     * @return parser that populates a command {@link Builder}
     */
    protected static <T extends Builder<?>> ObjectParser<T, Void> createAllocateParser(String command) {
        ObjectParser<T, Void> parser = new ObjectParser<>(command);
        parser.declareString(Builder::setIndex, new ParseField(INDEX_FIELD));
        parser.declareInt(Builder::setShard, new ParseField(SHARD_FIELD));
        parser.declareString(Builder::setNode, new ParseField(NODE_FIELD));
        return parser;
    }

    /**
     * Works around ObjectParser not supporting constructor arguments.
     */
    protected abstract static class Builder<T extends AbstractAllocateAllocationCommand> {
        protected String index;
        protected int shard = -1; // -1 marks "not set"; rejected by validate()
        protected String node;

        public void setIndex(String index) {
            this.index = index;
        }

        public void setShard(int shard) {
            this.shard = shard;
        }

        public void setNode(String node) {
            this.node = node;
        }

        /**
         * Reads the builder's fields from the given parser.
         */
        public abstract Builder<T> parse(XContentParser parser) throws IOException;

        /**
         * Builds the command; implementations should call {@link #validate()} first.
         */
        public abstract T build();

        /**
         * Validates that all required arguments were supplied.
         *
         * @throws IllegalArgumentException if index, shard or node is missing
         */
        protected void validate() {
            if (index == null) {
                throw new IllegalArgumentException("Argument [index] must be defined");
            }
            if (shard < 0) {
                throw new IllegalArgumentException("Argument [shard] must be defined and non-negative");
            }
            if (node == null) {
                throw new IllegalArgumentException("Argument [node] must be defined");
            }
        }
    }

    protected final String index;
    protected final int shardId;
    protected final String node;

    protected AbstractAllocateAllocationCommand(String index, int shardId, String node) {
        this.index = index;
        this.shardId = shardId;
        this.node = node;
    }

    /**
     * Read from a stream.
     */
    protected AbstractAllocateAllocationCommand(StreamInput in) throws IOException {
        index = in.readString();
        shardId = in.readVInt();
        node = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(index);
        out.writeVInt(shardId);
        out.writeString(node);
    }

    /**
     * Get the index name
     *
     * @return name of the index
     */
    public String index() {
        return this.index;
    }

    /**
     * Get the shard id
     *
     * @return id of the shard
     */
    public int shardId() {
        return this.shardId;
    }

    /**
     * Get the id of the node
     *
     * @return id of the node
     */
    public String node() {
        return this.node;
    }

    /**
     * Handle case where a disco node cannot be found in the routing table. Usually means that it's not a data node.
     */
    protected RerouteExplanation explainOrThrowMissingRoutingNode(RoutingAllocation allocation, boolean explain, DiscoveryNode discoNode) {
        if (discoNode.canContainData() == false) {
            return explainOrThrowRejectedCommand(explain, allocation, "allocation can only be done on data nodes, not [" + node + "]");
        } else {
            return explainOrThrowRejectedCommand(explain, allocation, "could not find [" + node + "] among the routing nodes");
        }
    }

    /**
     * Utility method for rejecting the current allocation command based on provided reason
     */
    protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, RoutingAllocation allocation, String reason) {
        if (explain) {
            return new RerouteExplanation(this, allocation.decision(Decision.NO, name() + " (allocation command)", reason));
        }
        throw new IllegalArgumentException("[" + name() + "] " + reason);
    }

    /**
     * Utility method for rejecting the current allocation command based on provided exception
     */
    protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, RoutingAllocation allocation, RuntimeException rte) {
        if (explain) {
            return new RerouteExplanation(this, allocation.decision(Decision.NO, name() + " (allocation command)", rte.getMessage()));
        }
        throw rte;
    }

    /**
     * Initializes an unassigned shard on a node and removes it from the unassigned
     *
     * @param allocation the allocation
     * @param routingNodes the routing nodes
     * @param routingNode the node to initialize it to
     * @param shardRouting the shard routing that is to be matched in unassigned shards
     */
    protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes,
                                             RoutingNode routingNode, ShardRouting shardRouting) {
        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null, null);
    }

    /**
     * Initializes an unassigned shard on a node and removes it from the unassigned
     *
     * @param allocation the allocation
     * @param routingNodes the routing nodes
     * @param routingNode the node to initialize it to
     * @param shardRouting the shard routing that is to be matched in unassigned shards
     * @param unassignedInfo unassigned info to override
     * @param recoverySource recovery source to override
     */
    protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode,
                                             ShardRouting shardRouting, @Nullable UnassignedInfo unassignedInfo,
                                             @Nullable RecoverySource recoverySource) {
        for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
            ShardRouting unassigned = it.next();
            if (unassigned.equalsIgnoringMetadata(shardRouting) == false) {
                continue;
            }
            // Apply overrides (if any) before initializing the shard on the node.
            if (unassignedInfo != null || recoverySource != null) {
                unassigned = it.updateUnassigned(unassignedInfo != null ? unassignedInfo : unassigned.unassignedInfo(),
                    recoverySource != null ? recoverySource : unassigned.recoverySource(), allocation.changes());
            }
            it.initialize(routingNode.nodeId(), null,
                allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes());
            return;
        }
        assert false : "shard to initialize not found in list of unassigned shards";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        builder.startObject();
        builder.field(INDEX_FIELD, index());
        builder.field(SHARD_FIELD, shardId());
        builder.field(NODE_FIELD, node());
        extraXContent(builder);
        return builder.endObject();
    }

    /**
     * Hook for subclasses to emit additional fields inside the command's x-content object.
     */
    protected void extraXContent(XContentBuilder builder) throws IOException {
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        AbstractAllocateAllocationCommand other = (AbstractAllocateAllocationCommand) obj;
        // Override equals and hashCode for testing.
        // Compare shardId with == to avoid autoboxing two ints through Objects.equals.
        return Objects.equals(index, other.index)
            && shardId == other.shardId
            && Objects.equals(node, other.node);
    }

    @Override
    public int hashCode() {
        // Override equals and hashCode for testing
        return Objects.hash(index, shardId, node);
    }
}
|
|
package com.wavefront.agent.preprocessor;
import com.google.common.collect.Lists;
import com.wavefront.ingester.GraphiteDecoder;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.InvocationTargetException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import static org.junit.Assert.*;
import sunnylabs.report.ReportPoint;
/**
 * Tests for preprocessor rules: standalone point-line / ReportPoint transformers
 * and filters, plus per-port rule chains loaded from {@code preprocessor_rules.yaml}.
 */
public class PreprocessorRulesTest {

    private static AgentPreprocessorConfiguration config;

    private final static List<String> emptyCustomSourceTags = Collections.emptyList();
    private final GraphiteDecoder decoder = new GraphiteDecoder(emptyCustomSourceTags);

    @BeforeClass
    public static void setup() throws IOException {
        InputStream stream = PreprocessorRulesTest.class.getResourceAsStream("preprocessor_rules.yaml");
        config = new AgentPreprocessorConfiguration();
        config.loadFromStream(stream);
    }

    @Test
    public void testPointInRangeCorrectForTimeRanges() throws NoSuchMethodException, InvocationTargetException,
        IllegalAccessException {
        long millisPerYear = 31536000000L;
        long millisPerDay = 86400000L;

        AnnotatedPredicate<ReportPoint> pointInRange1year = new ReportPointTimestampInRangeFilter(8760);

        // not in range if over a year ago
        ReportPoint rp = new ReportPoint("some metric", System.currentTimeMillis() - millisPerYear, 10L, "host", "table",
            new HashMap<String, String>());
        assertFalse(pointInRange1year.apply(rp));
        rp.setTimestamp(System.currentTimeMillis() - millisPerYear - 1);
        assertFalse(pointInRange1year.apply(rp));

        // in range if within a year ago
        rp.setTimestamp(System.currentTimeMillis() - (millisPerYear / 2));
        assertTrue(pointInRange1year.apply(rp));

        // in range for right now
        rp.setTimestamp(System.currentTimeMillis());
        assertTrue(pointInRange1year.apply(rp));

        // in range if within a day in the future
        rp.setTimestamp(System.currentTimeMillis() + millisPerDay - 1);
        assertTrue(pointInRange1year.apply(rp));

        // out of range for over a day in the future
        rp.setTimestamp(System.currentTimeMillis() + (millisPerDay * 2));
        assertFalse(pointInRange1year.apply(rp));

        // now test with 1 day limit
        AnnotatedPredicate<ReportPoint> pointInRange1day = new ReportPointTimestampInRangeFilter(24);

        rp.setTimestamp(System.currentTimeMillis() - millisPerDay - 1);
        assertFalse(pointInRange1day.apply(rp));

        // in range if within 1 day ago
        rp.setTimestamp(System.currentTimeMillis() - (millisPerDay / 2));
        assertTrue(pointInRange1day.apply(rp));

        // in range for right now
        rp.setTimestamp(System.currentTimeMillis());
        assertTrue(pointInRange1day.apply(rp));
    }

    @Test(expected = NullPointerException.class)
    public void testLineReplaceRegexNullMatchThrows() {
        // try to create a regex replace rule with a null match pattern
        new PointLineReplaceRegexTransformer(null, "foo", null, null);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testLineReplaceRegexBlankMatchThrows() {
        // try to create a regex replace rule with a blank match pattern
        new PointLineReplaceRegexTransformer("", "foo", null, null);
    }

    @Test(expected = NullPointerException.class)
    public void testLineWhitelistRegexNullMatchThrows() {
        // try to create a whitelist rule with a null match pattern
        new PointLineWhitelistRegexFilter(null, null);
    }

    @Test(expected = NullPointerException.class)
    public void testLineBlacklistRegexNullMatchThrows() {
        // try to create a blacklist rule with a null match pattern
        new PointLineBlacklistRegexFilter(null, null);
    }

    @Test(expected = NullPointerException.class)
    public void testPointBlacklistRegexNullScopeThrows() {
        // try to create a blacklist rule with a null scope
        new ReportPointBlacklistRegexFilter(null, "foo", null);
    }

    @Test(expected = NullPointerException.class)
    public void testPointBlacklistRegexNullMatchThrows() {
        // try to create a blacklist rule with a null pattern
        new ReportPointBlacklistRegexFilter("foo", null, null);
    }

    @Test(expected = NullPointerException.class)
    public void testPointWhitelistRegexNullScopeThrows() {
        // try to create a whitelist rule with a null scope
        new ReportPointWhitelistRegexFilter(null, "foo", null);
    }

    @Test(expected = NullPointerException.class)
    public void testPointWhitelistRegexNullMatchThrows() {
        // try to create a whitelist rule with a null pattern
        new ReportPointWhitelistRegexFilter("foo", null, null);
    }

    @Test
    public void testPointLineRules() {
        String testPoint1 = "collectd.cpu.loadavg.1m 7 1459527231 source=hostname foo=bar boo=baz";
        String testPoint2 = "collectd.#cpu#.&loadavg^.1m 7 1459527231 source=source$hostname foo=bar boo=baz";

        PointLineReplaceRegexTransformer rule1 = new PointLineReplaceRegexTransformer("(boo)=baz", "$1=qux", null, null);
        PointLineReplaceRegexTransformer rule2 = new PointLineReplaceRegexTransformer("[#&\\$\\^]", "", null, null);
        PointLineBlacklistRegexFilter rule3 = new PointLineBlacklistRegexFilter(".*source=source.*", null);
        PointLineWhitelistRegexFilter rule4 = new PointLineWhitelistRegexFilter(".*source=source.*", null);
        PointLineReplaceRegexTransformer rule5 = new PointLineReplaceRegexTransformer("cpu", "gpu", ".*hostname.*", null);
        PointLineReplaceRegexTransformer rule6 = new PointLineReplaceRegexTransformer("cpu", "gpu", ".*nomatch.*", null);

        String expectedPoint1 = "collectd.cpu.loadavg.1m 7 1459527231 source=hostname foo=bar boo=qux";
        String expectedPoint2 = "collectd.cpu.loadavg.1m 7 1459527231 source=sourcehostname foo=bar boo=baz";
        String expectedPoint5 = "collectd.gpu.loadavg.1m 7 1459527231 source=hostname foo=bar boo=baz";

        assertEquals(expectedPoint1, rule1.apply(testPoint1));
        assertEquals(expectedPoint2, rule2.apply(testPoint2));
        assertTrue(rule3.apply(testPoint1));
        assertFalse(rule3.apply(testPoint2));
        assertFalse(rule4.apply(testPoint1));
        assertTrue(rule4.apply(testPoint2));
        assertEquals(expectedPoint5, rule5.apply(testPoint1));
        assertEquals(testPoint1, rule6.apply(testPoint1));
    }

    @Test
    public void testReportPointRules() {
        String pointLine = "\"some metric\" 10.0 1469751813 source=\"host\" \"boo\"=\"baz\" \"foo\"=\"bar\"";
        ReportPoint point = parsePointLine(pointLine);

        // try to remove a point tag when value doesn't match the regex - shouldn't change
        new ReportPointDropTagTransformer("foo", "bar(never|match)", null).apply(point);
        assertEquals(pointLine, referencePointToStringImpl(point));

        // try to remove a point tag when value does match the regex - should work
        new ReportPointDropTagTransformer("foo", "ba.", null).apply(point);
        String expectedPoint1 = "\"some metric\" 10.0 1469751813 source=\"host\" \"boo\"=\"baz\"";
        assertEquals(expectedPoint1, referencePointToStringImpl(point));

        // try to remove a point tag without a regex specified - should work
        new ReportPointDropTagTransformer("boo", null, null).apply(point);
        String expectedPoint2 = "\"some metric\" 10.0 1469751813 source=\"host\"";
        assertEquals(expectedPoint2, referencePointToStringImpl(point));

        // add a point tag back
        new ReportPointAddTagTransformer("boo", "baz", null).apply(point);
        String expectedPoint3 = "\"some metric\" 10.0 1469751813 source=\"host\" \"boo\"=\"baz\"";
        assertEquals(expectedPoint3, referencePointToStringImpl(point));

        // try to add a duplicate point tag - shouldn't change
        new ReportPointAddTagIfNotExistsTransformer("boo", "bar", null).apply(point);
        assertEquals(expectedPoint3, referencePointToStringImpl(point));

        // add another point tag back - should work this time
        new ReportPointAddTagIfNotExistsTransformer("foo", "bar", null).apply(point);
        assertEquals(pointLine, referencePointToStringImpl(point));

        // rename a point tag - should work
        new ReportPointRenameTagTransformer("foo", "qux", null, null).apply(point);
        String expectedPoint4 = "\"some metric\" 10.0 1469751813 source=\"host\" \"boo\"=\"baz\" \"qux\"=\"bar\"";
        assertEquals(expectedPoint4, referencePointToStringImpl(point));

        // rename a point tag matching the regex - should work
        new ReportPointRenameTagTransformer("boo", "foo", "b[a-z]z", null).apply(point);
        String expectedPoint5 = "\"some metric\" 10.0 1469751813 source=\"host\" \"foo\"=\"baz\" \"qux\"=\"bar\"";
        assertEquals(expectedPoint5, referencePointToStringImpl(point));

        // try to rename a point tag that doesn't match the regex - shouldn't change
        new ReportPointRenameTagTransformer("foo", "boo", "wat", null).apply(point);
        assertEquals(expectedPoint5, referencePointToStringImpl(point));

        // add null metrics prefix - shouldn't change
        new ReportPointAddPrefixTransformer(null).apply(point);
        assertEquals(expectedPoint5, referencePointToStringImpl(point));

        // add blank metrics prefix - shouldn't change
        new ReportPointAddPrefixTransformer("").apply(point);
        assertEquals(expectedPoint5, referencePointToStringImpl(point));

        // add metrics prefix - should work
        new ReportPointAddPrefixTransformer("prefix").apply(point);
        String expectedPoint6 = "\"prefix.some metric\" 10.0 1469751813 source=\"host\" \"foo\"=\"baz\" \"qux\"=\"bar\"";
        assertEquals(expectedPoint6, referencePointToStringImpl(point));

        // replace regex in metric name, no matches - shouldn't change
        new ReportPointReplaceRegexTransformer("metricName", "Z", "", null, null).apply(point);
        assertEquals(expectedPoint6, referencePointToStringImpl(point));

        // replace regex in metric name - shouldn't affect anything else
        new ReportPointReplaceRegexTransformer("metricName", "o", "0", null, null).apply(point);
        String expectedPoint7 = "\"prefix.s0me metric\" 10.0 1469751813 source=\"host\" \"foo\"=\"baz\" \"qux\"=\"bar\"";
        assertEquals(expectedPoint7, referencePointToStringImpl(point));

        // replace regex in source name - shouldn't affect anything else
        new ReportPointReplaceRegexTransformer("sourceName", "o", "0", null, null).apply(point);
        String expectedPoint8 = "\"prefix.s0me metric\" 10.0 1469751813 source=\"h0st\" \"foo\"=\"baz\" \"qux\"=\"bar\"";
        assertEquals(expectedPoint8, referencePointToStringImpl(point));

        // replace regex in a point tag value - shouldn't affect anything else
        new ReportPointReplaceRegexTransformer("foo", "b", "z", null, null).apply(point);
        String expectedPoint9 = "\"prefix.s0me metric\" 10.0 1469751813 source=\"h0st\" \"foo\"=\"zaz\" \"qux\"=\"bar\"";
        assertEquals(expectedPoint9, referencePointToStringImpl(point));

        // replace regex in a point tag value with matching groups
        new ReportPointReplaceRegexTransformer("qux", "([a-c][a-c]).", "$1z", null, null).apply(point);
        String expectedPoint10 = "\"prefix.s0me metric\" 10.0 1469751813 source=\"h0st\" \"foo\"=\"zaz\" \"qux\"=\"baz\"";
        assertEquals(expectedPoint10, referencePointToStringImpl(point));
    }

    @Test
    public void testAgentPreprocessorForPointLine() {

        // test point line transformers
        String testPoint1 = "collectd.#cpu#.&load$avg^.1m 7 1459527231 source=source$hostname foo=bar boo=baz";
        String expectedPoint1 = "collectd._cpu_._load_avg^.1m 7 1459527231 source=source_hostname foo=bar boo=baz";
        assertEquals(expectedPoint1, config.forPort("2878").forPointLine().transform(testPoint1));

        // test filters
        String testPoint2 = "collectd.cpu.loadavg.1m 7 1459527231 source=hostname foo=bar boo=baz";
        assertTrue(config.forPort("2878").forPointLine().filter(testPoint2));

        String testPoint3 = "collectd.cpu.loadavg.1m 7 1459527231 source=hostname bar=foo boo=baz";
        assertFalse(config.forPort("2878").forPointLine().filter(testPoint3));
    }

    @Test
    public void testAgentPreprocessorForReportPoint() {
        ReportPoint testPoint1 = parsePointLine("collectd.cpu.loadavg.1m 7 1459527231 source=hostname foo=bar boo=baz");
        assertTrue(config.forPort("2878").forReportPoint().filter(testPoint1));

        ReportPoint testPoint2 = parsePointLine("foo.collectd.cpu.loadavg.1m 7 1459527231 source=hostname foo=bar boo=baz");
        assertFalse(config.forPort("2878").forReportPoint().filter(testPoint2));

        ReportPoint testPoint3 = parsePointLine("collectd.cpu.loadavg.1m 7 1459527231 source=hostname foo=west123 boo=baz");
        assertFalse(config.forPort("2878").forReportPoint().filter(testPoint3));

        ReportPoint testPoint4 = parsePointLine("collectd.cpu.loadavg.1m 7 1459527231 source=bar123 foo=bar boo=baz");
        assertFalse(config.forPort("2878").forReportPoint().filter(testPoint4));

        // in this test we are confirming that the rule sets for different ports are in fact different
        // on port 2878 we add "newtagkey=1", on port 4242 we don't
        ReportPoint testPoint1a = parsePointLine("collectd.cpu.loadavg.1m 7 1459527231 source=hostname foo=bar boo=baz");
        config.forPort("2878").forReportPoint().transform(testPoint1);
        config.forPort("4242").forReportPoint().transform(testPoint1a);
        String expectedPoint1 = "\"collectd.cpu.loadavg.1m\" 7.0 1459527231 " +
            "source=\"hostname\" \"baz\"=\"bar\" \"boo\"=\"baz\" \"newtagkey\"=\"1\"";
        String expectedPoint1a = "\"collectd.cpu.loadavg.1m\" 7.0 1459527231 " +
            "source=\"hostname\" \"baz\"=\"bar\" \"boo\"=\"baz\"";
        assertEquals(expectedPoint1, referencePointToStringImpl(testPoint1));
        assertEquals(expectedPoint1a, referencePointToStringImpl(testPoint1a));

        // in this test the following should happen:
        // - rename foo tag to baz
        // - "metrictest." prefix gets dropped from the metric name
        // - replace dashes with dots in bar tag
        String expectedPoint5 = "\"metric\" 7.0 1459527231 source=\"src\" " +
            "\"bar\"=\"baz.baz.baz\" \"baz\"=\"bar\" \"datacenter\"=\"az1\" \"newtagkey\"=\"1\" \"qux\"=\"123z\"";
        assertEquals(expectedPoint5, applyAllTransformers(
            "metrictest.metric 7 1459527231 source=src foo=bar datacenter=az1 bar=baz-baz-baz qux=123z", "2878"));

        // in this test the following should happen:
        // - rename tag foo to baz
        // - add new tag newtagkey=1
        // - drop dc1 tag
        // - drop datacenter tag as it matches az[4-6]
        // - rename qux tag to numericTag
        String expectedPoint6 = "\"some.metric\" 7.0 1459527231 source=\"hostname\" " +
            "\"baz\"=\"bar\" \"newtagkey\"=\"1\" \"numericTag\"=\"12345\" \"prefix\"=\"some\"";
        assertEquals(expectedPoint6, applyAllTransformers(
            "some.metric 7 1459527231 source=hostname foo=bar dc1=baz datacenter=az4 qux=12345", "2878"));
    }

    @Test
    public void testAllFilters() {
        assertTrue(applyAllFilters("valid.metric.loadavg.1m 7 1459527231 source=h.prod.corp foo=bar boo=baz", "1111"));
        assertTrue(applyAllFilters("valid.metric.loadavg.1m 7 1459527231 source=h.prod.corp foo=b_r boo=baz", "1111"));
        assertTrue(applyAllFilters("valid.metric.loadavg.1m 7 1459527231 source=h.prod.corp foo=b_r boo=baz", "1111"));
        assertFalse(applyAllFilters("invalid.metric.loadavg.1m 7 1459527231 source=h.prod.corp foo=bar boo=baz", "1111"));
        assertFalse(applyAllFilters("valid.metric.loadavg.1m 7 1459527231 source=h.prod.corp foo=bar baz=boo", "1111"));
        assertFalse(applyAllFilters("valid.metric.loadavg.1m 7 1459527231 source=h.dev.corp foo=bar boo=baz", "1111"));
        assertFalse(applyAllFilters("valid.metric.loadavg.1m 7 1459527231 source=h.prod.corp foo=bar boo=stop", "1111"));
        assertFalse(applyAllFilters("loadavg.1m 7 1459527231 source=h.prod.corp foo=bar boo=baz", "1111"));
    }

    /** Runs both the point-line and the ReportPoint filter chains for the given port. */
    private boolean applyAllFilters(String pointLine, String strPort) {
        if (!config.forPort(strPort).forPointLine().filter(pointLine))
            return false;
        ReportPoint point = parsePointLine(pointLine);
        return config.forPort(strPort).forReportPoint().filter(point);
    }

    /** Runs both transformer chains for the given port and renders the result as a string. */
    private String applyAllTransformers(String pointLine, String strPort) {
        String transformedPointLine = config.forPort(strPort).forPointLine().transform(pointLine);
        ReportPoint point = parsePointLine(transformedPointLine);
        config.forPort(strPort).forReportPoint().transform(point);
        return referencePointToStringImpl(point);
    }

    /**
     * Reference implementation for rendering a ReportPoint as a point line.
     *
     * <p>Double quotes inside metric/source/tag values are escaped with a backslash.
     * The previous implementation used {@code replaceAll("\"", "\\\"")}, but in a
     * regex replacement string {@code \"} unescapes back to a bare quote, making
     * the escaping a no-op; {@link String#replace} performs the intended literal
     * replacement.
     */
    private static String referencePointToStringImpl(ReportPoint point) {
        StringBuilder toReturn = new StringBuilder(String.format("\"%s\" %s %d source=\"%s\"",
            escapeQuotes(point.getMetric()),
            point.getValue(),
            point.getTimestamp() / 1000,
            escapeQuotes(point.getHost())));
        for (Map.Entry<String, String> entry : point.getAnnotations().entrySet()) {
            toReturn.append(String.format(" \"%s\"=\"%s\"",
                escapeQuotes(entry.getKey()),
                escapeQuotes(entry.getValue())));
        }
        return toReturn.toString();
    }

    /** Escapes double quotes with a backslash (literal replacement, not regex-based). */
    private static String escapeQuotes(String input) {
        return input.replace("\"", "\\\"");
    }

    private ReportPoint parsePointLine(String pointLine) {
        List<ReportPoint> points = Lists.newArrayListWithExpectedSize(1);
        decoder.decodeReportPoints(pointLine, points, "dummy");
        ReportPoint point = points.get(0);
        // convert annotations to TreeMap so the result is deterministic
        point.setAnnotations(new TreeMap<>(point.getAnnotations()));
        return point;
    }
}
|
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.config;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Sets;
import org.apache.cassandra.config.EncryptionOptions.ClientEncryptionOptions;
import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
/**
* A class that contains configuration properties for the cassandra node it runs within.
*
* Properties declared as volatile can be mutated via JMX.
*/
public class Config
{
    /*
     * Prefix for Java properties for internal Cassandra configuration options
     */
    public static final String PROPERTY_PREFIX = "cassandra.";
    public String cluster_name = "Test Cluster";
    // ---- Authentication / authorization ----
    // Field names deliberately use snake_case: they map 1:1 to cassandra.yaml keys.
    // Fields declared volatile are mutable at runtime via JMX (see class javadoc).
    public String authenticator;
    public String authorizer;
    public String role_manager;
    public volatile int permissions_validity_in_ms = 2000;
    public int permissions_cache_max_entries = 1000;
    // -1 means "derive from the validity setting" — TODO confirm against DatabaseDescriptor
    public volatile int permissions_update_interval_in_ms = -1;
    public volatile int roles_validity_in_ms = 2000;
    public int roles_cache_max_entries = 1000;
    public volatile int roles_update_interval_in_ms = -1;
    /* Hashing strategy Random or OPHF */
    public String partitioner;
    public Boolean auto_bootstrap = true;
    // ---- Hinted handoff ----
    public volatile boolean hinted_handoff_enabled = true;
    public Set<String> hinted_handoff_disabled_datacenters = Sets.newConcurrentHashSet();
    public volatile Integer max_hint_window_in_ms = 3 * 3600 * 1000; // three hours
    public String hints_directory;
    public ParameterizedClass seed_provider;
    public DiskAccessMode disk_access_mode = DiskAccessMode.auto;
    public DiskFailurePolicy disk_failure_policy = DiskFailurePolicy.ignore;
    public CommitFailurePolicy commit_failure_policy = CommitFailurePolicy.stop;
    /* initial token in the ring */
    public String initial_token;
    public Integer num_tokens = 1;
    /** Triggers automatic allocation of tokens if set, using the replication strategy of the referenced keyspace */
    public String allocate_tokens_for_keyspace = null;
    // ---- Request timeouts (milliseconds) ----
    public volatile Long request_timeout_in_ms = 10000L;
    public volatile Long read_request_timeout_in_ms = 5000L;
    public volatile Long range_request_timeout_in_ms = 10000L;
    public volatile Long write_request_timeout_in_ms = 2000L;
    public volatile Long counter_write_request_timeout_in_ms = 5000L;
    public volatile Long cas_contention_timeout_in_ms = 1000L;
    public volatile Long truncate_request_timeout_in_ms = 60000L;
    public Integer streaming_socket_timeout_in_ms = 3600000;
    public boolean cross_node_timeout = false;
    // Failure-detector sensitivity threshold.
    public volatile Double phi_convict_threshold = 8.0;
    // ---- Concurrency ----
    public Integer concurrent_reads = 32;
    public Integer concurrent_writes = 32;
    public Integer concurrent_counter_writes = 32;
    public Integer concurrent_materialized_view_writes = 32;
    @Deprecated
    public Integer concurrent_replicates = null;
    // ---- Memtables (null values mean "compute a default at startup") ----
    public Integer memtable_flush_writers = null;
    public Integer memtable_heap_space_in_mb;
    public Integer memtable_offheap_space_in_mb;
    public Float memtable_cleanup_threshold = null;
    // ---- Network: internode ----
    public Integer storage_port = 7000;
    public Integer ssl_storage_port = 7001;
    public String listen_address;
    public String listen_interface;
    public Boolean listen_interface_prefer_ipv6 = false;
    public String broadcast_address;
    public String internode_authenticator;
    // ---- Network: client (Thrift RPC) ----
    /* intentionally left set to true, despite being set to false in stock 2.2 cassandra.yaml
    we don't want to surprise Thrift users who have the setting blank in the yaml during 2.1->2.2 upgrade */
    public Boolean start_rpc = true;
    public String rpc_address;
    public String rpc_interface;
    public Boolean rpc_interface_prefer_ipv6 = false;
    public String broadcast_rpc_address;
    public Integer rpc_port = 9160;
    public Integer rpc_listen_backlog = 50;
    public String rpc_server_type = "sync";
    public Boolean rpc_keepalive = true;
    public Integer rpc_min_threads = 16;
    public Integer rpc_max_threads = Integer.MAX_VALUE;
    public Integer rpc_send_buff_size_in_bytes;
    public Integer rpc_recv_buff_size_in_bytes;
    public Integer internode_send_buff_size_in_bytes;
    public Integer internode_recv_buff_size_in_bytes;
    // ---- Network: client (CQL native protocol) ----
    public Boolean start_native_transport = false;
    public Integer native_transport_port = 9042;
    public Integer native_transport_port_ssl = null;
    public Integer native_transport_max_threads = 128;
    public Integer native_transport_max_frame_size_in_mb = 256;
    // -1 disables the connection limits.
    public volatile Long native_transport_max_concurrent_connections = -1L;
    public volatile Long native_transport_max_concurrent_connections_per_ip = -1L;
    @Deprecated
    public Integer thrift_max_message_length_in_mb = 16;
    public Integer thrift_framed_transport_size_in_mb = 15;
    // ---- Snapshots ----
    public Boolean snapshot_before_compaction = false;
    public Boolean auto_snapshot = true;
    /* if the size of columns or super-columns are more than this, indexing will kick in */
    public Integer column_index_size_in_kb = 64;
    public volatile int batch_size_warn_threshold_in_kb = 5;
    public volatile int batch_size_fail_threshold_in_kb = 50;
    // ---- Compaction / streaming throttles ----
    public Integer concurrent_compactors;
    public volatile Integer compaction_throughput_mb_per_sec = 16;
    public volatile Integer compaction_large_partition_warning_threshold_mb = 100;
    public Integer max_streaming_retries = 3;
    public volatile Integer stream_throughput_outbound_megabits_per_sec = 200;
    // 0 presumably means "unthrottled" — TODO confirm against StreamManager
    public volatile Integer inter_dc_stream_throughput_outbound_megabits_per_sec = 0;
    // ---- Directories ----
    public String[] data_file_directories = new String[0];
    public String saved_caches_directory;
    // Commit Log
    public String commitlog_directory;
    public Integer commitlog_total_space_in_mb;
    public CommitLogSync commitlog_sync;
    public Double commitlog_sync_batch_window_in_ms;
    public Integer commitlog_sync_period_in_ms;
    public int commitlog_segment_size_in_mb = 32;
    public ParameterizedClass commitlog_compression;
    public int commitlog_max_compression_buffers_in_pool = 3;
    public TransparentDataEncryptionOptions transparent_data_encryption_options = new TransparentDataEncryptionOptions();
    public Integer max_mutation_size_in_kb;
    @Deprecated
    public int commitlog_periodic_queue_size = -1;
    // ---- Snitch / scheduling ----
    public String endpoint_snitch;
    public Boolean dynamic_snitch = true;
    public Integer dynamic_snitch_update_interval_in_ms = 100;
    public Integer dynamic_snitch_reset_interval_in_ms = 600000;
    public Double dynamic_snitch_badness_threshold = 0.1;
    public String request_scheduler;
    public RequestSchedulerId request_scheduler_id;
    public RequestSchedulerOptions request_scheduler_options;
    // ---- Encryption ----
    public ServerEncryptionOptions server_encryption_options = new ServerEncryptionOptions();
    public ClientEncryptionOptions client_encryption_options = new ClientEncryptionOptions();
    // this encOptions is for backward compatibility (a warning is logged by DatabaseDescriptor)
    public ServerEncryptionOptions encryption_options;
    public InternodeCompression internode_compression = InternodeCompression.none;
    @Deprecated
    public Integer index_interval = null;
    // ---- Hints delivery ----
    public int hinted_handoff_throttle_in_kb = 1024;
    public int batchlog_replay_throttle_in_kb = 1024;
    public int max_hints_delivery_threads = 2;
    public int hints_flush_period_in_ms = 10000;
    public int max_hints_file_size_in_mb = 128;
    public int sstable_preemptive_open_interval_in_mb = 50;
    public volatile boolean incremental_backups = false;
    public boolean trickle_fsync = false;
    public int trickle_fsync_interval_in_kb = 10240;
    // ---- Caches (null sizes mean "compute a default at startup") ----
    public Long key_cache_size_in_mb = null;
    public volatile int key_cache_save_period = 14400;
    public volatile int key_cache_keys_to_save = Integer.MAX_VALUE;
    public String row_cache_class_name = "org.apache.cassandra.cache.OHCProvider";
    public long row_cache_size_in_mb = 0;
    public volatile int row_cache_save_period = 0;
    public volatile int row_cache_keys_to_save = Integer.MAX_VALUE;
    public Long counter_cache_size_in_mb = null;
    public volatile int counter_cache_save_period = 7200;
    public volatile int counter_cache_keys_to_save = Integer.MAX_VALUE;
    // Process-wide flag; accessed only through isClientMode()/setClientMode().
    private static boolean isClientMode = false;
    public Integer file_cache_size_in_mb = 512;
    public boolean buffer_pool_use_heap_if_exhausted = true;
    public DiskOptimizationStrategy disk_optimization_strategy = DiskOptimizationStrategy.ssd;
    public double disk_optimization_estimate_percentile = 0.95;
    public double disk_optimization_page_cross_chance = 0.1;
    public boolean inter_dc_tcp_nodelay = true;
    public MemtableAllocationType memtable_allocation_type = MemtableAllocationType.heap_buffers;
    // Process-wide flag; accessed only through getOutboundBindAny()/setOutboundBindAny().
    private static boolean outboundBindAny = false;
    public volatile int tombstone_warn_threshold = 1000;
    public volatile int tombstone_failure_threshold = 100000;
    public volatile Long index_summary_capacity_in_mb;
    public volatile int index_summary_resize_interval_in_minutes = 60;
    public int gc_warn_threshold_in_ms = 0;
    // TTL for different types of trace events.
    public int tracetype_query_ttl = (int) TimeUnit.DAYS.toSeconds(1);
    public int tracetype_repair_ttl = (int) TimeUnit.DAYS.toSeconds(7);
    /*
     * Strategy to use for coalescing messages in OutboundTcpConnection.
     * Can be fixed, movingaverage, timehorizon, disabled. Setting is case and leading/trailing
     * whitespace insensitive. You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.
     */
    public String otc_coalescing_strategy = "TIMEHORIZON";
    /*
     * How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first
     * messgae is received before it will be sent with any accompanying messages. For moving average this is the
     * maximum amount of time that will be waited as well as the interval at which messages must arrive on average
     * for coalescing to be enabled.
     */
    public static final int otc_coalescing_window_us_default = 200;
    public int otc_coalescing_window_us = otc_coalescing_window_us_default;
    public int windows_timer_interval = 0;
    // ---- User-defined functions ----
    public boolean enable_user_defined_functions = false;
    public boolean enable_scripted_user_defined_functions = false;
    /**
     * Optionally disable asynchronous UDF execution.
     * Disabling asynchronous UDF execution also implicitly disables the security-manager!
     * By default, async UDF execution is enabled to be able to detect UDFs that run too long / forever and be
     * able to fail fast - i.e. stop the Cassandra daemon, which is currently the only appropriate approach to
     * "tell" a user that there's something really wrong with the UDF.
     * When you disable async UDF execution, users MUST pay attention to read-timeouts since these may indicate
     * UDFs that run too long or forever - and this can destabilize the cluster.
     */
    public boolean enable_user_defined_functions_threads = true;
    /**
     * Time in milliseconds after a warning will be emitted to the log and to the client that a UDF runs too long.
     * (Only valid, if enable_user_defined_functions_threads==true)
     */
    public long user_defined_function_warn_timeout = 500;
    /**
     * Time in milliseconds after a fatal UDF run-time situation is detected and action according to
     * user_function_timeout_policy will take place.
     * (Only valid, if enable_user_defined_functions_threads==true)
     */
    public long user_defined_function_fail_timeout = 1500;
    /**
     * Defines what to do when a UDF ran longer than user_defined_function_fail_timeout.
     * Possible options are:
     * - 'die' - i.e. it is able to emit a warning to the client before the Cassandra Daemon will shut down.
     * - 'die_immediate' - shut down C* daemon immediately (effectively prevent the chance that the client will receive a warning).
     * - 'ignore' - just log - the most dangerous option.
     * (Only valid, if enable_user_defined_functions_threads==true)
     */
    public UserFunctionTimeoutPolicy user_function_timeout_policy = UserFunctionTimeoutPolicy.die;
    /** Returns whether outbound connections may bind to any local address. */
    public static boolean getOutboundBindAny()
    {
        return outboundBindAny;
    }
    public static void setOutboundBindAny(boolean value)
    {
        outboundBindAny = value;
    }
    /** Returns whether this JVM runs as a client tool rather than a full daemon. */
    public static boolean isClientMode()
    {
        return isClientMode;
    }
    public static void setClientMode(boolean clientMode)
    {
        isClientMode = clientMode;
    }
    // ---- Enumerations backing the string-valued yaml settings ----
    public enum CommitLogSync
    {
        periodic,
        batch
    }
    public enum InternodeCompression
    {
        all, none, dc
    }
    public enum DiskAccessMode
    {
        auto,
        mmap,
        mmap_index_only,
        standard,
    }
    public enum MemtableAllocationType
    {
        unslabbed_heap_buffers,
        heap_buffers,
        offheap_buffers,
        offheap_objects
    }
    public enum DiskFailurePolicy
    {
        best_effort,
        stop,
        ignore,
        stop_paranoid,
        die
    }
    public enum CommitFailurePolicy
    {
        stop,
        stop_commit,
        ignore,
        die,
    }
    public enum UserFunctionTimeoutPolicy
    {
        ignore,
        die,
        die_immediate
    }
    public enum RequestSchedulerId
    {
        keyspace
    }
    public enum DiskOptimizationStrategy
    {
        ssd,
        spinning
    }
}
|
|
/*
* Copyright 2013-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cloudfoundry.client.v3;
import org.cloudfoundry.AbstractIntegrationTest;
import org.cloudfoundry.ApplicationUtils;
import org.cloudfoundry.CloudFoundryVersion;
import org.cloudfoundry.IfCloudFoundryVersion;
import org.cloudfoundry.ServiceBrokerUtils;
import org.cloudfoundry.client.CloudFoundryClient;
import org.cloudfoundry.client.v3.servicebrokers.BasicAuthentication;
import org.cloudfoundry.client.v3.servicebrokers.CreateServiceBrokerRequest;
import org.cloudfoundry.client.v3.servicebrokers.DeleteServiceBrokerRequest;
import org.cloudfoundry.client.v3.servicebrokers.GetServiceBrokerRequest;
import org.cloudfoundry.client.v3.servicebrokers.ListServiceBrokersRequest;
import org.cloudfoundry.client.v3.servicebrokers.ServiceBrokerRelationships;
import org.cloudfoundry.client.v3.servicebrokers.UpdateServiceBrokerRequest;
import org.cloudfoundry.client.v3.spaces.CreateSpaceRequest;
import org.cloudfoundry.client.v3.spaces.CreateSpaceResponse;
import org.cloudfoundry.client.v3.spaces.SpaceRelationships;
import org.cloudfoundry.util.JobUtils;
import org.cloudfoundry.util.PaginationUtils;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.ClassPathResource;
import reactor.core.Exceptions;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import java.io.IOException;
import java.nio.file.Path;
import java.time.Duration;
import static org.assertj.core.api.Assertions.assertThat;
import static org.cloudfoundry.ServiceBrokerUtils.createServiceBroker;
import static org.cloudfoundry.ServiceBrokerUtils.deleteServiceBroker;
@IfCloudFoundryVersion(greaterThanOrEqualTo = CloudFoundryVersion.PCF_2_10)
public final class ServiceBrokersTest extends AbstractIntegrationTest {
    // Injected by the integration-test Spring context.
    @Autowired
    private CloudFoundryClient cloudFoundryClient;
    @Autowired
    private Mono<String> organizationId;
    // Pre-provisioned broker used by the read-only tests (get/list).
    @Autowired
    private Mono<String> serviceBrokerId;
    @Autowired
    private String serviceBrokerName;
    /**
     * Pushes the bundled test broker application into a fresh space, registers
     * it as a space-scoped service broker, and verifies it appears in the
     * broker list. Cleans the broker application up afterwards.
     */
    @Test
    public void create() {
        String planName = this.nameFactory.getPlanName();
        String serviceBrokerName = this.nameFactory.getServiceBrokerName();
        String serviceName = this.nameFactory.getServiceName();
        String spaceName = this.nameFactory.getSpaceName();
        Path application;
        try {
            // Broker implementation shipped as a test resource on the classpath.
            application = new ClassPathResource("test-service-broker.jar").getFile().toPath();
        } catch (IOException e) {
            throw Exceptions.propagate(e);
        }
        ApplicationUtils.ApplicationMetadata applicationMetadata = this.organizationId
            .flatMap(organizationId -> createSpaceId(this.cloudFoundryClient, organizationId, spaceName))
            .flatMap(spaceId -> ServiceBrokerUtils.pushServiceBrokerApplication(this.cloudFoundryClient, application, this.nameFactory, planName, serviceName, spaceId))
            .block(Duration.ofMinutes(5));
        this.cloudFoundryClient.serviceBrokersV3()
            .create(CreateServiceBrokerRequest.builder()
                .authentication(BasicAuthentication.builder()
                    .password("test-authentication-password")
                    .username("test-authentication-username")
                    .build())
                .url(applicationMetadata.uri)
                .name(serviceBrokerName)
                .relationships(ServiceBrokerRelationships.builder()
                    .space(ToOneRelationship.builder()
                        .data(Relationship.builder()
                            .id(applicationMetadata.spaceId)
                            .build())
                        .build())
                    .build())
                .build())
            // Creation is asynchronous: wait for the returned job to finish.
            .flatMap(job -> JobUtils.waitForCompletion(this.cloudFoundryClient, Duration.ofMinutes(5), job))
            .then(PaginationUtils
                .requestClientV3Resources(page -> this.cloudFoundryClient.serviceBrokersV3()
                    .list(ListServiceBrokersRequest.builder()
                        .name(serviceBrokerName)
                        .page(page)
                        .build()))
                .singleOrEmpty())
            .as(StepVerifier::create)
            .expectNextCount(1)
            .expectComplete()
            .verify(Duration.ofMinutes(5));
        deleteServiceBroker(this.cloudFoundryClient, applicationMetadata.applicationId)
            .block(Duration.ofMinutes(5));
    }
    /**
     * Creates a broker, deletes it via the v3 API, and verifies it no longer
     * appears in the broker list.
     */
    @Test
    public void delete() {
        String planName = this.nameFactory.getPlanName();
        String serviceBrokerName = this.nameFactory.getServiceBrokerName();
        String serviceName = this.nameFactory.getServiceName();
        String spaceName = this.nameFactory.getSpaceName();
        ServiceBrokerUtils.ServiceBrokerMetadata serviceBrokerMetadata = this.organizationId
            .flatMap(organizationId -> createSpaceId(this.cloudFoundryClient, organizationId, spaceName))
            .flatMap(spaceId -> createServiceBroker(this.cloudFoundryClient, this.nameFactory, planName, serviceBrokerName, serviceName, spaceId, true))
            .block(Duration.ofMinutes(5));
        this.cloudFoundryClient.serviceBrokersV3()
            .delete(DeleteServiceBrokerRequest.builder()
                .serviceBrokerId(serviceBrokerMetadata.serviceBrokerId)
                .build())
            .flatMap(job -> JobUtils.waitForCompletion(this.cloudFoundryClient, Duration.ofMinutes(5), job))
            .then(PaginationUtils
                .requestClientV3Resources(page -> this.cloudFoundryClient.serviceBrokersV3()
                    .list(ListServiceBrokersRequest.builder()
                        .name(serviceBrokerName)
                        .page(page)
                        .build()))
                .singleOrEmpty())
            .as(StepVerifier::create)
            // No expectNextCount: the deleted broker must not be listed.
            .expectComplete()
            .verify(Duration.ofMinutes(5));
        deleteServiceBroker(this.cloudFoundryClient, serviceBrokerMetadata.applicationMetadata.applicationId)
            .block(Duration.ofMinutes(5));
    }
    /** Fetches the pre-provisioned broker by id and checks its name. */
    @Test
    public void get() {
        this.serviceBrokerId
            .flatMap(serviceBrokerId -> this.cloudFoundryClient.serviceBrokersV3()
                .get(GetServiceBrokerRequest.builder()
                    .serviceBrokerId(serviceBrokerId)
                    .build()))
            .as(StepVerifier::create)
            .assertNext(serviceBroker -> assertThat(serviceBroker.getName()).isEqualTo(this.serviceBrokerName))
            .expectComplete()
            .verify(Duration.ofMinutes(5));
    }
    /** Lists brokers filtered by the pre-provisioned broker's name. */
    @Test
    public void list() {
        PaginationUtils
            .requestClientV3Resources(page -> this.cloudFoundryClient.serviceBrokersV3()
                .list(ListServiceBrokersRequest.builder()
                    .name(this.serviceBrokerName)
                    .page(page)
                    .build()))
            .as(StepVerifier::create)
            .expectNextCount(1)
            .expectComplete()
            .verify(Duration.ofMinutes(5));
    }
    /**
     * Creates a broker, renames it via the v3 update endpoint, and verifies the
     * broker is listed under its new name.
     */
    @Test
    public void update() {
        String planName = this.nameFactory.getPlanName();
        String serviceBrokerName1 = this.nameFactory.getServiceBrokerName();
        String serviceBrokerName2 = this.nameFactory.getServiceBrokerName();
        String serviceName = this.nameFactory.getServiceName();
        String spaceName = this.nameFactory.getSpaceName();
        ServiceBrokerUtils.ServiceBrokerMetadata serviceBrokerMetadata = this.organizationId
            .flatMap(organizationId -> createSpaceId(this.cloudFoundryClient, organizationId, spaceName))
            .flatMap(spaceId -> createServiceBroker(this.cloudFoundryClient, this.nameFactory, planName, serviceBrokerName1, serviceName, spaceId, true))
            .block(Duration.ofMinutes(5));
        this.cloudFoundryClient.serviceBrokersV3()
            .update(UpdateServiceBrokerRequest.builder()
                .serviceBrokerId(serviceBrokerMetadata.serviceBrokerId)
                .name(serviceBrokerName2)
                .build())
            // The update may or may not return a job; only wait when it does.
            .filter(responseUpdate -> responseUpdate.jobId().isPresent())
            .map(responseUpdate -> responseUpdate.jobId().get())
            .flatMap(job -> JobUtils.waitForCompletion(this.cloudFoundryClient, Duration.ofMinutes(5), job))
            .then(PaginationUtils
                .requestClientV3Resources(page -> this.cloudFoundryClient.serviceBrokersV3()
                    .list(ListServiceBrokersRequest.builder()
                        .name(serviceBrokerName2)
                        .page(page)
                        .build()))
                .singleOrEmpty())
            .as(StepVerifier::create)
            .expectNextCount(1)
            .expectComplete()
            .verify(Duration.ofMinutes(5));
        deleteServiceBroker(this.cloudFoundryClient, serviceBrokerMetadata.applicationMetadata.applicationId)
            .block(Duration.ofMinutes(5));
    }
    /**
     * Creates a broker, attaches a metadata label via update, and verifies the
     * broker can be found with a label selector.
     */
    @Test
    public void updateMetadata() {
        String planName = this.nameFactory.getPlanName();
        String serviceBrokerName = this.nameFactory.getServiceBrokerName();
        String serviceName = this.nameFactory.getServiceName();
        String spaceName = this.nameFactory.getSpaceName();
        ServiceBrokerUtils.ServiceBrokerMetadata serviceBrokerMetadata = this.organizationId
            .flatMap(organizationId -> createSpaceId(this.cloudFoundryClient, organizationId, spaceName))
            .flatMap(spaceId -> createServiceBroker(this.cloudFoundryClient, this.nameFactory, planName, serviceBrokerName, serviceName, spaceId, true))
            .block(Duration.ofMinutes(5));
        this.cloudFoundryClient.serviceBrokersV3()
            .update(UpdateServiceBrokerRequest.builder()
                .serviceBrokerId(serviceBrokerMetadata.serviceBrokerId)
                .metadata(Metadata.builder().label("type", "dev").build())
                .build())
            .filter(responseUpdate -> responseUpdate.jobId().isPresent())
            .map(responseUpdate -> responseUpdate.jobId().get())
            .flatMap(job -> JobUtils.waitForCompletion(this.cloudFoundryClient, Duration.ofMinutes(5), job))
            .then(PaginationUtils
                .requestClientV3Resources(page -> this.cloudFoundryClient.serviceBrokersV3()
                    .list(ListServiceBrokersRequest.builder()
                        .labelSelector("type=dev")
                        .name(serviceBrokerName)
                        .page(page)
                        .build()))
                .singleOrEmpty())
            .as(StepVerifier::create)
            .expectNextCount(1)
            .expectComplete()
            .verify(Duration.ofMinutes(5));
        deleteServiceBroker(this.cloudFoundryClient, serviceBrokerMetadata.applicationMetadata.applicationId)
            .block(Duration.ofMinutes(5));
    }
    // Creates a space in the given organization and yields its id.
    private static Mono<String> createSpaceId(CloudFoundryClient cloudFoundryClient, String organizationId, String spaceName) {
        return requestCreateSpace(cloudFoundryClient, organizationId, spaceName)
            .map(CreateSpaceResponse::getId);
    }
    // Raw v3 space-creation request, parented to the given organization.
    private static Mono<CreateSpaceResponse> requestCreateSpace(CloudFoundryClient cloudFoundryClient, String organizationId, String spaceName) {
        return cloudFoundryClient.spacesV3()
            .create(CreateSpaceRequest.builder()
                .name(spaceName)
                .relationships(SpaceRelationships.builder()
                    .organization(ToOneRelationship.builder()
                        .data(Relationship.builder()
                            .id(organizationId)
                            .build())
                        .build())
                    .build())
                .build());
    }
}
|
|
package alekso56.TkIrc;
import java.io.IOException;
import java.text.NumberFormat;
import java.util.Date;
import java.util.Iterator;
import alekso56.TkIrc.irclib.Base64;
import alekso56.TkIrc.irclib.IRCLib;
import cpw.mods.fml.common.FMLCommonHandler;
import cpw.mods.fml.relauncher.Side;
import net.minecraft.entity.player.EntityPlayerMP;
import net.minecraft.scoreboard.ScorePlayerTeam;
import net.minecraft.server.MinecraftServer;
import net.minecraft.util.ChatComponentText;
import net.minecraftforge.common.DimensionManager;
public class IRCBot extends IRCLib implements API {
// Section sign (U+00A7), the escape character for Minecraft chat formatting codes.
private static final String bs = Character.toString('\u00A7');
/**
 * Returns the arithmetic mean of the given tick-time samples.
 *
 * <p>FIX: the previous implementation divided {@code long} by {@code int}
 * before returning, so the average was silently truncated to a whole number;
 * it also threw {@link ArithmeticException} for an empty array.
 *
 * @param par1ArrayOfLong samples to average (nanoseconds per tick — TODO confirm units at call site)
 * @return the mean as a double, or 0.0 when the array is null or empty
 */
private double timeFormat(long[] par1ArrayOfLong) {
    if (par1ArrayOfLong == null || par1ArrayOfLong.length == 0) {
        return 0.0D;
    }
    long total = 0L;
    for (long sample : par1ArrayOfLong) {
        total += sample;
    }
    // Cast before dividing so fractional averages are preserved.
    return (double) total / par1ArrayOfLong.length;
}
/**
 * Checks whether an IRC user may run privileged bot commands.
 *
 * <p>Two checks are attempted in order: (1) if the user is in the configured
 * ops list, ask NickServ whether they are identified ({@code ACC} status 3);
 * (2) otherwise look for an {@code @} (channel-operator) prefix on their entry
 * in the channel NAMES reply.
 *
 * @param username the IRC nick to check
 * @param nick     where to send failure notices, or null to stay silent
 * @return true when either check passes
 */
@Override
public boolean isAuthed(String username, String nick) {
    if (TkIrc.ops.contains(username.toLowerCase())) {
        String authnum = "0";
        //check nickserv
        try {
            TkIrc.toIrc.sendRaw("NickServ ACC " + username);
            // NOTE(review): assumes the very next socket line is the ACC reply and
            // that the status code sits at token index 5 — interleaved channel
            // traffic would break both assumptions; confirm the IRC lib serializes reads.
            String response = TkIrc.toIrc.in.readLine();
            String[] parted = response.split(" ");
            authnum = parted[5];
        } catch (IOException e) {
            // Best-effort: fall through to the NAMES check below.
            //e.printStackTrace();
        }
        if (authnum.equals("3")) {  // ACC 3 = identified to services
            return true;
        }
    }
    //first check failed, try names
    try {
        TkIrc.toIrc.sendRaw("names " + Config.cName);
        // NOTE(review): same single-readLine assumption as above; also contains()
        // allows substring matches against other nicks — TODO confirm intended.
        String response = TkIrc.toIrc.in.readLine();
        String[] parted = response.split(" ");
        for (int curr = 0; curr < parted.length; curr++) {
            if (parted[curr].startsWith("@") && parted[curr].contains(username)) {
                return true;
            }
        }
    } catch (IOException e) {
        if (nick != null) {
            TkIrc.toIrc.sendMessage(nick, "Both NS and names failed to work. This is a critical error.");
        }
    }
    if (nick != null) {
        TkIrc.toIrc.sendMessage(nick, "You are unauthorized.");
    }
    return false;
}
/**
 * Handles an incoming IRC message: relays ordinary chat into the game, and
 * dispatches prefixed bot commands (players, status, tps, help, base64,
 * moddir, rainbow, set/unset phrase responses, plus several op-only commands).
 *
 * @param usernames sender's nick
 * @param u         sender's ident/username
 * @param h         sender's host
 * @param Channel   channel the message arrived on (or the bot's own nick for PMs)
 * @param m         raw message text
 */
@Override
public void onMessage(String usernames, String u, String h, String Channel, String m) {
    if (!m.startsWith(Config.prefixforirccommands)) {
        // Plain chat: relay into the game, keeping IRC nick colors.
        usernames = colorNick(usernames, u, h);
        if (Channel.equals(this.sNick)) {
            mcMessage(usernames, m, true);
        } else {
            String sPrefix = Config.pIngameMSG.replaceAll("%c", Channel).replaceAll("%n", usernames) + " ";
            mcMessage(sPrefix, m, false);
        }
        return;
    } else {
        m = m.substring(Config.prefixforirccommands.length());
    }
    m = m.toLowerCase();
    // When user commands are disabled, only authed ops may proceed.
    if (!Config.IsUserCommandsEnabled && !isAuthed(usernames, null)) {
        return;
    }
    if (m.startsWith("players") && (Side.SERVER == FMLCommonHandler.instance().getSide())) {
        String[] aPlayers = MinecraftServer.getServer().getAllUsernames();
        String lPlayers = aPlayers.length == 0 ? "None." : "";
        if (aPlayers.length < 50) {
            for (String sPlayer : aPlayers) {
                sPlayer = Scoreboard(sPlayer, false);
                // isEmpty() replaces the accidental identity comparison (lPlayers == "").
                lPlayers = lPlayers + (lPlayers.isEmpty() ? sPlayer : ", " + sPlayer);
            }
            if (lPlayers.length() <= 400) {
                TkIrc.toIrc.sendMessage(Channel, lPlayers);
            } else {
                // FIX: the previous manual loop only advanced its index when it landed
                // on a comma (infinite loop otherwise) and could index past the end of
                // the string. Chunk into <=400-char notices, as "moddir" already does.
                for (String chunk : lPlayers.split("(?<=\\G.{400})")) {
                    TkIrc.toIrc.sendNotice(Channel, chunk);
                }
            }
        } else {
            TkIrc.toIrc.sendMessage(Channel, "Too many players present.");
        }
        return;
    }
    if (m.startsWith("c ") && isAuthed(usernames, Channel) && m.length() >= 2 && !m.substring(1).startsWith("me")) {
        // Execute a Minecraft command as the server and echo any captured output.
        TkIrcCommandsender tki = new TkIrcCommandsender();
        tki.resetLog();
        MinecraftServer.getServer().getCommandManager().executeCommand(tki, m.substring(1));
        String out = tki.getLogContents();
        tki.resetLog();
        if (out.isEmpty()) {
            // FIX: corrected the misspelled "succesfully" in the reply.
            TkIrc.toIrc.sendMessage(Channel, "Executed successfully, but got no return.");
        } else {
            TkIrc.toIrc.sendMessage(Channel, out);
        }
        return;
    }
    if (m.startsWith("tusercommands") && isAuthed(usernames, Channel)) {
        Config.IsUserCommandsEnabled = !Config.IsUserCommandsEnabled;
        TkIrc.toIrc.sendNotice(Channel, "Toggled user commands to " + Config.IsUserCommandsEnabled);
        return;
    }
    if (m.startsWith("tachievements") && isAuthed(usernames, Channel)) {
        Config.Achievements = !Config.Achievements;
        TkIrc.toIrc.sendNotice(Channel, "Toggled Achievements to " + Config.Achievements);
        return;
    }
    if (m.startsWith("status")) {
        TkIrc.toIrc.sendMessage(Channel, TkIrc.toIrc.getrawurle());
        return;
    }
    if (m.startsWith("help") && m.length() == 4) {
        // Bare "help": list all commands (op-only ones included when authed),
        // splitting into multiple notices when a line grows past 400 chars.
        String msgb = "Prefix: " + Config.prefixforirccommands + " help| players| status| tps <t or worldNum>| base64| moddir| rainbow| ";
        if (isAuthed(usernames, null)) {
            msgb = msgb + "set <command> <reply>| unset <command>| c <mcCommand>| fakecrash| tUserCommands| tAchievements| ";
        }
        Iterator<String> commands = TkIrc.commands.keySet().iterator();
        while (commands.hasNext()) {
            String current = commands.next();
            if (msgb.length() <= 400) {
                msgb = msgb + current + "| ";
            } else {
                TkIrc.toIrc.sendNotice(usernames, msgb);
                msgb = "";
            }
        }
        TkIrc.toIrc.sendNotice(usernames, msgb);
        return;
    } else if (m.startsWith("help") && m.length() >= 5) {
        // "help <command>": per-command usage text.
        m = m.substring(5).toLowerCase();
        if (m.startsWith("help")) {
            TkIrc.toIrc.sendNotice(usernames, "help: Display list of commands currently served by this server.");
        } else if (m.startsWith("players")) {
            TkIrc.toIrc.sendNotice(usernames, "players: List all players currently on server, by username.");
        } else if (m.startsWith("status")) {
            TkIrc.toIrc.sendNotice(usernames, "status: Parse all of mojangs services and output non working services.");
        } else if (m.startsWith("tps")) {
            TkIrc.toIrc.sendNotice(usernames, "tps: Show the tick per second on all worlds, or just the number provided");
        } else if (m.startsWith("base64")) {
            TkIrc.toIrc.sendNotice(usernames, "base64: Command converts input to base64, this module is used in sasl, but can also be used here.");
        } else if (m.startsWith("moddir")) {
            TkIrc.toIrc.sendNotice(usernames, "moddir: List all currently loaded forgemods, in notice.");
        } else if (m.startsWith("rainbow")) {
            TkIrc.toIrc.sendNotice(usernames, "rainbow: Command adds random colors to input text.");
        } else if (m.startsWith("set")) {
            TkIrc.toIrc.sendNotice(usernames, "set: Set a response to a phrase, in the format set <command> <reply>");
        } else if (m.startsWith("unset")) {
            TkIrc.toIrc.sendNotice(usernames, "unset: Remove a response to a phrase from the database, unset <command>");
        } else if (m.startsWith("c")) {
            TkIrc.toIrc.sendNotice(usernames, "c: Execute a minecraft command as the server.");
        } else if (m.startsWith("fakecrash")) {
            TkIrc.toIrc.sendNotice(usernames, "fakecrash: Make a crashfile in the crashreports dir, then print a message.");
        } else if (m.startsWith("tusercommands")) {
            TkIrc.toIrc.sendNotice(usernames, "tUserCommands: Toggle users ability to use the (irc)server commands.");
        } else if (m.startsWith("tachievements")) {
            // FIX: this branch previously tested the misspelling "tcchievements"
            // and was therefore unreachable.
            TkIrc.toIrc.sendNotice(usernames, "tAchievements: Toggle achievements announcements on irc.");
        }
        return;
    }
    if (m.startsWith("base64") && m.length() > 8) {
        TkIrc.toIrc.sendMessage(Channel, Base64.encode(m.substring(7)));
        return;
    }
    if (m.startsWith("moddir")) {
        // The mod list can be long; split into <=400-char notices.
        String rawData = TkIrc.combinedModList();
        if (rawData.length() <= 400) {
            TkIrc.toIrc.sendNotice(Channel, rawData);
        } else {
            String[] rowData = rawData.split("(?<=\\G.{400})");
            for (String Packet : rowData) {
                TkIrc.toIrc.sendNotice(Channel, Packet);
            }
        }
        return;
    }
    if (m.startsWith("rainbow") && m.length() > 8) {
        TkIrc.toIrc.sendMessage(Channel, colorRainbow(m.substring(8)));
        return;
    }
    if (m.startsWith("tps")) {
        // Report per-dimension TPS (optionally filtered to one dimension id)
        // plus an overall summary line when no valid id filter was given.
        NumberFormat percentFormatter = NumberFormat.getPercentInstance();
        boolean equalz = !m.substring(3).trim().isEmpty();
        percentFormatter.setMaximumFractionDigits(1);
        boolean wasInt = false;
        double totalTickTime = 0.0D;
        for (Integer id : DimensionManager.getIDs()) {
            double tickTime = timeFormat(MinecraftServer
                .getServer().worldTickTimes.get(id)) * 1.0E-006D;
            double tps = Math.min(1000.0D / tickTime, 20.0D);
            Boolean equals = false;
            totalTickTime += tickTime;
            try {
                equals = equalz && id.equals(Integer.parseInt(m.substring(3).trim()));
                wasInt = true;
            } catch (NumberFormatException e) {
                // Argument was not a dimension id; fall through and print all worlds.
            }
            String tickPercent = percentFormatter.format(tps / 20.0D);
            String outToPlayer = String.format(
                "%2.2f (%s) %06.02fms %3d %s",
                new Object[] {
                    Double.valueOf(tps),
                    tickPercent,
                    Double.valueOf(tickTime),
                    id,
                    DimensionManager.getProvider(id.intValue())
                        .getDimensionName() });
            if (!m.substring(3).isEmpty()) {
                if (equals) {
                    TkIrc.toIrc.sendMessage(Channel, outToPlayer);
                }
            } else {
                TkIrc.toIrc.sendMessage(Channel, outToPlayer);
            }
        }
        double tps = Math.min(1000.0D / totalTickTime, 20.0D);
        String out1 = String.format(
            "Overall: %2.2f (%s) %06.02fms",
            new Object[] { Double.valueOf(tps),
                percentFormatter.format(tps / 20.0D),
                Double.valueOf(totalTickTime) });
        if (!wasInt || !equalz) {
            TkIrc.toIrc.sendMessage(Channel, out1);
        }
        return;
    }
    if (m.startsWith("fakecrash") && isAuthed(usernames, Channel)) {
        TkIrc.FakeCrash(Channel);
        return;
    }
    // Custom phrase responses plus op-only set/unset management.
    String[] commandsplit = m.split(" ", 3);
    try {
        String mesig = commandsplit[0];
        if (TkIrc.commands.containsKey(mesig)) {
            TkIrc.toIrc.sendMessage(Channel, TkIrc.commands.get(mesig));
            return;
        }
        if (m.startsWith("unset")
                && commandsplit[1] != null && isAuthed(usernames, Channel)) {
            if (TkIrc.commands.get(commandsplit[1]) != null) {
                TkIrc.commands.remove(commandsplit[1]);
                TkIrc.toIrc.sendMessage(Channel, "removed " + commandsplit[1]);
                TkIrc.toIrc.savecmd();
            } else {
                TkIrc.toIrc.sendMessage(Channel,
                    "Command to be removed not found");
            }
            return;
        }
        if (m.startsWith("set") && commandsplit[2] != null && commandsplit[1] != null && isAuthed(usernames, Channel)) {
            TkIrc.commands.put(commandsplit[1].toLowerCase(), commandsplit[2]);
            TkIrc.toIrc.sendNotice(usernames, "Set " + commandsplit[1] + " as " + commandsplit[2]);
            TkIrc.toIrc.savecmd();
            return;
        }
    } catch (IndexOutOfBoundsException e) {
        // set/unset invoked without the required argument tokens.
        TkIrc.toIrc.sendMessage(Channel, "Invalid command format");
        return;
    }
}
/**
 * Resolves a player's display name through the scoreboard team formatting and
 * converts the Minecraft color codes to IRC-safe ones.
 *
 * @param sPlayer the raw player name.
 * @param isMSG   true when the name is for a private message; team colors are
 *                then only applied if {@code Config.scoreboardColors} is set.
 * @return the (possibly team-formatted) name, or the input unchanged when the
 *         player is not online or formatting is disabled.
 */
public static String Scoreboard(String sPlayer, boolean isMSG) {
    if (!isMSG || Config.scoreboardColors) {
        EntityPlayerMP player = MinecraftServer.getServer().getConfigurationManager().func_152612_a(sPlayer);
        // Guard against offline/unknown players; previously this would NPE.
        if (player != null) {
            String message = ScorePlayerTeam.formatPlayerName(player.getTeam(), sPlayer);
            // (Removed a no-op substring(0, length()) call.)
            sPlayer = stripColorsForIRC(message);
        }
    }
    return sPlayer;
}
/**
 * Relays an IRC /me action into Minecraft chat.
 * The configured template uses %c for the channel/target and %n for the nick.
 */
@Override
public void onAction(String n, String u, String h, String d, String m) {
    n = colorNick(n, u, h);
    // Use literal replace(), not replaceAll(): the placeholders are not regexes,
    // and a nick/channel containing '$' or '\' would make replaceAll() throw.
    String sPrefix = Config.pIngameAction.replace("%c", d).replace("%n", n) + " ";
    mcMessage("", sPrefix + m, false);
}
// IRC connection established — join the configured channel with its key.
@Override
public void onConnected() {
TkIrc.toIrc.joinChannel(Config.cName, Config.cKey);
}
/**
 * Announces an IRC channel join in Minecraft chat, when join/part relaying
 * is enabled. Our own join is not announced.
 */
@Override
public void onJoin(String n, String u, String h, String c) {
    if (Config.eJoinIRC && !n.equals(getNick())) {
        mcMessage("[" + Config.cName + "] * " + n + " joined the channel");
    }
}
/**
 * Announces an IRC nick change in Minecraft chat, when nick relaying is
 * enabled. Our own nick changes are not announced.
 */
@Override
public void onNick(String on, String nn) {
    if (Config.eIRCNick && !nn.equals(getNick())) {
        mcMessage("[" + Config.cName + "] * " + on + " is now known as "
            + nn);
    }
}
/**
 * Announces an IRC channel part in Minecraft chat, when join/part relaying
 * is enabled. Our own part is not announced.
 */
@Override
public void onPart(String n, String u, String h, String c, String r) {
    if (Config.eJoinIRC && !n.equals(getNick())) {
        mcMessage("[" + Config.cName + "] * " + n + " left the channel");
    }
}
/**
 * Announces an IRC quit (with its reason) in Minecraft chat, when join/part
 * relaying is enabled.
 */
@Override
public void onQuit(String n, String u, String h, String r) {
    if (Config.eJoinIRC) {
        mcMessage("[" + Config.cName + "] * " + n + " quit IRC (" + r + ")");
    }
}
/**
 * Announces a kick in Minecraft chat, when join/part relaying is enabled.
 *
 * @param n  nick of the user performing the kick.
 * @param kn nick of the kicked user.
 * @param r  kick reason.
 */
@Override
public void onKick(String n, String kn, String u, String h, String c,
        String r) {
    if (!Config.eJoinIRC) {
        return;
    }
    // Fixed missing space: previously rendered as "<nick>was kicked ...".
    mcMessage("[" + Config.cName + "] * " + kn
        + " was kicked from the channel by " + n + " (" + r + ")");
}
/**
 * Announces a kick in Minecraft chat (overload without user/host info),
 * when join/part relaying is enabled.
 *
 * @param s  nick of the user performing the kick.
 * @param kn nick of the kicked user.
 * @param r  kick reason.
 */
@Override
public void onKick(String s, String kn, String c, String r) {
    if (!Config.eJoinIRC) {
        return;
    }
    // Fixed missing space: previously rendered as "<nick>was kicked ...".
    mcMessage("[" + Config.cName + "] * " + kn
        + " was kicked from the channel by " + s + " (" + r + ")");
}
/**
 * Answers common CTCP queries (VERSION, TIME, SOURCE, PAGE, FINGER,
 * USERINFO, PING) sent to the bot.
 */
@Override
public void onCTCP(String n, String u, String h, String d, String m) {
    // Split once instead of re-splitting the message for every check.
    String[] parts = m.split(" ");
    String cmd = parts[0];
    if (cmd.equals("VERSION") || cmd.equals("CLIENTINFO")) {
        sendCTCPReply(n, "VERSION Personal TKserver 0.4");
    }
    if (cmd.equals("TIME")) {
        sendCTCPReply(n, "TIME " + new Date().toString());
    }
    if (cmd.equals("SOURCE")) {
        sendCTCPReply(n,
            "SOURCE Wait, i got this... a source is some kind of document right?");
    }
    if (cmd.equals("PAGE")) {
        sendCTCPReply(n,
            "PAGE i have just thrown your page into the trash bin, was it something important?");
    }
    if (cmd.equals("FINGER")) {
        sendCTCPReply(n, "FINGER kinky!");
    }
    if (cmd.equals("USERINFO")) {
        sendCTCPReply(n,
            "USERINFO Gender=Female; yeah, i have a gender! who said servers can't have genders!");
    }
    if (cmd.equals("PING")) {
        // Guard: a bare "PING" without a token previously threw
        // ArrayIndexOutOfBoundsException.
        if (parts.length > 1) {
            sendCTCPReply(n, "PING " + parts[1]);
        }
    }
}
/**
 * Relays a message from IRC into Minecraft chat.
 *
 * @param p    prefix prepended to every chat line (treated as "" when null);
 *             for PMs it is used as the sender label.
 * @param m    the raw IRC message; for PMs the first word is taken to be the
 *             target player's name — TODO confirm against callers.
 * @param isPM true to deliver as a private message to one player, false to
 *             broadcast to everyone.
 */
public void mcMessage(String p, String m,boolean isPM) {
if (m == null) {
return;
}
if (p == null) {
p = "";
}
// Convert IRC color/formatting codes into Minecraft ones.
m = stripColorsForMC(m);
String x = m;
try{
// For PMs the payload becomes only the second word of the message;
// the remainder is dropped.
if(isPM && m.length() > 3){x = m.split(" ")[1];}
if (Side.CLIENT == FMLCommonHandler.instance().getSide()) {
TkIrc.proxy.mcMessage(p, x);
} else {
// Chat lines are length-limited; split into chunks that fit after the prefix.
String[] mParts = x.split("(?<=\\G.{"
+ Integer.toString(118 - p.length()) + "})");
for (String mPart : mParts) {
if (MinecraftServer.getServer() != null && MinecraftServer.getServer().getConfigurationManager() != null) {
if(!isPM){
MinecraftServer.getServer().getConfigurationManager().sendChatMsg(new ChatComponentText(p+mPart));}
// PM: look up the target player by the first word of the original message.
else{MinecraftServer.getServer().getConfigurationManager().func_152612_a(m.split(" ")[0]).addChatComponentMessage(new ChatComponentText(p +": "+ mPart));}
}
}
}
// NOTE(review): deliberately best-effort — malformed PMs or unknown players
// are silently dropped rather than crashing the IRC bridge.
}catch(Exception err){/*TkIrc.toIrc.sendMessage(p, "yo, you fakd up shit.");*/}
}
/**
 * Broadcasts an IRC-sourced line to all Minecraft players (or forwards it to
 * the client-side proxy when running on the client).
 *
 * @param m the message to show; ignored when null.
 */
public void mcMessage(String m) {
    if (m == null) {
        return;
    }
    if (FMLCommonHandler.instance().getSide() == Side.CLIENT) {
        TkIrc.proxy.mcMessage(m);
        return;
    }
    // Chat lines are length-limited; chop the message into 118-char pieces.
    for (String piece : m.split("(?<=\\G.{" + Integer.toString(118) + "})")) {
        if (MinecraftServer.getServer() != null
                && MinecraftServer.getServer().getConfigurationManager() != null) {
            MinecraftServer.getServer().getConfigurationManager().sendChatMsg(new ChatComponentText(piece));
        }
    }
}
/**
 * Translates mIRC color/formatting codes in {@code message} into Minecraft
 * formatting codes, stripping anything that has no Minecraft equivalent.
 */
public static String stripColorsForMC(String message) {
    String ctrl = Character.toString('\003');
    // Strip MC codes that aren't colors/reset, then flatten "fg,bg" color pairs.
    message = message.replaceAll(bs + "([^\\d+r])", "$1");
    message = message.replaceAll("(" + ctrl + "\\d{2}),\\d{1,2}", bs + "1");
    // IRC color number -> Minecraft color code. Two-digit forms come first so
    // e.g. "14" is not consumed as "1" followed by a literal "4".
    String[][] colorMap = {
        {"15", "7"}, {"14", "8"}, {"13", "d"}, {"12", "9"}, {"11", "b"},
        {"10", "3"}, {"09", "a"}, {"08", "e"}, {"07", "6"}, {"06", "5"},
        {"05", "4"}, {"04", "c"}, {"03", "2"}, {"02", "1"}, {"01", "0"},
        {"00", "f"}, {"9", "a"}, {"8", "e"}, {"7", "6"}, {"6", "5"},
        {"5", "4"}, {"4", "c"}, {"3", "2"}, {"2", "1"}, {"1", "0"},
        {"0", "f"}
    };
    for (String[] pair : colorMap) {
        message = message.replaceAll(ctrl + pair[0], bs + pair[1]);
    }
    // A bare color control char and "reset" both reset formatting; bold,
    // underline, italic and reverse video are simply removed.
    message = message.replaceAll(ctrl, bs + "r");
    message = message.replaceAll(Character.toString('\002'), "");
    message = message.replaceAll(Character.toString('\017'), bs + "r");
    message = message.replaceAll(Character.toString('\037'), "");
    message = message.replaceAll(Character.toString('\035'), "");
    message = message.replaceAll(Character.toString('\026'), "");
    return message;
}
/**
 * Translates Minecraft formatting codes in {@code message} into the closest
 * mIRC color codes.
 */
public static String stripColorsForIRC(String message) {
    String ctrl = Character.toString('\003');
    // Minecraft color code -> IRC color number (both gray shades map to 14).
    String[][] colorMap = {
        {"7", "14"}, {"8", "14"}, {"d", "13"}, {"9", "12"}, {"b", "11"},
        {"3", "10"}, {"a", "09"}, {"e", "08"}, {"6", "07"}, {"5", "06"},
        {"4", "05"}, {"c", "04"}, {"2", "03"}, {"1", "02"}, {"0", "01"},
        {"f", "00"}
    };
    for (String[] pair : colorMap) {
        message = message.replaceAll(bs + pair[0], ctrl + pair[1]);
    }
    // Minecraft "reset" becomes a bare IRC color control char.
    message = message.replaceAll(bs + "r", ctrl);
    return message;
}
/**
 * Returns a uniformly distributed random integer in the inclusive range
 * [min, max].
 */
static int randomWithRange(int min, int max)
{
    return min + (int) (Math.random() * (max - min + 1));
}
/**
 * Prefixes every character of {@code msg} with a randomly chosen IRC color
 * code (01-14), producing a "rainbow" effect.
 */
static String colorRainbow(String msg){
    String ctrl = Character.toString('\003');
    // Build the 14-entry palette "\00301" .. "\00314".
    String[] palette = new String[14];
    for (int i = 0; i < palette.length; i++) {
        palette[i] = ctrl + String.format("%02d", i + 1);
    }
    StringBuilder rainbow = new StringBuilder();
    for (int i = 0; i < msg.length(); i++) {
        rainbow.append(palette[randomWithRange(0, 13)]).append(msg.charAt(i));
    }
    return rainbow.toString();
}
// Overload used when no user/host info is available; applies no coloring.
static String colorNick(String n) {
return n;
}
/**
 * Colors a nick for in-game display: channel operators are shown in dark
 * red, everyone else keeps the plain nick.
 */
static String colorNick(String n, String u, String h) {
    boolean isOp = TkIrc.ops.contains(n.toLowerCase());
    return isOp ? bs + "4" + n + bs + "r" : n;
}
// Placeholder for per-nick color lookup; currently returns the nick unchanged.
static String getNickColor(String n) {
return n;
}
}
|
|
/*
* Conditions Of Use
*
* This software was developed by employees of the National Institute of
* Standards and Technology (NIST), an agency of the Federal Government.
 * Pursuant to title 15 United States Code Section 105, works of NIST
* employees are not subject to copyright protection in the United States
* and are considered to be in the public domain. As a result, a formal
* license is not needed to use the software.
*
* This software is provided by NIST as a service and is expressly
* provided "AS IS." NIST MAKES NO WARRANTY OF ANY KIND, EXPRESS, IMPLIED
* OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTY OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT
* AND DATA ACCURACY. NIST does not warrant or make any representations
* regarding the use of the software or the results thereof, including but
* not limited to the correctness, accuracy, reliability or usefulness of
* the software.
*
* Permission to use this software is contingent upon your acceptance
* of the terms of this agreement
*
* .
*
*/
/*
*
* IPv6 Support added by Emil Ivov ([email protected])<br/>
* Network Research Team (http://www-r2.u-strasbg.fr))<br/>
* Louis Pasteur University - Strasbourg - France<br/>
*
*Bug fixes for corner cases were contributed by Thomas Froment.
*/
package gov.nist.core;
import gov.nist.javax.sdp.parser.Lexer;
import java.text.ParseException;
/**
* Parser for host names.
*
*@version 1.2
*
*@author M. Ranganathan
*/
public class HostNameParser extends ParserCore {
    /**
     * Determines whether or not we should tolerate and strip address scope
     * zones from IPv6 addresses. Address scope zones are sometimes returned
     * at the end of IPv6 addresses generated by InetAddress.getHostAddress().
     * They are however not part of the SIP semantics so basically this flag
     * determines whether or not the parser should be stripping them (as
     * opposed to simply being blunt and throwing an exception).
     */
    private boolean stripAddressScopeZones = false;

    /**
     * Creates a parser over the given host name string.
     *
     * @param hname the host name to parse.
     */
    public HostNameParser(String hname) {
        this.lexer = new LexerCore("charLexer", hname);
        stripAddressScopeZones
            = Boolean.getBoolean("gov.nist.core.STRIP_ADDR_SCOPES");
    }

    /**
     * The lexer is initialized with the buffer.
     */
    public HostNameParser(LexerCore lexer) {
        this.lexer = lexer;
        lexer.selectLexer("charLexer");
        stripAddressScopeZones
            = Boolean.getBoolean("gov.nist.core.STRIP_ADDR_SCOPES");
    }

    /** Characters that may legally appear in a domain label, plus separators. */
    private static final char[] VALID_DOMAIN_LABEL_CHAR =
        new char[] {LexerCore.ALPHADIGIT_VALID_CHARS, '-', '.'};

    /**
     * Consumes a run of valid domain-label characters from the lexer.
     *
     * @throws ParseException on a lexer error.
     */
    protected void consumeDomainLabel() throws ParseException {
        if (debug)
            dbg_enter("domainLabel");
        try {
            lexer.consumeValidChars(VALID_DOMAIN_LABEL_CHAR);
        } finally {
            if (debug)
                dbg_leave("domainLabel");
        }
    }

    /**
     * Parses a bracketed IPv6 reference such as "[::1]", optionally stripping
     * an address scope zone ("%...") when configured to do so.
     *
     * @return the reference including the surrounding square brackets.
     * @throws ParseException if the input is not a valid IPv6 reference.
     */
    protected String ipv6Reference() throws ParseException {
        // StringBuilder: purely local, no synchronization needed (was StringBuffer).
        StringBuilder retval = new StringBuilder();
        if (debug)
            dbg_enter("ipv6Reference");
        try {
            if (stripAddressScopeZones) {
                while (lexer.hasMoreChars()) {
                    char la = lexer.lookAhead(0);
                    //'%' is ipv6 address scope zone. see detail at
                    //java.sun.com/j2se/1.5.0/docs/api/java/net/Inet6Address.html
                    if (LexerCore.isHexDigit(la) || la == '.' || la == ':'
                        || la == '[') {
                        lexer.consume(1);
                        retval.append(la);
                    } else if (la == ']') {
                        lexer.consume(1);
                        retval.append(la);
                        return retval.toString();
                    } else if (la == '%') {
                        //we need to strip the address scope zone.
                        lexer.consume(1);
                        String rest = lexer.getRest();
                        if (rest == null || rest.length() == 0) {
                            //head for the parse exception
                            break;
                        }
                        //we strip everything until either the end of the string
                        //or a closing square bracket (])
                        int stripLen = rest.indexOf(']');
                        if (stripLen == -1) {
                            //no square bracket -> not a valid ipv6 reference
                            break;
                        }
                        lexer.consume(stripLen + 1);
                        retval.append("]");
                        return retval.toString();
                    } else
                        break;
                }
            }
            else
            {
                while (lexer.hasMoreChars())
                {
                    char la = lexer.lookAhead(0);
                    if (LexerCore.isHexDigit(la) || la == '.'
                        || la == ':' || la == '[') {
                        lexer.consume(1);
                        retval.append(la);
                    } else if (la == ']') {
                        lexer.consume(1);
                        retval.append(la);
                        return retval.toString();
                    } else
                        break;
                }
            }
            // Only reached when the reference was never closed with ']'.
            throw new ParseException(
                lexer.getBuffer() + ": Illegal Host name ",
                lexer.getPtr());
        } finally {
            if (debug)
                dbg_leave("ipv6Reference");
        }
    }

    /**
     * Parses a host: a bracketed IPv6 reference, a bare IPv6 address (which
     * gets brackets added), an IPv4 address, or a host name.
     *
     * @return the parsed {@link Host}.
     * @throws ParseException if no host name is present.
     */
    public Host host() throws ParseException {
        if (debug)
            dbg_enter("host");
        try {
            String hostname;
            //IPv6 reference
            if (lexer.lookAhead(0) == '[') {
                hostname = ipv6Reference();
            }
            //IPv6 address (i.e. missing square brackets)
            else if (isIPv6Address(lexer.getRest()))
            {
                int startPtr = lexer.getPtr();
                lexer.consumeValidChars(
                    new char[] {LexerCore.ALPHADIGIT_VALID_CHARS, ':'});
                // Normalize a bare IPv6 address to the bracketed form.
                hostname
                    = new StringBuilder("[").append(
                        lexer.getBuffer().substring(startPtr, lexer.getPtr()))
                        .append("]").toString();
            }
            //IPv4 address or hostname
            else {
                int startPtr = lexer.getPtr();
                consumeDomainLabel();
                hostname = lexer.getBuffer().substring(startPtr, lexer.getPtr());
            }
            if (hostname.length() == 0)
                throw new ParseException(
                    lexer.getBuffer() + ": Missing host name",
                    lexer.getPtr());
            else
                return new Host(hostname);
        } finally {
            if (debug)
                dbg_leave("host");
        }
    }

    /**
     * Tries to determine whether the address in <tt>uriHeader</tt> could be
     * an IPv6 address by counting the number of colons that appear in it.
     *
     * @param uriHeader the string (supposedly the value of a URI header) that
     * we have received for parsing.
     *
     * @return true if the host part of <tt>uriHeader</tt> could be an IPv6
     * address (i.e. contains at least two colons) and false otherwise.
     */
    private boolean isIPv6Address(String uriHeader)
    {
        // approximately detect the end the host part.
        //first check if we have an uri param
        int hostEnd = uriHeader.indexOf(Lexer.QUESTION);
        //if not or if it appears after a semi-colon then the end of the
        //address would be a header param.
        int semiColonIndex = uriHeader.indexOf(Lexer.SEMICOLON);
        if (hostEnd == -1
            || (semiColonIndex != -1 && hostEnd > semiColonIndex))
            hostEnd = semiColonIndex;
        //if there was no header param either the address
        //continues until the end of the string
        if (hostEnd == -1)
            hostEnd = uriHeader.length();
        //extract the address
        String host = uriHeader.substring(0, hostEnd);
        int firstColonIndex = host.indexOf(Lexer.COLON);
        if (firstColonIndex == -1)
            return false;
        int secondColonIndex = host.indexOf(Lexer.COLON, firstColonIndex + 1);
        if (secondColonIndex == -1)
            return false;
        return true;
    }

    /**
     * Parses a host:port string
     *
     * @param allowWS - whether whitespace is allowed around ':', only true for Via headers
     * @return the parsed host and (optional) port.
     * @throws ParseException on an illegal hostname character or bad port.
     */
    public HostPort hostPort(boolean allowWS) throws ParseException {
        if (debug)
            dbg_enter("hostPort");
        try {
            Host host = this.host();
            HostPort hp = new HostPort();
            hp.setHost(host);
            // Has a port?
            if (allowWS) lexer.SPorHT(); // white space before ":port" should be accepted
            if (lexer.hasMoreChars()) {
                char la = lexer.lookAhead(0);
                switch (la)
                {
                    case ':':
                        lexer.consume(1);
                        if (allowWS) lexer.SPorHT(); // white space before port number should be accepted
                        try {
                            String port = lexer.number();
                            hp.setPort(Integer.parseInt(port));
                        } catch (NumberFormatException nfe) {
                            throw new ParseException(
                                lexer.getBuffer() + " :Error parsing port ",
                                lexer.getPtr());
                        }
                        break;
                    case ',': // allowed in case of multi-headers, e.g. Route
                        // Could check that current header is a multi hdr
                    case ';': // OK, can appear in URIs (parameters)
                    case '?': // same, header parameters
                    case '>': // OK, can appear in headers
                    case ' ': // OK, allow whitespace
                    case '\t':
                    case '\r':
                    case '\n':
                    case '/': // e.g. http://[::1]/xyz.html
                        break;
                    case '%':
                        if (stripAddressScopeZones) {
                            break;//OK,allow IPv6 address scope zone
                        }
                        // Intentional fall-through: '%' is illegal when scope
                        // zones are not tolerated.
                    default:
                        if (!allowWS) {
                            throw new ParseException(lexer.getBuffer() +
                                " Illegal character in hostname:" + lexer.lookAhead(0),
                                lexer.getPtr());
                        }
                }
            }
            return hp;
        } finally {
            if (debug)
                dbg_leave("hostPort");
        }
    }

    /** Ad-hoc driver exercising the parser on a few sample inputs. */
    public static void main(String[] args) throws ParseException {
        String[] hostNames =
            {
                "foo.bar.com:1234",
                "proxima.chaplin.bt.co.uk",
                "129.6.55.181:2345",
                ":1234",
                "foo.bar.com: 1234",
                "foo.bar.com : 1234 ",
                "MIK_S:1234"
            };
        for (int i = 0; i < hostNames.length; i++) {
            try {
                HostNameParser hnp = new HostNameParser(hostNames[i]);
                HostPort hp = hnp.hostPort(true);
                System.out.println("[" + hp.encode() + "]");
            } catch (ParseException ex) {
                System.out.println("exception text = " + ex.getMessage());
            }
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.