index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
31,939 | sonetel.phonenumber | PhoneNumber |
Phone number class
| class PhoneNumber(util.Resource):
"""
Phone number class
"""
def __init__(self, access_token):
if not access_token:
raise e.AuthException('access_token is required')
super().__init__(access_token=access_token)
self._url = f'{const.API_URI_BASE}{const.API_ENDPOINT_ACCOUNT}{self._accountid}' \
f'{const.API_ENDPOINT_NUMBERSUBSCRIPTION}'
def get(self, e164only: bool = True, number: str = '') -> dict:
"""
List all the phone numbers present in the account.
:param e164only: Optional. Boolean. Only return a list of phone numbers if set to True.
Set to True by default.
:param number: Optional. String. If you only want information about one of your numbers, pass it as a string.
**DOCS**: https://docs.sonetel.com/docs/sonetel-documentation/YXBpOjE2MjQ3MzI4-phone-numbers
:return: Information about the numbers assigned to account.
"""
url = self._url
if not isinstance(number, str):
number = str(number)
if number:
if is_e164(number):
url += number
else:
return util.prepare_error(
code=const.ERR_NUM_NOT_E164,
message=f'"{number}" is not a valid e164 number'
)
api_response = util.send_api_request(token=self._token, uri=url)
response = api_response['response']
# No numbers are found
if response == 'No entries found':
return {
'status': 'success',
'response': 'No entries found'
}
# Only return a list of e164 numbers, without any additional metadata
if e164only:
nums = []
for entry in response:
nums.append(entry['phnum'])
return {
'status': 'success',
'response': nums
}
# Return full response
return {
'status': 'success',
'response': response
}
def add(self, number: str) -> dict:
"""
Buy a phone number that is available. Numbers that are available for purchase can be checked
from the ``/availablephonenumber`` API endpoint.
**DOCS**: https://docs.sonetel.com/docs/sonetel-documentation/YXBpOjE2MjQ3MzI4-phone-numbers
:param number: the phone number you want to purchase.
:return: Dict containing the success response or an error message.
"""
if not isinstance(number, str):
number = str(number)
# Request body
if is_e164(number):
body = {
"phnum": number
}
else:
return util.prepare_error(
code=const.ERR_NUM_NOT_E164,
message=f'"{number}" is not a valid e164 number'
)
return util.send_api_request(
token=self._token,
uri=self._url,
method='post',
body=dumps(body)
)
def delete(self, number: str):
"""
Remove a number from account. The phone number is removed immediately and cannot be recovered.
:param number: The phone number to remove from account.
:return: Dict containing the success response or an error message.
"""
if not isinstance(number, str):
number = str(number)
if is_e164(number):
url = f'{self._url}{number}'
else:
return util.prepare_error(
code=const.ERR_NUM_NOT_E164,
message=f'"{number}" is not a valid e164 number'
)
return util.send_api_request(
token=self._token,
uri=url,
method='delete'
)
def update(self, number: str, connect_to_type: str, connect_to) -> dict:
"""
Update the number's call forwarding settings.
:param number: E164number for which the settings should be updated
:param connect_to: the ID of the destination where the incoming calls to this number should be forwarded.
:param connect_to_type: The destination type where the calls should be forwarded. Accepted values 'user', 'phnum', 'sip' and 'app'.
"""
# Checks
if not number:
return util.prepare_error(
code=const.ERR_NUM_UPDATE_EMPTY,
message='number is required to update call settings'
)
if not connect_to:
return util.prepare_error(
code=const.ERR_NUM_UPDATE_EMPTY,
message='connect_to is required to update call settings'
)
if connect_to_type not in const.CONST_CONNECT_TO_TYPES:
return util.prepare_error(
code=const.ERR_NUM_UPDATE_EMPTY,
message=f'invalid connect_to_type value - {connect_to_type}'
)
# Prepare request
body = {
"connect_to_type": connect_to_type,
"connect_to": connect_to
}
url = f'{self._url}{number}'
# Return result
return util.send_api_request(
token=self._token,
uri=url,
method='put',
body=dumps(body)
)
| (access_token) |
31,940 | sonetel.phonenumber | __init__ | null | def __init__(self, access_token):
if not access_token:
raise e.AuthException('access_token is required')
super().__init__(access_token=access_token)
self._url = f'{const.API_URI_BASE}{const.API_ENDPOINT_ACCOUNT}{self._accountid}' \
f'{const.API_ENDPOINT_NUMBERSUBSCRIPTION}'
| (self, access_token) |
31,941 | sonetel.phonenumber | add |
Buy a phone number that is available. Numbers that are available for purchase can be checked
from the ``/availablephonenumber`` API endpoint.
**DOCS**: https://docs.sonetel.com/docs/sonetel-documentation/YXBpOjE2MjQ3MzI4-phone-numbers
:param number: the phone number you want to purchase.
:return: Dict containing the success response or an error message.
| def add(self, number: str) -> dict:
"""
Buy a phone number that is available. Numbers that are available for purchase can be checked
from the ``/availablephonenumber`` API endpoint.
**DOCS**: https://docs.sonetel.com/docs/sonetel-documentation/YXBpOjE2MjQ3MzI4-phone-numbers
:param number: the phone number you want to purchase.
:return: Dict containing the success response or an error message.
"""
if not isinstance(number, str):
number = str(number)
# Request body
if is_e164(number):
body = {
"phnum": number
}
else:
return util.prepare_error(
code=const.ERR_NUM_NOT_E164,
message=f'"{number}" is not a valid e164 number'
)
return util.send_api_request(
token=self._token,
uri=self._url,
method='post',
body=dumps(body)
)
| (self, number: str) -> dict |
31,942 | sonetel.phonenumber | delete |
Remove a number from account. The phone number is removed immediately and cannot be recovered.
:param number: The phone number to remove from account.
:return: Dict containing the success response or an error message.
| def delete(self, number: str):
"""
Remove a number from account. The phone number is removed immediately and cannot be recovered.
:param number: The phone number to remove from account.
:return: Dict containing the success response or an error message.
"""
if not isinstance(number, str):
number = str(number)
if is_e164(number):
url = f'{self._url}{number}'
else:
return util.prepare_error(
code=const.ERR_NUM_NOT_E164,
message=f'"{number}" is not a valid e164 number'
)
return util.send_api_request(
token=self._token,
uri=url,
method='delete'
)
| (self, number: str) |
31,943 | sonetel.phonenumber | get |
List all the phone numbers present in the account.
:param e164only: Optional. Boolean. Only return a list of phone numbers if set to True.
Set to True by default.
:param number: Optional. String. If you only want information about one of your numbers, pass it as a string.
**DOCS**: https://docs.sonetel.com/docs/sonetel-documentation/YXBpOjE2MjQ3MzI4-phone-numbers
:return: Information about the numbers assigned to account.
| def get(self, e164only: bool = True, number: str = '') -> dict:
"""
List all the phone numbers present in the account.
:param e164only: Optional. Boolean. Only return a list of phone numbers if set to True.
Set to True by default.
:param number: Optional. String. If you only want information about one of your numbers, pass it as a string.
**DOCS**: https://docs.sonetel.com/docs/sonetel-documentation/YXBpOjE2MjQ3MzI4-phone-numbers
:return: Information about the numbers assigned to account.
"""
url = self._url
if not isinstance(number, str):
number = str(number)
if number:
if is_e164(number):
url += number
else:
return util.prepare_error(
code=const.ERR_NUM_NOT_E164,
message=f'"{number}" is not a valid e164 number'
)
api_response = util.send_api_request(token=self._token, uri=url)
response = api_response['response']
# No numbers are found
if response == 'No entries found':
return {
'status': 'success',
'response': 'No entries found'
}
# Only return a list of e164 numbers, without any additional metadata
if e164only:
nums = []
for entry in response:
nums.append(entry['phnum'])
return {
'status': 'success',
'response': nums
}
# Return full response
return {
'status': 'success',
'response': response
}
| (self, e164only: bool = True, number: str = '') -> dict |
31,944 | sonetel.phonenumber | update |
Update the number's call forwarding settings.
:param number: E164number for which the settings should be updated
:param connect_to: the ID of the destination where the incoming calls to this number should be forwarded.
:param connect_to_type: The destination type where the calls should be forwarded. Accepted values 'user', 'phnum', 'sip' and 'app'.
| def update(self, number: str, connect_to_type: str, connect_to) -> dict:
"""
Update the number's call forwarding settings.
:param number: E164number for which the settings should be updated
:param connect_to: the ID of the destination where the incoming calls to this number should be forwarded.
:param connect_to_type: The destination type where the calls should be forwarded. Accepted values 'user', 'phnum', 'sip' and 'app'.
"""
# Checks
if not number:
return util.prepare_error(
code=const.ERR_NUM_UPDATE_EMPTY,
message='number is required to update call settings'
)
if not connect_to:
return util.prepare_error(
code=const.ERR_NUM_UPDATE_EMPTY,
message='connect_to is required to update call settings'
)
if connect_to_type not in const.CONST_CONNECT_TO_TYPES:
return util.prepare_error(
code=const.ERR_NUM_UPDATE_EMPTY,
message=f'invalid connect_to_type value - {connect_to_type}'
)
# Prepare request
body = {
"connect_to_type": connect_to_type,
"connect_to": connect_to
}
url = f'{self._url}{number}'
# Return result
return util.send_api_request(
token=self._token,
uri=url,
method='put',
body=dumps(body)
)
| (self, number: str, connect_to_type: str, connect_to) -> dict |
31,945 | sonetel.recording | Recording |
Class representing the call recording resource.
| class Recording(util.Resource):
"""
Class representing the call recording resource.
"""
def __init__(self, access_token: str = None):
super().__init__(access_token)
self._url = f'{const.API_URI_BASE}{const.API_ENDPOINT_CALL_RECORDING}'
def get(self,
start_time: str = None,
end_time: str = None,
file_access_details: bool = False,
voice_call_details: bool = False,
rec_id: str = None
):
"""
Get a list of all the call recordings.
:param start_time: The start timestamp in the format YYYYMMDDTHH:MM:SSZ. Example 20201231T23:59:59. Limit the results to recordings created after this timestamp.
:param end_time: The end timestamp in the format YYYYMMDDTHH:MM:SSZ. Example 20221123T18:59:59. Limit the results to recordings created before this timestamp.
:param rec_id: The unique recording ID. If not included, returns all the recordings.
:param file_access_details: Boolean. Include the details needed to download recordings.
:param voice_call_details: Boolean. Include the details of the voice calls.
"""
url = self._url
# Prepare the request URL based on the params passed to the method
if rec_id:
# Get a single recording
url += f'/{rec_id}'
else:
# Search for and return multiple recordings
url += f'?account_id={self._accountid}'
if util.is_valid_date(start_time) and util.is_valid_date(end_time) and util.date_diff(start_time, end_time):
url += f'&created_date_max={end_time}&created_date_min={start_time}'
fields = []
if file_access_details:
fields.append('file_access_details')
if voice_call_details:
fields.append('voice_call_details')
if len(fields) > 0:
url += '&fields=' + ','.join(fields)
return util.send_api_request(token=self._token, uri=url, method='get')
def delete(self, rec_id: str) -> dict:
"""
Delete a call recording.
:param rec_id: The ID of the recording that should be deleted
:returns: A representation of the deleted recording.
"""
url = f'{self._url}/{rec_id}'
return util.send_api_request(token=self._token, uri=url, method='delete')
| (access_token: str = None) |
31,946 | sonetel.recording | __init__ | null | def __init__(self, access_token: str = None):
super().__init__(access_token)
self._url = f'{const.API_URI_BASE}{const.API_ENDPOINT_CALL_RECORDING}'
| (self, access_token: Optional[str] = None) |
31,947 | sonetel.recording | delete |
Delete a call recording.
:param rec_id: The ID of the recording that should be deleted
:returns: A representation of the deleted recording.
| def delete(self, rec_id: str) -> dict:
"""
Delete a call recording.
:param rec_id: The ID of the recording that should be deleted
:returns: A representation of the deleted recording.
"""
url = f'{self._url}/{rec_id}'
return util.send_api_request(token=self._token, uri=url, method='delete')
| (self, rec_id: str) -> dict |
31,948 | sonetel.recording | get |
Get a list of all the call recordings.
:param start_time: The start timestamp in the format YYYYMMDDTHH:MM:SSZ. Example 20201231T23:59:59. Limit the results to recordings created after this timestamp.
:param end_time: The end timestamp in the format YYYYMMDDTHH:MM:SSZ. Example 20221123T18:59:59. Limit the results to recordings created before this timestamp.
:param rec_id: The unique recording ID. If not included, returns all the recordings.
:param file_access_details: Boolean. Include the details needed to download recordings.
:param voice_call_details: Boolean. Include the details of the voice calls.
| def get(self,
start_time: str = None,
end_time: str = None,
file_access_details: bool = False,
voice_call_details: bool = False,
rec_id: str = None
):
"""
Get a list of all the call recordings.
:param start_time: The start timestamp in the format YYYYMMDDTHH:MM:SSZ. Example 20201231T23:59:59. Limit the results to recordings created after this timestamp.
:param end_time: The end timestamp in the format YYYYMMDDTHH:MM:SSZ. Example 20221123T18:59:59. Limit the results to recordings created before this timestamp.
:param rec_id: The unique recording ID. If not included, returns all the recordings.
:param file_access_details: Boolean. Include the details needed to download recordings.
:param voice_call_details: Boolean. Include the details of the voice calls.
"""
url = self._url
# Prepare the request URL based on the params passed to the method
if rec_id:
# Get a single recording
url += f'/{rec_id}'
else:
# Search for and return multiple recordings
url += f'?account_id={self._accountid}'
if util.is_valid_date(start_time) and util.is_valid_date(end_time) and util.date_diff(start_time, end_time):
url += f'&created_date_max={end_time}&created_date_min={start_time}'
fields = []
if file_access_details:
fields.append('file_access_details')
if voice_call_details:
fields.append('voice_call_details')
if len(fields) > 0:
url += '&fields=' + ','.join(fields)
return util.send_api_request(token=self._token, uri=url, method='get')
| (self, start_time: Optional[str] = None, end_time: Optional[str] = None, file_access_details: bool = False, voice_call_details: bool = False, rec_id: Optional[str] = None) |
31,949 | sonetel.users | User |
Create and manage users in a Sonetel account
| class User(util.Resource):
"""
Create and manage users in a Sonetel account
"""
def __init__(self, access_token: str):
if not access_token:
raise e.AuthException('access_token is required')
super().__init__(access_token=access_token)
self._url = f'{const.API_URI_BASE}{const.API_ENDPOINT_ACCOUNT}{self._accountid}{const.API_ENDPOINT_USER}'
def get(self, all_users: bool = False, userid: str = ''):
"""
Fetch details about all users or a specific user.
If userid is not included with the request, details of the current user are fetched.
:param all_users: Boolean. Optional. Get a list of all the users in the account. Defaults to False.
:param userid: String. Optional. ID of a specific user to get the information for.
"""
url = self._url
if userid:
url += userid
elif not all_users:
url += self._userid
return util.send_api_request(
token=self._token,
uri=url,
method='get') if util.is_valid_token(self._decoded_token) else False
def add(self,
email: str,
f_name: str,
l_name: str,
password: str,
user_type: str = 'regular'
) -> dict:
"""
Adds a new user. Account admin privilege required.
:param email: Required. String. The email address of the user.
:param f_name: Required. String. The first name of the user
:param l_name: Required. String. The last name of the user.
:param password: Required. String. The password to be used.
:param user_type: Required. String. The privilege level of the new user. Accepted values regular and admin.
Defaults to regular.
"""
# Checks
if not password:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='password cannot be empty'
)
if not email:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='email cannot be empty'
)
if not f_name:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='first name cannot be empty'
)
if not l_name:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='last name cannot be empty'
)
# Request
url = self._url
body = {
"user_fname": f_name,
"user_lname": l_name,
"email": email,
"password": password,
"type": user_type
}
return util.send_api_request(
token=self._token,
uri=url,
method='post',
body=dumps(body)
)
def delete(self, userid: str):
"""
Delete a user from your Sonetel account.
:param userid: String. Required. The unique ID of the user to be deleted.
"""
if not userid:
return util.prepare_error(
code=const.ERR_USED_ID_EMPTY,
message='user id cannot be empty'
)
url = self._url + userid
return util.send_api_request(
token=self._token,
uri=url,
method='delete'
)
def update(self, request: dict):
"""
update user settings
"""
raise NotImplementedError
| (access_token: str) |
31,950 | sonetel.users | __init__ | null | def __init__(self, access_token: str):
if not access_token:
raise e.AuthException('access_token is required')
super().__init__(access_token=access_token)
self._url = f'{const.API_URI_BASE}{const.API_ENDPOINT_ACCOUNT}{self._accountid}{const.API_ENDPOINT_USER}'
| (self, access_token: str) |
31,951 | sonetel.users | add |
Adds a new user. Account admin privilege required.
:param email: Required. String. The email address of the user.
:param f_name: Required. String. The first name of the user
:param l_name: Required. String. The last name of the user.
:param password: Required. String. The password to be used.
:param user_type: Required. String. The privilege level of the new user. Accepted values regular and admin.
Defaults to regular.
| def add(self,
email: str,
f_name: str,
l_name: str,
password: str,
user_type: str = 'regular'
) -> dict:
"""
Adds a new user. Account admin privilege required.
:param email: Required. String. The email address of the user.
:param f_name: Required. String. The first name of the user
:param l_name: Required. String. The last name of the user.
:param password: Required. String. The password to be used.
:param user_type: Required. String. The privilege level of the new user. Accepted values regular and admin.
Defaults to regular.
"""
# Checks
if not password:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='password cannot be empty'
)
if not email:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='email cannot be empty'
)
if not f_name:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='first name cannot be empty'
)
if not l_name:
return util.prepare_error(
code=const.ERR_USER_DETAIL_EMPTY,
message='last name cannot be empty'
)
# Request
url = self._url
body = {
"user_fname": f_name,
"user_lname": l_name,
"email": email,
"password": password,
"type": user_type
}
return util.send_api_request(
token=self._token,
uri=url,
method='post',
body=dumps(body)
)
| (self, email: str, f_name: str, l_name: str, password: str, user_type: str = 'regular') -> dict |
31,952 | sonetel.users | delete |
Delete a user from your Sonetel account.
:param userid: String. Required. The unique ID of the user to be deleted.
| def delete(self, userid: str):
"""
Delete a user from your Sonetel account.
:param userid: String. Required. The unique ID of the user to be deleted.
"""
if not userid:
return util.prepare_error(
code=const.ERR_USED_ID_EMPTY,
message='user id cannot be empty'
)
url = self._url + userid
return util.send_api_request(
token=self._token,
uri=url,
method='delete'
)
| (self, userid: str) |
31,953 | sonetel.users | get |
Fetch details about all users or a specific user.
If userid is not included with the request, details of the current user are fetched.
:param all_users: Boolean. Optional. Get a list of all the users in the account. Defaults to False.
:param userid: String. Optional. ID of a specific user to get the information for.
| def get(self, all_users: bool = False, userid: str = ''):
"""
Fetch details about all users or a specific user.
If userid is not included with the request, details of the current user are fetched.
:param all_users: Boolean. Optional. Get a list of all the users in the account. Defaults to False.
:param userid: String. Optional. ID of a specific user to get the information for.
"""
url = self._url
if userid:
url += userid
elif not all_users:
url += self._userid
return util.send_api_request(
token=self._token,
uri=url,
method='get') if util.is_valid_token(self._decoded_token) else False
| (self, all_users: bool = False, userid: str = '') |
31,954 | sonetel.users | update |
update user settings
| def update(self, request: dict):
"""
update user settings
"""
raise NotImplementedError
| (self, request: dict) |
31,964 | pingouin.utils | _check_dataframe | Checks whether data is a dataframe or can be converted to a dataframe.
If successful, a dataframe is returned. If not successful, a ValueError is
raised.
| def _check_dataframe(data=None, dv=None, between=None, within=None, subject=None, effects=None):
"""Checks whether data is a dataframe or can be converted to a dataframe.
If successful, a dataframe is returned. If not successful, a ValueError is
raised.
"""
# Check that data is a dataframe
if not isinstance(data, pd.DataFrame):
# DataMatrix objects can be safely convert to DataFrame objects. By
# first checking the name of the class, we avoid having to actually
# import DataMatrix unless it is necessary.
if data.__class__.__name__ == "DataMatrix": # noqa
try:
from datamatrix import DataMatrix, convert as cnv # noqa
except ImportError:
raise ValueError(
"Failed to convert object to pandas dataframe (DataMatrix not available)" # noqa
)
else:
if isinstance(data, DataMatrix):
data = cnv.to_pandas(data)
else:
raise ValueError("Data must be a pandas dataframe or compatible object.")
else:
raise ValueError("Data must be a pandas dataframe or compatible object.")
# Check that both dv and data are provided.
if any(v is None for v in [dv, data]):
raise ValueError("DV and data must be specified")
# Check that dv is a numeric variable
if data[dv].dtype.kind not in "fi":
raise ValueError("DV must be numeric.")
# Check that effects is provided
if effects not in ["within", "between", "interaction", "all"]:
raise ValueError("Effects must be: within, between, interaction, all")
# Check that within is a string, int or a list (rm_anova2)
if effects == "within" and not isinstance(within, (str, int, list)):
raise ValueError("within must be a string, int or a list.")
# Check that subject identifier is provided in rm_anova and friedman.
if effects == "within" and subject is None:
raise ValueError("subject must be specified when effects=within")
# Check that between is a string or a list (anova2)
if effects == "between" and not isinstance(between, (str, int, list)):
raise ValueError("between must be a string, int or a list.")
# Check that both between and within are present for interaction
if effects == "interaction":
for input in [within, between]:
if not isinstance(input, (str, int, list)):
raise ValueError("within and between must be specified when effects=interaction")
return data
| (data=None, dv=None, between=None, within=None, subject=None, effects=None) |
31,965 | pingouin.utils | _check_eftype | Check validity of eftype | def _check_eftype(eftype):
"""Check validity of eftype"""
if eftype.lower() in [
"none",
"hedges",
"cohen",
"r",
"pointbiserialr",
"eta-square",
"odds-ratio",
"auc",
"cles",
]:
return True
else:
return False
| (eftype) |
31,966 | pingouin.utils | _flatten_list | Flatten an arbitrarily nested list into a new list.
This can be useful to select pandas DataFrame columns.
From https://stackoverflow.com/a/16176969/10581531
Examples
--------
>>> from pingouin.utils import _flatten_list
>>> x = ['X1', ['M1', 'M2'], 'Y1', ['Y2']]
>>> _flatten_list(x)
['X1', 'M1', 'M2', 'Y1', 'Y2']
>>> x = ['Xaa', 'Xbb', 'Xcc']
>>> _flatten_list(x)
['Xaa', 'Xbb', 'Xcc']
>>> x = ['Xaa', ('Xbb', 'Xcc'), (1, 2), (1)]
>>> _flatten_list(x)
['Xaa', ('Xbb', 'Xcc'), (1, 2), 1]
>>> _flatten_list(x, include_tuple=True)
['Xaa', 'Xbb', 'Xcc', 1, 2, 1]
| def _flatten_list(x, include_tuple=False):
"""Flatten an arbitrarily nested list into a new list.
This can be useful to select pandas DataFrame columns.
From https://stackoverflow.com/a/16176969/10581531
Examples
--------
>>> from pingouin.utils import _flatten_list
>>> x = ['X1', ['M1', 'M2'], 'Y1', ['Y2']]
>>> _flatten_list(x)
['X1', 'M1', 'M2', 'Y1', 'Y2']
>>> x = ['Xaa', 'Xbb', 'Xcc']
>>> _flatten_list(x)
['Xaa', 'Xbb', 'Xcc']
>>> x = ['Xaa', ('Xbb', 'Xcc'), (1, 2), (1)]
>>> _flatten_list(x)
['Xaa', ('Xbb', 'Xcc'), (1, 2), 1]
>>> _flatten_list(x, include_tuple=True)
['Xaa', 'Xbb', 'Xcc', 1, 2, 1]
"""
# If x is not iterable, return x
if not isinstance(x, collections.abc.Iterable):
return x
# Initialize empty output variable
result = []
# Loop over items in x
for el in x:
# Check if element is iterable
el_is_iter = isinstance(el, collections.abc.Iterable)
if el_is_iter:
if not isinstance(el, (str, tuple)):
result.extend(_flatten_list(el))
else:
if isinstance(el, tuple) and include_tuple:
result.extend(_flatten_list(el))
else:
result.append(el)
else:
result.append(el)
# Remove None from output
result = [r for r in result if r is not None]
return result
| (x, include_tuple=False) |
31,967 | pingouin.utils | _is_mpmath_installed | Check if mpmath is installed. | def _is_mpmath_installed(raise_error=False):
"""Check if mpmath is installed."""
try:
import mpmath # noqa
is_installed = True
except OSError: # pragma: no cover
is_installed = False
# Raise error (if needed) :
if raise_error and not is_installed: # pragma: no cover
raise OSError("mpmath needs to be installed. Please use `pip " "install mpmath`.")
return is_installed
| (raise_error=False) |
31,968 | pingouin.utils | _is_sklearn_installed | Check if sklearn is installed. | def _is_sklearn_installed(raise_error=False):
"""Check if sklearn is installed."""
try:
import sklearn # noqa
is_installed = True
except OSError: # pragma: no cover
is_installed = False
# Raise error (if needed) :
if raise_error and not is_installed: # pragma: no cover
raise OSError("sklearn needs to be installed. Please use `pip " "install scikit-learn`.")
return is_installed
| (raise_error=False) |
31,969 | pingouin.utils | _is_statsmodels_installed | Check if statsmodels is installed. | def _is_statsmodels_installed(raise_error=False):
"""Check if statsmodels is installed."""
try:
import statsmodels # noqa
is_installed = True
except OSError: # pragma: no cover
is_installed = False
# Raise error (if needed) :
if raise_error and not is_installed: # pragma: no cover
raise OSError("statsmodels needs to be installed. Please use `pip " "install statsmodels`.")
return is_installed
| (raise_error=False) |
31,970 | pingouin.utils | _perm_pval |
Compute p-values from a permutation test.
Parameters
----------
bootstat : 1D array
Permutation distribution.
estimate : float or int
Point estimate.
alternative : str
Tail for p-value. Can be either `'two-sided'` (default), `'greater'` or `'less'`.
Returns
-------
p : float
P-value.
| def _perm_pval(bootstat, estimate, alternative="two-sided"):
"""
Compute p-values from a permutation test.
Parameters
----------
bootstat : 1D array
Permutation distribution.
estimate : float or int
Point estimate.
alternative : str
Tail for p-value. Can be either `'two-sided'` (default), `'greater'` or `'less'`.
Returns
-------
p : float
P-value.
"""
assert alternative in ["two-sided", "greater", "less"], "Wrong tail argument."
assert isinstance(estimate, (int, float))
bootstat = np.asarray(bootstat)
assert bootstat.ndim == 1, "bootstat must be a 1D array."
n_boot = bootstat.size
assert n_boot >= 1, "bootstat must have at least one value."
if alternative == "greater":
p = np.greater_equal(bootstat, estimate).sum() / n_boot
elif alternative == "less":
p = np.less_equal(bootstat, estimate).sum() / n_boot
else:
p = np.greater_equal(np.fabs(bootstat), abs(estimate)).sum() / n_boot
return p
| (bootstat, estimate, alternative='two-sided') |
31,971 | pingouin.utils | _postprocess_dataframe | Apply some post-processing to an ouput dataframe (e.g. rounding).
Whether and how rounding is applied is governed by options specified in
`pingouin.options`. The default rounding (number of decimals) is
determined by `pingouin.options['round']`. You can specify rounding for a
given column name by the option `'round.column.<colname>'`, e.g.
`'round.column.CI95%'`. Analogously, `'round.row.<rowname>'` also works
(where `rowname`) refers to the pandas index), as well as
`'round.cell.[<rolname>]x[<colname]'`. A cell-based option is used,
if available; if not, a column-based option is used, if
available; if not, a row-based option is used, if available; if not,
the default is used. (Default `pingouin.options['round'] = None`,
i.e. no rounding is applied.)
If a round option is `callable` instead of `int`, then it will be called,
and the return value stored in the cell.
Post-processing is applied on a copy of the DataFrame, leaving the
original DataFrame untouched.
This is an internal function (no public API).
Parameters
----------
df : :py:class:`pandas.DataFrame`
Dataframe to apply post-processing to (e.g. ANOVA summary)
Returns
----------
df : :py:class:`pandas.DataFrame`
Dataframe with post-processing applied
| def _postprocess_dataframe(df):
"""Apply some post-processing to an ouput dataframe (e.g. rounding).
Whether and how rounding is applied is governed by options specified in
`pingouin.options`. The default rounding (number of decimals) is
determined by `pingouin.options['round']`. You can specify rounding for a
given column name by the option `'round.column.<colname>'`, e.g.
`'round.column.CI95%'`. Analogously, `'round.row.<rowname>'` also works
(where `rowname`) refers to the pandas index), as well as
`'round.cell.[<rolname>]x[<colname]'`. A cell-based option is used,
if available; if not, a column-based option is used, if
available; if not, a row-based option is used, if available; if not,
the default is used. (Default `pingouin.options['round'] = None`,
i.e. no rounding is applied.)
If a round option is `callable` instead of `int`, then it will be called,
and the return value stored in the cell.
Post-processing is applied on a copy of the DataFrame, leaving the
original DataFrame untouched.
This is an internal function (no public API).
Parameters
----------
df : :py:class:`pandas.DataFrame`
Dataframe to apply post-processing to (e.g. ANOVA summary)
Returns
----------
df : :py:class:`pandas.DataFrame`
Dataframe with post-processing applied
"""
df = df.copy()
for row, col in it.product(df.index, df.columns):
round_option = _get_round_setting_for(row, col)
if round_option is None:
continue
if callable(round_option):
newval = round_option(df.at[row, col])
# ensure that dtype changes are processed
df[col] = df[col].astype(type(newval))
df.at[row, col] = newval
continue
if isinstance(df.at[row, col], bool):
# No rounding if value is a boolean
continue
is_number = isinstance(df.at[row, col], numbers.Number)
is_array = isinstance(df.at[row, col], np.ndarray)
if not any([is_number, is_array]):
# No rounding if value is not a Number or an array
continue
if is_array:
is_float_array = issubclass(df.at[row, col].dtype.type, np.floating)
if not is_float_array:
# No rounding if value is not a float array
continue
df.at[row, col] = np.round(df.at[row, col], decimals=round_option)
return df
| (df) |
31,972 | pingouin.parametric | ancova | ANCOVA with one or more covariate(s).
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
dv : string
Name of column in data with the dependent variable.
between : string
Name of column in data with the between factor.
covar : string or list
Name(s) of column(s) in data with the covariate.
effsize : str
Effect size. Must be 'np2' (partial eta-squared) or 'n2'
(eta-squared).
Returns
-------
aov : :py:class:`pandas.DataFrame`
ANCOVA summary:
* ``'Source'``: Names of the factor considered
* ``'SS'``: Sums of squares
* ``'DF'``: Degrees of freedom
* ``'F'``: F-values
* ``'p-unc'``: Uncorrected p-values
* ``'np2'``: Partial eta-squared
Notes
-----
Analysis of covariance (ANCOVA) is a general linear model which blends
ANOVA and regression. ANCOVA evaluates whether the means of a dependent
variable (dv) are equal across levels of a categorical independent
variable (between) often called a treatment, while statistically
controlling for the effects of other continuous variables that are not
of primary interest, known as covariates or nuisance variables (covar).
Pingouin uses :py:class:`statsmodels.regression.linear_model.OLS` to
compute the ANCOVA.
.. important:: Rows with missing values are automatically removed
(listwise deletion).
See Also
--------
anova : One-way and N-way ANOVA
Examples
--------
1. Evaluate the reading scores of students with different teaching method
and family income as a covariate.
>>> from pingouin import ancova, read_dataset
>>> df = read_dataset('ancova')
>>> ancova(data=df, dv='Scores', covar='Income', between='Method')
Source SS DF F p-unc np2
0 Method 571.029883 3 3.336482 0.031940 0.244077
1 Income 1678.352687 1 29.419438 0.000006 0.486920
2 Residual 1768.522313 31 NaN NaN NaN
2. Evaluate the reading scores of students with different teaching method
and family income + BMI as a covariate.
>>> ancova(data=df, dv='Scores', covar=['Income', 'BMI'], between='Method',
... effsize="n2")
Source SS DF F p-unc n2
0 Method 552.284043 3 3.232550 0.036113 0.141802
1 Income 1573.952434 1 27.637304 0.000011 0.404121
2 BMI 60.013656 1 1.053790 0.312842 0.015409
3 Residual 1708.508657 30 NaN NaN NaN
@pf.register_dataframe_method
def ancova(data=None, dv=None, between=None, covar=None, effsize="np2"):
    """ANCOVA with one or more covariate(s).

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Note that this function can also directly be used as a
        Pandas method, in which case this argument is no longer needed.
    dv : string
        Name of column in data with the dependent variable.
    between : string
        Name of column in data with the between factor.
    covar : string or list
        Name(s) of column(s) in data with the covariate.
    effsize : str
        Effect size. Must be 'np2' (partial eta-squared) or 'n2'
        (eta-squared).

    Returns
    -------
    aov : :py:class:`pandas.DataFrame`
        ANCOVA summary:

        * ``'Source'``: Names of the factor considered
        * ``'SS'``: Sums of squares
        * ``'DF'``: Degrees of freedom
        * ``'F'``: F-values
        * ``'p-unc'``: Uncorrected p-values
        * ``'np2'``: Partial eta-squared

    Notes
    -----
    Analysis of covariance (ANCOVA) is a general linear model which blends
    ANOVA and regression. ANCOVA evaluates whether the means of a dependent
    variable (dv) are equal across levels of a categorical independent
    variable (between) often called a treatment, while statistically
    controlling for the effects of other continuous variables that are not
    of primary interest, known as covariates or nuisance variables (covar).

    Pingouin uses :py:class:`statsmodels.regression.linear_model.OLS` to
    compute the ANCOVA.

    .. important:: Rows with missing values are automatically removed
        (listwise deletion).

    See Also
    --------
    anova : One-way and N-way ANOVA

    Examples
    --------
    1. Evaluate the reading scores of students with different teaching method
    and family income as a covariate.

    >>> from pingouin import ancova, read_dataset
    >>> df = read_dataset('ancova')
    >>> ancova(data=df, dv='Scores', covar='Income', between='Method')
    Source SS DF F p-unc np2
    0 Method 571.029883 3 3.336482 0.031940 0.244077
    1 Income 1678.352687 1 29.419438 0.000006 0.486920
    2 Residual 1768.522313 31 NaN NaN NaN

    2. Evaluate the reading scores of students with different teaching method
    and family income + BMI as a covariate.

    >>> ancova(data=df, dv='Scores', covar=['Income', 'BMI'], between='Method',
    ...        effsize="n2")
    Source SS DF F p-unc n2
    0 Method 552.284043 3 3.232550 0.036113 0.141802
    1 Income 1573.952434 1 27.637304 0.000011 0.404121
    2 BMI 60.013656 1 1.053790 0.312842 0.015409
    3 Residual 1708.508657 30 NaN NaN NaN
    """
    # Import statsmodels lazily so that pingouin itself does not hard-depend
    # on it; raise a helpful error when it is missing.
    from pingouin.utils import _is_statsmodels_installed

    _is_statsmodels_installed(raise_error=True)
    from statsmodels.api import stats
    from statsmodels.formula.api import ols

    # Safety checks
    assert effsize in ["np2", "n2"], "effsize must be 'np2' or 'n2'."
    assert isinstance(data, pd.DataFrame), "data must be a pandas dataframe."
    assert isinstance(between, str), (
        "between must be a string. Pingouin does not support multiple "
        "between factors. For more details, please see "
        "https://github.com/raphaelvallat/pingouin/issues/173."
    )
    assert dv in data.columns, "%s is not in data." % dv
    assert between in data.columns, "%s is not in data." % between
    assert isinstance(covar, (str, list)), "covar must be a str or a list."
    if isinstance(covar, str):
        covar = [covar]  # normalize to a list of covariate names
    for c in covar:
        assert c in data.columns, "covariate %s is not in data" % c
        assert data[c].dtype.kind in "bfi", "covariate %s is not numeric" % c

    # Drop missing values (listwise deletion on dv, between and covariates)
    data = data[_flatten_list([dv, between, covar])].dropna()

    # Fit ANCOVA model
    # formula = dv ~ 1 + between + covar1 + covar2 + ...
    # Column names are wrapped in Q('...') so that names containing spaces or
    # special characters stay valid in the formula; as a consequence the names
    # must not collide with the formula functions C and Q themselves.
    assert dv not in ["C", "Q"], "`dv` must not be 'C' or 'Q'."
    assert between not in ["C", "Q"], "`between` must not be 'C' or 'Q'."
    assert all(c not in ["C", "Q"] for c in covar), "`covar` must not contain 'C' or 'Q'."
    formula = f"Q('{dv}') ~ C(Q('{between}'))"
    for c in covar:
        formula += " + Q('%s')" % (c)
    model = ols(formula, data=data).fit()

    # Create output dataframe. The type-II anova_lm table lists the between
    # factor first, then the covariates in input order, then the residual row;
    # the Source renaming below relies on that ordering.
    aov = stats.anova_lm(model, typ=2).reset_index()
    aov.rename(
        columns={"index": "Source", "sum_sq": "SS", "df": "DF", "PR(>F)": "p-unc"}, inplace=True
    )
    aov.at[0, "Source"] = between
    for i in range(len(covar)):
        aov.at[i + 1, "Source"] = covar[i]
    aov["DF"] = aov["DF"].astype(int)

    # Add effect sizes (residual row has no effect size -> NaN)
    if effsize == "n2":
        # Eta-squared: SS_effect / SS_total
        all_effsize = (aov["SS"] / aov["SS"].sum()).to_numpy()
        all_effsize[-1] = np.nan
    else:
        # Partial eta-squared: SS_effect / (SS_effect + SS_residual)
        ss_resid = aov["SS"].iloc[-1]
        all_effsize = aov["SS"].apply(lambda x: x / (x + ss_resid)).to_numpy()
        all_effsize[-1] = np.nan
    aov[effsize] = all_effsize

    # Add bw as an attribute (for rm_corr function): the fitted slope of the
    # last covariate in the OLS model.
    aov = _postprocess_dataframe(aov)
    aov.bw_ = model.params.iloc[-1]
    return aov
| (data=None, dv=None, between=None, covar=None, effsize='np2') |
31,973 | pingouin.distribution | anderson | Anderson-Darling test of distribution.
The Anderson-Darling test tests the null hypothesis that a sample is drawn from a population
that follows a particular distribution. For the Anderson-Darling test, the critical values
depend on which distribution is being tested against.
This function is a wrapper around :py:func:`scipy.stats.anderson`.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. They may be of different lengths.
dist : string
The type of distribution to test against. The default is 'norm'.
Must be one of 'norm', 'expon', 'logistic', 'gumbel'.
Returns
-------
from_dist : boolean
A boolean indicating if the data comes from the tested distribution (True) or not (False).
sig_level : float
The significance levels for the corresponding critical values, in %.
See :py:func:`scipy.stats.anderson` for more details.
Examples
--------
1. Test that an array comes from a normal distribution
>>> from pingouin import anderson
>>> import numpy as np
>>> np.random.seed(42)
>>> x = np.random.normal(size=100)
>>> y = np.random.normal(size=10000)
>>> z = np.random.random(1000)
>>> anderson(x)
(True, 15.0)
2. Test that multiple arrays come from the normal distribution
>>> anderson(x, y, z)
(array([ True, True, False]), array([15., 15., 1.]))
3. Test that an array comes from the exponential distribution
>>> x = np.random.exponential(size=1000)
>>> anderson(x, dist="expon")
(True, 15.0)
def anderson(*args, dist="norm"):
    """Anderson-Darling test of distribution.

    Wrapper around :py:func:`scipy.stats.anderson` that accepts one or more
    samples and reports, for each of them, whether the data are compatible
    with the tested distribution. For the Anderson-Darling test, the critical
    values depend on which distribution is being tested against.

    Parameters
    ----------
    sample1, sample2,... : array_like
        Array of sample data. They may be of different lengths.
    dist : string
        The type of distribution to test against. The default is 'norm'.
        Must be one of 'norm', 'expon', 'logistic', 'gumbel'.

    Returns
    -------
    from_dist : boolean
        True when the data appear to come from the tested distribution,
        False otherwise. An array of booleans when several samples are given.
    sig_level : float
        The significance level (in %) of the critical value closest to the
        observed statistic. See :py:func:`scipy.stats.anderson` for details.

    Examples
    --------
    1. Test that an array comes from a normal distribution

    >>> from pingouin import anderson
    >>> import numpy as np
    >>> np.random.seed(42)
    >>> x = np.random.normal(size=100)
    >>> y = np.random.normal(size=10000)
    >>> z = np.random.random(1000)
    >>> anderson(x)
    (True, 15.0)

    2. Test that multiple arrays come from the normal distribution

    >>> anderson(x, y, z)
    (array([ True,  True, False]), array([15., 15.,  1.]))

    3. Test that an array comes from the exponential distribution

    >>> x = np.random.exponential(size=1000)
    >>> anderson(x, dist="expon")
    (True, 15.0)
    """
    n_samples = len(args)
    from_dist = np.zeros(n_samples, dtype="bool")
    sig_level = np.zeros(n_samples)
    for idx, sample in enumerate(args):
        stat, crit, sig = scipy.stats.anderson(sample, dist=dist)
        # The sample is deemed compatible with the distribution when the
        # statistic falls below at least one of the critical values.
        from_dist[idx] = bool((stat < crit).any())
        # Report the significance level of the critical value closest to
        # the observed statistic.
        sig_level[idx] = sig[np.argmin(np.abs(stat - crit))]
    if n_samples == 1:
        return from_dist[0], sig_level[0]
    return from_dist, sig_level
| (*args, dist='norm') |
31,974 | pingouin.parametric | anova | One-way and *N*-way ANOVA.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
dv : string
Name of column in ``data`` containing the dependent variable.
between : string or list with *N* elements
Name of column(s) in ``data`` containing the between-subject factor(s).
If ``between`` is a single string, a one-way ANOVA is computed.
If ``between`` is a list with two or more elements, a *N*-way ANOVA is
performed.
Note that Pingouin will internally call statsmodels to calculate
ANOVA with 3 or more factors, or unbalanced two-way ANOVA.
ss_type : int
Specify how the sums of squares is calculated for *unbalanced* design
with 2 or more factors. Can be 1, 2 (default), or 3. This has no impact
on one-way design or N-way ANOVA with balanced data.
detailed : boolean
If True, return a detailed ANOVA table
(default True for N-way ANOVA).
effsize : str
Effect size. Must be 'np2' (partial eta-squared) or 'n2'
(eta-squared). Note that for one-way ANOVA partial eta-squared is the
same as eta-squared.
Returns
-------
aov : :py:class:`pandas.DataFrame`
ANOVA summary:
* ``'Source'``: Factor names
* ``'SS'``: Sums of squares
* ``'DF'``: Degrees of freedom
* ``'MS'``: Mean squares
* ``'F'``: F-values
* ``'p-unc'``: uncorrected p-values
* ``'np2'``: Partial eta-square effect sizes
See Also
--------
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
welch_anova : One-way Welch ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA (:py:func:`pingouin.welch_anova`) that
better controls for type I error (Liu 2015). The homogeneity of variances
can be measured with the :py:func:`pingouin.homoscedasticity` function.
The main idea of ANOVA is to partition the variance (sums of squares)
into several components. For example, in one-way ANOVA:
.. math::
SS_{\text{total}} = SS_{\text{effect}} + SS_{\text{error}}
SS_{\text{total}} = \sum_i \sum_j (Y_{ij} - \overline{Y})^2
SS_{\text{effect}} = \sum_i n_i (\overline{Y_i} - \overline{Y})^2
SS_{\text{error}} = \sum_i \sum_j (Y_{ij} - \overline{Y}_i)^2
where :math:`i=1,...,r; j=1,...,n_i`, :math:`r` is the number of groups,
and :math:`n_i` the number of observations for the :math:`i` th group.
The F-statistics is then defined as:
.. math::
F^* = \frac{MS_{\text{effect}}}{MS_{\text{error}}} =
\frac{SS_{\text{effect}} / (r - 1)}{SS_{\text{error}} / (n_t - r)}
and the p-value can be calculated using a F-distribution with
:math:`r-1, n_t-1` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (:py:func:`pingouin.pairwise_tukey`).
If the groups have unequal variances, the Games-Howell test is more
adequate (:py:func:`pingouin.pairwise_gameshowell`).
The default effect size reported in Pingouin is the partial eta-square,
which, for one-way ANOVA is the same as eta-square and generalized
eta-square.
.. math::
\eta_p^2 = \frac{SS_{\text{effect}}}{SS_{\text{effect}} +
SS_{\text{error}}}
Missing values are automatically removed. Results have been tested against
R, Matlab and JASP.
Examples
--------
One-way ANOVA
>>> import pingouin as pg
>>> df = pg.read_dataset('anova')
>>> aov = pg.anova(dv='Pain threshold', between='Hair color', data=df,
... detailed=True)
>>> aov.round(3)
Source SS DF MS F p-unc np2
0 Hair color 1360.726 3 453.575 6.791 0.004 0.576
1 Within 1001.800 15 66.787 NaN NaN NaN
Same but using a standard eta-squared instead of a partial eta-squared
effect size. Also note how here we're using the anova function directly as
a method (= built-in function) of our pandas dataframe. In that case,
we don't have to specify ``data`` anymore.
>>> df.anova(dv='Pain threshold', between='Hair color', detailed=False,
... effsize='n2')
Source ddof1 ddof2 F p-unc n2
0 Hair color 3 15 6.791407 0.004114 0.575962
Two-way ANOVA with balanced design
>>> data = pg.read_dataset('anova2')
>>> data.anova(dv="Yield", between=["Blend", "Crop"]).round(3)
Source SS DF MS F p-unc np2
0 Blend 2.042 1 2.042 0.004 0.952 0.000
1 Crop 2736.583 2 1368.292 2.525 0.108 0.219
2 Blend * Crop 2360.083 2 1180.042 2.178 0.142 0.195
3 Residual 9753.250 18 541.847 NaN NaN NaN
Two-way ANOVA with unbalanced design (requires statsmodels)
>>> data = pg.read_dataset('anova2_unbalanced')
>>> data.anova(dv="Scores", between=["Diet", "Exercise"],
... effsize="n2").round(3)
Source SS DF MS F p-unc n2
0 Diet 390.625 1.0 390.625 7.423 0.034 0.433
1 Exercise 180.625 1.0 180.625 3.432 0.113 0.200
2 Diet * Exercise 15.625 1.0 15.625 0.297 0.605 0.017
3 Residual 315.750 6.0 52.625 NaN NaN NaN
Three-way ANOVA, type 3 sums of squares (requires statsmodels)
>>> data = pg.read_dataset('anova3')
>>> data.anova(dv='Cholesterol', between=['Sex', 'Risk', 'Drug'],
... ss_type=3).round(3)
Source SS DF MS F p-unc np2
0 Sex 2.075 1.0 2.075 2.462 0.123 0.049
1 Risk 11.332 1.0 11.332 13.449 0.001 0.219
2 Drug 0.816 2.0 0.408 0.484 0.619 0.020
3 Sex * Risk 0.117 1.0 0.117 0.139 0.711 0.003
4 Sex * Drug 2.564 2.0 1.282 1.522 0.229 0.060
5 Risk * Drug 2.438 2.0 1.219 1.446 0.245 0.057
6 Sex * Risk * Drug 1.844 2.0 0.922 1.094 0.343 0.044
7 Residual 40.445 48.0 0.843 NaN NaN NaN
@pf.register_dataframe_method
def anova(data=None, dv=None, between=None, ss_type=2, detailed=False, effsize="np2"):
    """One-way and *N*-way ANOVA.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Note that this function can also directly be used as a
        Pandas method, in which case this argument is no longer needed.
    dv : string
        Name of column in ``data`` containing the dependent variable.
    between : string or list with *N* elements
        Name of column(s) in ``data`` containing the between-subject factor(s).
        If ``between`` is a single string, a one-way ANOVA is computed.
        If ``between`` is a list with two or more elements, a *N*-way ANOVA is
        performed.
        Note that Pingouin will internally call statsmodels to calculate
        ANOVA with 3 or more factors, or unbalanced two-way ANOVA.
    ss_type : int
        Specify how the sums of squares is calculated for *unbalanced* design
        with 2 or more factors. Can be 1, 2 (default), or 3. This has no impact
        on one-way design or N-way ANOVA with balanced data.
    detailed : boolean
        If True, return a detailed ANOVA table
        (default True for N-way ANOVA).
    effsize : str
        Effect size. Must be 'np2' (partial eta-squared) or 'n2'
        (eta-squared). Note that for one-way ANOVA partial eta-squared is the
        same as eta-squared.

    Returns
    -------
    aov : :py:class:`pandas.DataFrame`
        ANOVA summary:

        * ``'Source'``: Factor names
        * ``'SS'``: Sums of squares
        * ``'DF'``: Degrees of freedom
        * ``'MS'``: Mean squares
        * ``'F'``: F-values
        * ``'p-unc'``: uncorrected p-values
        * ``'np2'``: Partial eta-square effect sizes

    See Also
    --------
    rm_anova : One-way and two-way repeated measures ANOVA
    mixed_anova : Two way mixed ANOVA
    welch_anova : One-way Welch ANOVA
    kruskal : Non-parametric one-way ANOVA

    Notes
    -----
    The classic ANOVA is very powerful when the groups are normally distributed
    and have equal variances. However, when the groups have unequal variances,
    it is best to use the Welch ANOVA (:py:func:`pingouin.welch_anova`) that
    better controls for type I error (Liu 2015). The homogeneity of variances
    can be measured with the :py:func:`pingouin.homoscedasticity` function.

    The main idea of ANOVA is to partition the variance (sums of squares)
    into several components. For example, in one-way ANOVA:

    .. math::

        SS_{\\text{total}} = SS_{\\text{effect}} + SS_{\\text{error}}

        SS_{\\text{total}} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y})^2

        SS_{\\text{effect}} = \\sum_i n_i (\\overline{Y_i} - \\overline{Y})^2

        SS_{\\text{error}} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y}_i)^2

    where :math:`i=1,...,r; j=1,...,n_i`, :math:`r` is the number of groups,
    and :math:`n_i` the number of observations for the :math:`i` th group.

    The F-statistics is then defined as:

    .. math::

        F^* = \\frac{MS_{\\text{effect}}}{MS_{\\text{error}}} =
        \\frac{SS_{\\text{effect}} / (r - 1)}{SS_{\\text{error}} / (n_t - r)}

    and the p-value can be calculated using a F-distribution with
    :math:`r-1, n_t-1` degrees of freedom.

    When the groups are balanced and have equal variances, the optimal post-hoc
    test is the Tukey-HSD test (:py:func:`pingouin.pairwise_tukey`).
    If the groups have unequal variances, the Games-Howell test is more
    adequate (:py:func:`pingouin.pairwise_gameshowell`).

    The default effect size reported in Pingouin is the partial eta-square,
    which, for one-way ANOVA is the same as eta-square and generalized
    eta-square.

    .. math::

        \\eta_p^2 = \\frac{SS_{\\text{effect}}}{SS_{\\text{effect}} +
        SS_{\\text{error}}}

    Missing values are automatically removed. Results have been tested against
    R, Matlab and JASP.

    Examples
    --------
    One-way ANOVA

    >>> import pingouin as pg
    >>> df = pg.read_dataset('anova')
    >>> aov = pg.anova(dv='Pain threshold', between='Hair color', data=df,
    ...                detailed=True)
    >>> aov.round(3)
    Source SS DF MS F p-unc np2
    0 Hair color 1360.726 3 453.575 6.791 0.004 0.576
    1 Within 1001.800 15 66.787 NaN NaN NaN

    Same but using a standard eta-squared instead of a partial eta-squared
    effect size. Also note how here we're using the anova function directly as
    a method (= built-in function) of our pandas dataframe. In that case,
    we don't have to specify ``data`` anymore.

    >>> df.anova(dv='Pain threshold', between='Hair color', detailed=False,
    ...          effsize='n2')
    Source ddof1 ddof2 F p-unc n2
    0 Hair color 3 15 6.791407 0.004114 0.575962

    Two-way ANOVA with balanced design

    >>> data = pg.read_dataset('anova2')
    >>> data.anova(dv="Yield", between=["Blend", "Crop"]).round(3)
    Source SS DF MS F p-unc np2
    0 Blend 2.042 1 2.042 0.004 0.952 0.000
    1 Crop 2736.583 2 1368.292 2.525 0.108 0.219
    2 Blend * Crop 2360.083 2 1180.042 2.178 0.142 0.195
    3 Residual 9753.250 18 541.847 NaN NaN NaN

    Two-way ANOVA with unbalanced design (requires statsmodels)

    >>> data = pg.read_dataset('anova2_unbalanced')
    >>> data.anova(dv="Scores", between=["Diet", "Exercise"],
    ...            effsize="n2").round(3)
    Source SS DF MS F p-unc n2
    0 Diet 390.625 1.0 390.625 7.423 0.034 0.433
    1 Exercise 180.625 1.0 180.625 3.432 0.113 0.200
    2 Diet * Exercise 15.625 1.0 15.625 0.297 0.605 0.017
    3 Residual 315.750 6.0 52.625 NaN NaN NaN

    Three-way ANOVA, type 3 sums of squares (requires statsmodels)

    >>> data = pg.read_dataset('anova3')
    >>> data.anova(dv='Cholesterol', between=['Sex', 'Risk', 'Drug'],
    ...            ss_type=3).round(3)
    Source SS DF MS F p-unc np2
    0 Sex 2.075 1.0 2.075 2.462 0.123 0.049
    1 Risk 11.332 1.0 11.332 13.449 0.001 0.219
    2 Drug 0.816 2.0 0.408 0.484 0.619 0.020
    3 Sex * Risk 0.117 1.0 0.117 0.139 0.711 0.003
    4 Sex * Drug 2.564 2.0 1.282 1.522 0.229 0.060
    5 Risk * Drug 2.438 2.0 1.219 1.446 0.245 0.057
    6 Sex * Risk * Drug 1.844 2.0 0.922 1.094 0.343 0.044
    7 Residual 40.445 48.0 0.843 NaN NaN NaN
    """
    assert effsize in ["np2", "n2"], "effsize must be 'np2' or 'n2'."
    # Dispatch on the number of between factors: a one-element list is
    # unwrapped and handled here; two or more factors are delegated.
    if isinstance(between, list):
        if len(between) == 0:
            raise ValueError("between is empty.")
        elif len(between) == 1:
            between = between[0]
        elif len(between) == 2:
            # Two factors with balanced design = Pingouin implementation
            # Two factors with unbalanced design = statsmodels
            return anova2(dv=dv, between=between, data=data, ss_type=ss_type, effsize=effsize)
        else:
            # 3 or more factors with (un)-balanced design = statsmodels
            return anovan(dv=dv, between=between, data=data, ss_type=ss_type, effsize=effsize)

    # Check data
    data = _check_dataframe(dv=dv, between=between, data=data, effects="between")

    # Drop missing values
    data = data[[dv, between]].dropna()
    # Reset index (avoid duplicate axis error)
    data = data.reset_index(drop=True)
    n_groups = data[between].nunique()
    N = data[dv].size

    # Calculate sums of squares
    grp = data.groupby(between, observed=True, group_keys=False)[dv]
    # Between effect: group-size-weighted squared deviation of each group mean
    # from the grand mean
    ssbetween = (
        (grp.mean(numeric_only=True) - data[dv].mean(numeric_only=True)) ** 2 * grp.count()
    ).sum()
    # Within effect (= error between)
    # = (grp.var(ddof=0) * grp.count()).sum()
    sserror = grp.transform(lambda x: (x - x.mean()) ** 2).sum()
    # In 1-way ANOVA, sstotal = ssbetween + sserror
    # sstotal = ssbetween + sserror

    # Calculate DOF, MS, F and p-values
    ddof1 = n_groups - 1
    ddof2 = N - n_groups
    msbetween = ssbetween / ddof1
    mserror = sserror / ddof2
    fval = msbetween / mserror
    p_unc = f(ddof1, ddof2).sf(fval)  # survival function of the F distribution

    # Calculating effect sizes (see Bakeman 2005; Lakens 2013)
    # In one-way ANOVA, partial eta2 = eta2 = generalized eta2
    # Similar to (fval * ddof1) / (fval * ddof1 + ddof2)
    np2 = ssbetween / (ssbetween + sserror)  # = ssbetween / sstotal
    # Omega-squared
    # o2 = (ddof1 * (msbetween - mserror)) / (sstotal + mserror)

    # Create output dataframe
    if not detailed:
        # Compact table: one row with degrees of freedom, F, p and effect size
        aov = pd.DataFrame(
            {
                "Source": between,
                "ddof1": ddof1,
                "ddof2": ddof2,
                "F": fval,
                "p-unc": p_unc,
                effsize: np2,
            },
            index=[0],
        )
    else:
        # Detailed table: between and within rows with SS/DF/MS breakdown
        aov = pd.DataFrame(
            {
                "Source": [between, "Within"],
                "SS": [ssbetween, sserror],
                "DF": [ddof1, ddof2],
                "MS": [msbetween, mserror],
                "F": [fval, np.nan],
                "p-unc": [p_unc, np.nan],
                effsize: [np2, np.nan],
            }
        )
    aov.dropna(how="all", axis=1, inplace=True)
    return _postprocess_dataframe(aov)
| (data=None, dv=None, between=None, ss_type=2, detailed=False, effsize='np2') |
31,975 | pingouin.bayesian | bayesfactor_binom |
Bayes factor of a binomial test with :math:`k` successes,
:math:`n` trials and base probability :math:`p`. This means that
the null hypothesis is that the probability is :math:`p`. It is
compared against the alternative hypothesis that :math:`p` is from
the Beta distribution with parameters :math:`(a, b)`. By default,
both :math:`a` and :math:`b` are 1, making the alternative
hypothesis equivalent to the uniform distribution, i.e., we are
completely uninformed about :math:`p`.
Parameters
----------
k : int
Number of successes.
n : int
Number of trials.
p : float
Base probability of success (range from 0 to 1).
a : float
The "a" parameter of the Beta distribution.
b : float
The "b" parameter of the Beta distribution.
Returns
-------
bf10 : float
The Bayes Factor quantifies the evidence in favour of the
alternative hypothesis, where the null hypothesis is that
the random variable is binomially distributed with base probability
:math:`p`.
See also
--------
bayesfactor_pearson : Bayes Factor of a correlation
bayesfactor_ttest : Bayes Factor of a T-test
Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/binombf.m
The Bayes Factor is given by the formula below:
.. math::
BF_{10} = \frac{\int_0^1 \binom{n}{k}g^k(1-g)^{n-k}}
{\binom{n}{k} p^k (1-p)^{n-k}}
References
----------
* http://pcl.missouri.edu/bf-binomial
* https://en.wikipedia.org/wiki/Bayes_factor
Examples
--------
We want to determine if a coin is fair. After tossing the coin 200 times
in a row, we report 115 heads (hereafter referred to as "successes") and 85
tails ("failures"). The Bayes Factor can be easily computed using Pingouin:
>>> import pingouin as pg
>>> bf = float(pg.bayesfactor_binom(k=115, n=200, p=0.5))
>>> # Note that Pingouin returns the BF-alt by default.
>>> # BF-null is simply 1 / BF-alt
>>> print("BF-null: %.3f, BF-alt: %.3f" % (1 / bf, bf))
BF-null: 1.197, BF-alt: 0.835
Since the Bayes Factor of the null hypothesis ("the coin is fair") is
higher than the Bayes Factor of the alternative hypothesis
("the coin is not fair"), we can conclude that there is more evidence to
support the fact that the coin is indeed fair. However, the strength of the
evidence in favor of the null hypothesis (1.197) is "barely worth
mentioning" according to Jeffreys's rule of thumb.
Interestingly, a frequentist alternative to this test would give very
different results. It can be performed using the
:py:func:`scipy.stats.binom_test` function:
>>> from scipy.stats import binomtest
>>> result = binomtest(k=115, n=200, p=0.5)
>>> round(result.pvalue, 5)
0.04004
The binomial test rejects the null hypothesis that the coin is fair at the
5% significance level (p=0.04). Thus, whereas a frequentist hypothesis test
would yield significant results at the 5% significance level, the Bayes
factor indicates preference of the null hypothesis to the alternative
hypothesis that we know nothing about p.
We can use a more informed alternative hypothesis too, if desirable. E.g.,
the original test using Beta(5, 4) as the alternative hypothesis:
>>> bf = pg.bayesfactor_binom(k=115, n=200, p=0.5, a=5, b=4)
>>> print("Bayes Factor: %.3f" % bf)
Bayes Factor: 1.930
Using a different base probability of successes:
>>> bf = pg.bayesfactor_binom(k=100, n=1000, p=0.1)
>>> print("Bayes Factor: %.3f" % bf)
Bayes Factor: 0.024
def bayesfactor_binom(k, n, p=0.5, a=1, b=1):
    """
    Bayes factor of a binomial test with :math:`k` successes and :math:`n`
    trials. The null hypothesis is that the probability of success is
    :math:`p`; the alternative is that the probability follows a
    Beta(:math:`a`, :math:`b`) distribution. With the defaults
    :math:`a = b = 1` the alternative prior is the uniform distribution,
    i.e. we are completely uninformed about :math:`p`.

    Parameters
    ----------
    k : int
        Number of successes.
    n : int
        Number of trials.
    p : float
        Base probability of success (range from 0 to 1).
    a : float
        The "a" parameter of the Beta distribution.
    b : float
        The "b" parameter of the Beta distribution.

    Returns
    -------
    bf10 : float
        The Bayes Factor quantifying the evidence in favour of the
        alternative hypothesis, where the null hypothesis is that the random
        variable is binomially distributed with base probability :math:`p`.
        BF-null is simply ``1 / bf10``.

    See also
    --------
    bayesfactor_pearson : Bayes Factor of a correlation
    bayesfactor_ttest : Bayes Factor of a T-test

    Notes
    -----
    Adapted from a Matlab code found at
    https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/binombf.m

    The Bayes Factor is given by the formula below:

    .. math::

        BF_{10} = \\frac{\\int_0^1 \\binom{n}{k}g^k(1-g)^{n-k}}
        {\\binom{n}{k} p^k (1-p)^{n-k}}

    References
    ----------
    * http://pcl.missouri.edu/bf-binomial
    * https://en.wikipedia.org/wiki/Bayes_factor

    Examples
    --------
    We want to determine if a coin is fair. After tossing the coin 200 times
    in a row, we report 115 heads ("successes") and 85 tails ("failures"):

    >>> import pingouin as pg
    >>> bf = float(pg.bayesfactor_binom(k=115, n=200, p=0.5))
    >>> # Pingouin returns BF-alt by default; BF-null is simply 1 / BF-alt
    >>> print("BF-null: %.3f, BF-alt: %.3f" % (1 / bf, bf))
    BF-null: 1.197, BF-alt: 0.835

    There is thus slightly more evidence that the coin is fair than that it
    is biased, although this evidence is "barely worth mentioning" according
    to Jeffreys's rule of thumb. Note that a frequentist binomial test
    (:py:func:`scipy.stats.binomtest`) on the same data rejects the null
    hypothesis at the 5% significance level (p=0.04004).

    A more informed alternative hypothesis, Beta(5, 4):

    >>> bf = pg.bayesfactor_binom(k=115, n=200, p=0.5, a=5, b=4)
    >>> print("Bayes Factor: %.3f" % bf)
    Bayes Factor: 1.930

    Using a different base probability of successes:

    >>> bf = pg.bayesfactor_binom(k=100, n=1000, p=0.1)
    >>> print("Bayes Factor: %.3f" % bf)
    Bayes Factor: 0.024
    """
    from scipy.stats import beta, binom

    assert 0 < p < 1, "p must be between 0 and 1."
    assert isinstance(k, int), "k must be int."
    assert isinstance(n, int), "n must be int."
    assert k <= n, "k (successes) cannot be higher than n (trials)."
    assert a > 0, "a must be positive."
    assert b > 0, "b must be positive."

    def _alt_density(g):
        # Binomial likelihood of the data at success probability ``g``,
        # weighted by the Beta(a, b) prior of the alternative hypothesis.
        return beta.pdf(g, a, b) * binom.pmf(k, n, g)

    # Marginal likelihood under H1 (prior integrated out over [0, 1]) divided
    # by the point likelihood under H0.
    marginal_alt = quad(_alt_density, 0, 1)[0]
    likelihood_null = binom.pmf(k, n, p)
    return marginal_alt / likelihood_null
| (k, n, p=0.5, a=1, b=1) |
31,976 | pingouin.bayesian | bayesfactor_pearson |
Bayes Factor of a Pearson correlation.
Parameters
----------
r : float
Pearson correlation coefficient.
n : int
Sample size.
alternative : string
Defines the alternative hypothesis, or tail of the correlation. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
p-value. "greater" tests against the alternative hypothesis that the correlation is
positive (greater than zero), "less" tests against the hypothesis that the correlation is
negative.
method : str
Method to compute the Bayes Factor. Can be "ly" (default) or
"wetzels". The former has an exact analytical solution, while the
latter requires integral solving (and is therefore slower). "wetzels"
was the default in Pingouin <= 0.2.5. See Notes for details.
kappa : float
Kappa factor. This is sometimes called the *rscale* parameter, and
is only used when ``method`` is "ly".
Returns
-------
bf : float
Bayes Factor (BF10).
The Bayes Factor quantifies the evidence in favour of the alternative
hypothesis.
See also
--------
corr : (Robust) correlation between two variables
pairwise_corr : Pairwise correlation between columns of a pandas DataFrame
bayesfactor_ttest : Bayes Factor of a T-test
bayesfactor_binom : Bayes Factor of a binomial test
Notes
-----
To compute the Bayes Factor directly from the raw data, use the
:py:func:`pingouin.corr` function.
The two-sided **Wetzels Bayes Factor** (also called *JZS Bayes Factor*)
is calculated using the equation 13 and associated R code of [2]_:
.. math::
\text{BF}_{10}(n, r) = \frac{\sqrt{n/2}}{\gamma(1/2)}*
\int_{0}^{\infty}e((n-2)/2)*
log(1+g)+(-(n-1)/2)log(1+(1-r^2)*g)+(-3/2)log(g)-n/2g
where :math:`n` is the sample size, :math:`r` is the Pearson correlation
coefficient and :math:`g` is an auxiliary variable that is integrated
out numerically. Since the Wetzels Bayes Factor requires solving an
integral, it is slower than the analytical solution described below.
The two-sided **Ly Bayes Factor** (also called *Jeffreys
exact Bayes Factor*) is calculated using equation 25 of [1]_:
.. math::
\text{BF}_{10;k}(n, r) = \frac{2^{\frac{k-2}{k}}\sqrt{\pi}}
{\beta(\frac{1}{k}, \frac{1}{k})} \cdot
\frac{\Gamma(\frac{2+k(n-1)}{2k})}{\Gamma(\frac{2+nk}{2k})}
\cdot 2F_1(\frac{n-1}{2}, \frac{n-1}{2}, \frac{2+nk}{2k}, r^2)
The one-sided version is described in eq. 27 and 28 of Ly et al, 2016.
Please take note that the one-sided test requires the
`mpmath <http://mpmath.org/>`_ package.
Results have been validated against JASP and the BayesFactor R package.
References
----------
.. [1] Ly, A., Verhagen, J. & Wagenmakers, E.-J. Harold Jeffreys’s default
Bayes factor hypothesis tests: Explanation, extension, and
application in psychology. J. Math. Psychol. 72, 19–32 (2016).
.. [2] Wetzels, R. & Wagenmakers, E.-J. A default Bayesian hypothesis test
for correlations and partial correlations. Psychon. Bull. Rev. 19,
1057–1064 (2012).
Examples
--------
Bayes Factor of a Pearson correlation
>>> from pingouin import bayesfactor_pearson
>>> r, n = 0.6, 20
>>> bf = bayesfactor_pearson(r, n)
>>> print("Bayes Factor: %.3f" % bf)
Bayes Factor: 10.634
Compare to Wetzels method:
>>> bf = bayesfactor_pearson(r, n, method='wetzels')
>>> print("Bayes Factor: %.3f" % bf)
Bayes Factor: 8.221
One-sided test
>>> bf10pos = bayesfactor_pearson(r, n, alternative='greater')
>>> bf10neg = bayesfactor_pearson(r, n, alternative='less')
>>> print("BF-pos: %.3f, BF-neg: %.3f" % (bf10pos, bf10neg))
BF-pos: 21.185, BF-neg: 0.082
def bayesfactor_pearson(r, n, alternative="two-sided", method="ly", kappa=1.0):
    """Bayes Factor of a Pearson correlation.

    Parameters
    ----------
    r : float
        Pearson correlation coefficient (must lie in [-1, 1]).
    n : int
        Sample size.
    alternative : string
        Tail of the test. One of "two-sided" (default), "greater" (tests for a
        positive correlation) or "less" (tests for a negative correlation).
    method : str
        "ly" (default) uses the exact analytical solution of Ly et al. (2016);
        "wetzels" solves the Wetzels & Wagenmakers (2012) integral numerically
        and is therefore slower. "wetzels" was the default in Pingouin <= 0.2.5.
    kappa : float
        Kappa factor (sometimes called the *rscale* parameter). Only used when
        ``method`` is "ly".

    Returns
    -------
    bf : float
        Bayes Factor (BF10), quantifying the evidence in favour of the
        alternative hypothesis. NaN is returned when ``r`` is not finite or
        ``n < 2``.

    Notes
    -----
    The two-sided Wetzels (JZS) Bayes Factor follows eq. 13 of Wetzels &
    Wagenmakers (2012). The two-sided Ly (Jeffreys exact) Bayes Factor follows
    eq. 25 of Ly et al. (2016); its one-sided variants follow eq. 27-28 of the
    same paper and require the `mpmath <http://mpmath.org/>`_ package.
    Results have been validated against JASP and the BayesFactor R package.

    Examples
    --------
    >>> from pingouin import bayesfactor_pearson
    >>> round(bayesfactor_pearson(0.6, 20), 3)
    10.634
    """
    from scipy.special import gamma, betaln, hyp2f1

    assert method.lower() in ["ly", "wetzels"], "Method not recognized."
    assert alternative in (
        "two-sided",
        "greater",
        "less",
    ), "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."

    # Invalid input: undefined correlation or too few observations.
    if not np.isfinite(r) or n < 2:
        return np.nan
    assert -1 <= r <= 1, "r must be between -1 and 1."

    # Wetzels has no one-sided form: fall back on the Ly solution.
    if method.lower() == "wetzels" and alternative != "two-sided":
        warnings.warn(
            "One-sided Bayes Factor are not supported by the "
            "Wetzels's method. Switching to method='ly'."
        )
        method = "ly"

    if method.lower() == "wetzels":
        # Wetzels & Wagenmakers (2012), eq. 13: integrate the auxiliary
        # variable g out numerically.
        def integrand(g):
            return exp(
                ((n - 2) / 2) * log(1 + g)
                + (-(n - 1) / 2) * log(1 + (1 - r**2) * g)
                + (-3 / 2) * log(g)
                + -n / (2 * g)
            )

        area = quad(integrand, 0, np.inf)[0]
        return np.sqrt(n / 2) / gamma(1 / 2) * area

    # Ly et al. (2016), eq. 25: exact analytical solution, computed in
    # log-space for numerical stability.
    k = kappa
    lbeta = betaln(1 / k, 1 / k)
    hyp = hyp2f1((n - 1) / 2, (n - 1) / 2, (n + 2 / k) / 2, r**2)
    bf10 = exp(
        (1 - 2 / k) * log(2)
        + 0.5 * log(pi)
        - lbeta
        + lgamma((n + 2 / k - 1) / 2)
        - lgamma((n + 2 / k) / 2)
        + log(hyp)
    )

    if alternative != "two-sided":
        # Directional test (eq. 27-28 of Ly et al., 2016). The generalized
        # hypergeometric function 3F2 is only available through mpmath.
        from .utils import _is_mpmath_installed

        _is_mpmath_installed(raise_error=True)
        from mpmath import hyp3f2

        hyper_term = float(hyp3f2(1, n / 2, n / 2, 3 / 2, (2 + k * (n + 1)) / (2 * k), r**2))
        log_term = 2 * (lgamma(n / 2) - lgamma((n - 1) / 2)) - lbeta
        C = 2 ** ((3 * k - 2) / k) * k * r / (2 + (n - 1) * k) * exp(log_term) * hyper_term
        bf10neg = bf10 - C
        bf10pos = 2 * bf10 - bf10neg
        # Pick the tail matching the expected sign of the correlation.
        bf10 = bf10pos if alternative == "greater" else bf10neg

    return bf10
| (r, n, alternative='two-sided', method='ly', kappa=1.0) |
31,977 | pingouin.bayesian | bayesfactor_ttest |
Bayes Factor of a T-test.
Parameters
----------
t : float
T-value of the T-test
nx : int
Sample size of first group
ny : int
Sample size of second group (only needed in case of an independent
two-sample T-test)
paired : boolean
Specify whether the two observations are related (i.e. repeated
measures) or independent.
alternative : string
Defines the alternative hypothesis, or tail of the test. Must be one of
"two-sided" (default), "greater" or "less".
.. warning:: One-sided Bayes Factor (BF) are simply obtained by
doubling the two-sided BF, which is not the same behavior
as R or JASP. Be extra careful when interpreting one-sided BF,
and if you can, always double-check your results.
r : float
Cauchy scale factor. Smaller values of ``r`` (e.g. 0.5), may be
appropriate when small effect sizes are expected a priori; larger
values of ``r`` are appropriate when large effect sizes are
expected (Rouder et al 2009). The default is
:math:`\sqrt{2} / 2 \approx 0.707`.
Returns
-------
bf : float
Scaled Jeffrey-Zellner-Siow (JZS) Bayes Factor (BF10).
The Bayes Factor quantifies the evidence in favour of the
alternative hypothesis.
See also
--------
ttest : T-test
pairwise_test : Pairwise T-tests
bayesfactor_pearson : Bayes Factor of a correlation
bayesfactor_binom : Bayes Factor of a binomial test
Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/tree/master/stats/BayesFactors
If you would like to compute the Bayes Factor directly from the raw data
instead of from the T-value, use the :py:func:`pingouin.ttest` function.
The JZS Bayes Factor is approximated using the formula described
in ref [1]_:
.. math::
\text{BF}_{10} = \frac{\int_{0}^{\infty}(1 + Ngr^2)^{-1/2}
(1 + \frac{t^2}{v(1 + Ngr^2)})^{-(v+1) / 2}(2\pi)^{-1/2}g^
{-3/2}e^{-1/2g}}{(1 + \frac{t^2}{v})^{-(v+1) / 2}}
where :math:`t` is the T-value, :math:`v` the degrees of freedom,
:math:`N` the sample size, :math:`r` the Cauchy scale factor
(= prior on effect size) and :math:`g` is an auxiliary variable
that is integrated out numerically.
Results have been validated against JASP and the BayesFactor R package.
References
----------
.. [1] Rouder, J.N., Speckman, P.L., Sun, D., Morey, R.D., Iverson, G.,
2009. Bayesian t tests for accepting and rejecting the null hypothesis.
Psychon. Bull. Rev. 16, 225–237. https://doi.org/10.3758/PBR.16.2.225
Examples
--------
1. Bayes Factor of an independent two-sample T-test
>>> from pingouin import bayesfactor_ttest
>>> bf = bayesfactor_ttest(3.5, 20, 20)
>>> print("Bayes Factor: %.3f (two-sample independent)" % bf)
Bayes Factor: 26.743 (two-sample independent)
2. Bayes Factor of a paired two-sample T-test
>>> bf = bayesfactor_ttest(3.5, 20, 20, paired=True)
>>> print("Bayes Factor: %.3f (two-sample paired)" % bf)
Bayes Factor: 17.185 (two-sample paired)
3. Now specifying the direction of the test
>>> tval = -3.5
>>> bf_greater = bayesfactor_ttest(tval, 20, alternative='greater')
>>> bf_less = bayesfactor_ttest(tval, 20, alternative='less')
>>> print("BF10-greater: %.3f | BF10-less: %.3f" % (bf_greater, bf_less))
BF10-greater: 0.029 | BF10-less: 34.369
def bayesfactor_ttest(t, nx, ny=None, paired=False, alternative="two-sided", r=0.707):
    """Scaled Jeffrey-Zellner-Siow (JZS) Bayes Factor of a T-test.

    Parameters
    ----------
    t : float
        T-value of the T-test.
    nx : int
        Sample size of the first group.
    ny : int
        Sample size of the second group (only needed for an independent
        two-sample T-test).
    paired : boolean
        Whether the two observations are related (repeated measures) or
        independent.
    alternative : string
        Tail of the test: "two-sided" (default), "greater" or "less".

        .. warning:: One-sided Bayes Factor (BF) are simply obtained by
            doubling the two-sided BF, which is not the same behavior as R or
            JASP. Be extra careful when interpreting one-sided BF, and if you
            can, always double-check your results.
    r : float
        Cauchy scale factor (prior on effect size). Smaller values (e.g. 0.5)
        suit small expected effect sizes; larger values suit large ones
        (Rouder et al., 2009). Default is sqrt(2) / 2, approximately 0.707.

    Returns
    -------
    bf : float
        Scaled JZS Bayes Factor (BF10), quantifying the evidence in favour of
        the alternative hypothesis. NaN is returned for a non-finite ``t``.

    Notes
    -----
    The BF10 is approximated with the integral formula of Rouder, Speckman,
    Sun, Morey & Iverson (2009), eq. 1, where the auxiliary variable g is
    integrated out numerically. Results have been validated against JASP and
    the BayesFactor R package.

    Examples
    --------
    >>> from pingouin import bayesfactor_ttest
    >>> round(bayesfactor_ttest(3.5, 20, 20), 3)
    26.743
    """
    assert alternative in (
        "two-sided",
        "greater",
        "less",
    ), "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    one_sample = ny is None or ny == 1
    assert isinstance(t, (int, float)), "The T-value must be a int or a float."
    if not np.isfinite(t):
        return np.nan

    # Effective sample size and degrees of freedom.
    if one_sample or paired:
        n = nx
        df = n - 1
    else:
        n = nx * ny / (nx + ny)  # harmonic-mean-style effective n
        df = nx + ny - 2

    # Integrand of eq. 1 in Rouder et al. (2009); g is integrated out.
    def integrand(g):
        return (
            (1 + n * g * r**2) ** (-0.5)
            * (1 + t**2 / ((1 + n * g * r**2) * df)) ** (-(df + 1) / 2)
            * (2 * pi) ** (-0.5)
            * g ** (-3.0 / 2)
            * exp(-1 / (2 * g))
        )

    area = quad(integrand, 0, np.inf)[0]
    bf10 = area / (1 + t**2 / df) ** (-(df + 1) / 2)

    if alternative != "two-sided":
        # One-sided BF = doubled two-sided BF (see docstring warning).
        bf10 = bf10 * 2
        # If the observed effect points away from the tested direction,
        # evidence favours the null: invert the Bayes Factor.
        wrong_direction = (alternative == "greater" and t < 0) or (
            alternative == "less" and t > 0
        )
        if wrong_direction and bf10 > 1:
            bf10 = 1 / bf10

    return bf10
| (t, nx, ny=None, paired=False, alternative='two-sided', r=0.707) |
31,979 | pingouin.multivariate | box_m | Test equality of covariance matrices using the Box's M test.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Long-format dataframe.
dvs : list
Dependent variables.
group : str
Grouping variable.
alpha : float
Significance level. Default is 0.001 as recommended in [2]_. A
non-significant p-value (higher than alpha) indicates that the
covariance matrices are homogenous (= equal).
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'Chi2'``: Test statistic
* ``'pval'``: p-value
* ``'df'``: The Chi-Square statistic's degree of freedom
* ``'equal_cov'``: True if ``data`` has equal covariance
Notes
-----
.. warning:: Box's M test is susceptible to errors if the data does not
meet the assumption of multivariate normality or if the sample size is
too large or small [3]_.
Pingouin uses :py:meth:`pandas.DataFrameGroupBy.cov` to calculate the
variance-covariance matrix of each group. Missing values are automatically
excluded from the calculation by Pandas.
Mathematical expressions can be found in [1]_.
This function has been tested against the boxM package of the `biotools`
R package [4]_.
References
----------
.. [1] Rencher, A. C. (2003). Methods of multivariate analysis (Vol. 492).
John Wiley & Sons.
.. [2] Hahs-Vaughn, D. (2016). Applied Multivariate Statistical Concepts.
Taylor & Francis.
.. [3] https://en.wikipedia.org/wiki/Box%27s_M_test
.. [4] https://cran.r-project.org/web/packages/biotools/index.html
Examples
--------
1. Box M test with 3 dependent variables of 4 groups (equal sample size)
>>> import pandas as pd
>>> import pingouin as pg
>>> from scipy.stats import multivariate_normal as mvn
>>> data = pd.DataFrame(mvn.rvs(size=(100, 3), random_state=42),
... columns=['A', 'B', 'C'])
>>> data['group'] = [1] * 25 + [2] * 25 + [3] * 25 + [4] * 25
>>> data.head()
A B C group
0 0.496714 -0.138264 0.647689 1
1 1.523030 -0.234153 -0.234137 1
2 1.579213 0.767435 -0.469474 1
3 0.542560 -0.463418 -0.465730 1
4 0.241962 -1.913280 -1.724918 1
>>> pg.box_m(data, dvs=['A', 'B', 'C'], group='group')
Chi2 df pval equal_cov
box 11.634185 18.0 0.865537 True
2. Box M test with 3 dependent variables of 2 groups (unequal sample size)
>>> data = pd.DataFrame(mvn.rvs(size=(30, 2), random_state=42),
... columns=['A', 'B'])
>>> data['group'] = [1] * 20 + [2] * 10
>>> pg.box_m(data, dvs=['A', 'B'], group='group')
Chi2 df pval equal_cov
box 0.706709 3.0 0.871625 True
def box_m(data, dvs, group, alpha=0.001):
    """Test equality of covariance matrices using the Box's M test.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Long-format dataframe.
    dvs : list
        Dependent variables.
    group : str
        Grouping variable.
    alpha : float
        Significance level. Default is 0.001 as recommended in [2]_. A
        non-significant p-value (higher than alpha) indicates that the
        covariance matrices are homogenous (= equal).

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        * ``'Chi2'``: Test statistic
        * ``'pval'``: p-value
        * ``'df'``: The Chi-Square statistic's degree of freedom
        * ``'equal_cov'``: True if ``data`` has equal covariance

    Notes
    -----
    .. warning:: Box's M test is susceptible to errors if the data does not
        meet the assumption of multivariate normality or if the sample size is
        too large or small [3]_.

    Pingouin uses :py:meth:`pandas.DataFrameGroupBy.cov` to calculate the
    variance-covariance matrix of each group. Missing values are automatically
    excluded from the calculation by Pandas.

    Mathematical expressions can be found in [1]_ (pages 257-259).

    This function has been tested against the boxM function of the `biotools`
    R package [4]_.

    References
    ----------
    .. [1] Rencher, A. C. (2003). Methods of multivariate analysis (Vol. 492).
           John Wiley & Sons.
    .. [2] Hahs-Vaughn, D. (2016). Applied Multivariate Statistical Concepts.
           Taylor & Francis.
    .. [3] https://en.wikipedia.org/wiki/Box%27s_M_test
    .. [4] https://cran.r-project.org/web/packages/biotools/index.html

    Examples
    --------
    Box M test with 3 dependent variables of 4 groups (equal sample size)

    >>> import pandas as pd
    >>> import pingouin as pg
    >>> from scipy.stats import multivariate_normal as mvn
    >>> data = pd.DataFrame(mvn.rvs(size=(100, 3), random_state=42),
    ...                     columns=['A', 'B', 'C'])
    >>> data['group'] = [1] * 25 + [2] * 25 + [3] * 25 + [4] * 25
    >>> pg.box_m(data, dvs=['A', 'B', 'C'], group='group')
              Chi2    df      pval  equal_cov
    box  11.634185  18.0  0.865537       True
    """
    # Safety checks
    from scipy.stats import chi2

    assert isinstance(data, pd.DataFrame), "data must be a pandas dataframe."
    assert group in data.columns, "The grouping variable is not in data."
    assert set(dvs).issubset(data.columns), "The DVs are not in data."
    grp = data.groupby(group, observed=True)[dvs]
    # BUGFIX: this assertion checks the number of *groups* (grp.ngroups), but
    # the previous message misleadingly said "columns".
    assert grp.ngroups > 1, "Data must have at least two groups."
    # Calculate covariance matrix and descriptive statistics
    # - n_covs is the number of covariance matrices (one per group)
    # - n_dvs is the number of variables
    # - n_samp is the number of samples in each covariance matrix
    # - nobs is the total number of observations
    covs = grp.cov(numeric_only=True)
    n_covs, n_dvs = covs.index.levshape
    n_samp = grp.count().iloc[:, 0].to_numpy()  # NaN are excluded by .count
    nobs = n_samp.sum()
    v = n_samp - 1
    # Calculate pooled covariance matrix (S) and M statistics
    covs = covs.to_numpy().reshape(n_covs, n_dvs, -1)
    S = (covs * v[..., None, None]).sum(axis=0) / (nobs - n_covs)
    # The following lines might raise an error if the covariance matrices are
    # not invertible (e.g. missing values in input).
    S_det = np.linalg.det(S)
    M = ((np.linalg.det(covs) / S_det) ** (v / 2)).prod()
    # Calculate C in reference [1] (page 257-259)
    if len(np.unique(n_samp)) == 1:
        # All groups have same number of samples
        c = ((n_covs + 1) * (2 * n_dvs**2 + 3 * n_dvs - 1)) / (
            6 * n_covs * (n_dvs + 1) * (nobs / n_covs - 1)
        )
    else:
        # Unequal sample size
        c = (2 * n_dvs**2 + 3 * n_dvs - 1) / (6 * (n_dvs + 1) * (n_covs - 1))
        c *= (1 / v).sum() - 1 / v.sum()
    # Calculate U statistics and degree of freedom
    u = -2 * (1 - c) * np.log(M)
    df = 0.5 * n_dvs * (n_dvs + 1) * (n_covs - 1)
    p = chi2.sf(u, df)
    equal_cov = True if p > alpha else False
    stats = pd.DataFrame(
        index=["box"], data={"Chi2": [u], "df": [df], "pval": [p], "equal_cov": [equal_cov]}
    )
    return _postprocess_dataframe(stats)
| (data, dvs, group, alpha=0.001) |
31,980 | pingouin.contingency | chi2_independence |
Chi-squared independence tests between two categorical variables.
The test is computed for different values of :math:`\lambda`: 1, 2/3, 0,
-1/2, -1 and -2 (Cressie and Read, 1984).
Parameters
----------
data : :py:class:`pandas.DataFrame`
The dataframe containing the occurrences for the test.
x, y : string
The variables names for the Chi-squared test. Must be names of columns
in ``data``.
correction : bool
Whether to apply Yates' correction when the degree of freedom of the
observed contingency table is 1 (Yates 1934).
Returns
-------
expected : :py:class:`pandas.DataFrame`
The expected contingency table of frequencies.
observed : :py:class:`pandas.DataFrame`
The (corrected or not) observed contingency table of frequencies.
stats : :py:class:`pandas.DataFrame`
The test summary, containing four columns:
* ``'test'``: The statistic name
* ``'lambda'``: The :math:`\lambda` value used for the power divergence statistic
* ``'chi2'``: The test statistic
* ``'pval'``: The p-value of the test
* ``'cramer'``: The Cramer's V effect size
* ``'power'``: The statistical power of the test
Notes
-----
From Wikipedia:
*The chi-squared test is used to determine whether there is a significant
difference between the expected frequencies and the observed frequencies
in one or more categories.*
As application examples, this test can be used to *i*) evaluate the
quality of a categorical variable in a classification problem or to *ii*)
check the similarity between two categorical variables. In the first
example, a good categorical predictor and the class column should present
high :math:`\chi^2` and low p-value. In the second example, similar
categorical variables should present low :math:`\chi^2` and high p-value.
This function is a wrapper around the
:py:func:`scipy.stats.power_divergence` function.
.. warning :: As a general guideline for the consistency of this test, the
observed and the expected contingency tables should not have cells
with frequencies lower than 5.
References
----------
* Cressie, N., & Read, T. R. (1984). Multinomial goodness‐of‐fit
tests. Journal of the Royal Statistical Society: Series B
(Methodological), 46(3), 440-464.
* Yates, F. (1934). Contingency Tables Involving Small Numbers and the
:math:`\chi^2` Test. Supplement to the Journal of the Royal
Statistical Society, 1, 217-235.
Examples
--------
Let's see if gender is a good categorical predictor for the presence of
heart disease.
>>> import pingouin as pg
>>> data = pg.read_dataset('chi2_independence')
>>> data['sex'].value_counts(ascending=True)
sex
0 96
1 207
Name: count, dtype: int64
If gender is not a good predictor for heart disease, we should expect the
same 96:207 ratio across the target classes.
>>> expected, observed, stats = pg.chi2_independence(data, x='sex',
... y='target')
>>> expected
target 0 1
sex
0 43.722772 52.277228
1 94.277228 112.722772
Let's see what the data tells us.
>>> observed
target 0 1
sex
0 24.5 71.5
1 113.5 93.5
The proportion is lower on the class 0 and higher on the class 1. The
tests should be sensitive to this difference.
>>> stats.round(3)
test lambda chi2 dof pval cramer power
0 pearson 1.000 22.717 1.0 0.0 0.274 0.997
1 cressie-read 0.667 22.931 1.0 0.0 0.275 0.998
2 log-likelihood 0.000 23.557 1.0 0.0 0.279 0.998
3 freeman-tukey -0.500 24.220 1.0 0.0 0.283 0.998
4 mod-log-likelihood -1.000 25.071 1.0 0.0 0.288 0.999
5 neyman -2.000 27.458 1.0 0.0 0.301 0.999
Very low p-values indeed. The gender qualifies as a good predictor for the
presence of heart disease on this dataset.
def chi2_independence(data, x, y, correction=True):
    """Chi-squared independence tests between two categorical variables.

    The test is computed for different values of the power divergence
    parameter :math:`\\lambda`: 1, 2/3, 0, -1/2, -1 and -2
    (Cressie and Read, 1984).

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        The dataframe containing the occurrences for the test.
    x, y : string
        The variables names for the Chi-squared test. Must be names of columns
        in ``data``.
    correction : bool
        Whether to apply Yates' correction when the degree of freedom of the
        observed contingency table is 1 (Yates 1934).

    Returns
    -------
    expected : :py:class:`pandas.DataFrame`
        The expected contingency table of frequencies.
    observed : :py:class:`pandas.DataFrame`
        The (corrected or not) observed contingency table of frequencies.
    stats : :py:class:`pandas.DataFrame`
        The test summary, with one row per statistic:

        * ``'test'``: The statistic name
        * ``'lambda'``: The :math:`\\lambda` value used for the power
          divergence statistic
        * ``'chi2'``: The test statistic
        * ``'dof'``: The degree of freedom
        * ``'pval'``: The p-value of the test
        * ``'cramer'``: The Cramer's V effect size
        * ``'power'``: The statistical power of the test

    Notes
    -----
    This function is a wrapper around
    :py:func:`scipy.stats.power_divergence`.

    .. warning :: As a general guideline for the consistency of this test, the
        observed and the expected contingency tables should not have cells
        with frequencies lower than 5.

    References
    ----------
    * Cressie, N., & Read, T. R. (1984). Multinomial goodness-of-fit
      tests. Journal of the Royal Statistical Society: Series B
      (Methodological), 46(3), 440-464.
    * Yates, F. (1934). Contingency Tables Involving Small Numbers and the
      Chi-squared Test. Supplement to the Journal of the Royal
      Statistical Society, 1, 217-235.

    Examples
    --------
    >>> import pingouin as pg
    >>> data = pg.read_dataset('chi2_independence')
    >>> expected, observed, stats = pg.chi2_independence(data, x='sex',
    ...                                                  y='target')
    """
    # Python code inspired by SciPy's chi2_contingency
    assert isinstance(data, pd.DataFrame), "data must be a pandas DataFrame."
    assert isinstance(x, (str, int)), "x must be a string or int."
    assert isinstance(y, (str, int)), "y must be a string or int."
    assert all(col in data.columns for col in (x, y)), "columns are not in dataframe."
    assert isinstance(correction, bool), "correction must be a boolean."

    observed = pd.crosstab(data[x], data[y])
    if observed.size == 0:
        raise ValueError("No data; observed has size 0.")
    expected = pd.DataFrame(expected_freq(observed), index=observed.index, columns=observed.columns)

    # The test is unreliable with cell frequencies below 5 -> warn.
    for table, label in ((observed, "observed"), (expected, "expected")):
        if (table < 5).any(axis=None):
            warnings.warn(f"Low count on {label} frequencies.")

    dof = float(expected.size - sum(expected.shape) + expected.ndim - 1)
    if dof == 1 and correction:
        # Adjust `observed` according to Yates' correction for continuity:
        # shift each observed cell 0.5 towards its expected value.
        observed = observed + 0.5 * np.sign(expected - observed)

    ddof = observed.size - 1 - dof
    n = data.shape[0]

    # One power-divergence statistic per lambda (Cressie & Read, 1984).
    lambdas = {
        "pearson": 1.0,
        "cressie-read": 2 / 3,
        "log-likelihood": 0.0,
        "freeman-tukey": -1 / 2,
        "mod-log-likelihood": -1.0,
        "neyman": -2.0,
    }
    rows = []
    for test_name, lambda_ in lambdas.items():
        if dof == 0:
            # Degenerate table (single row or column): nothing to test.
            chi2, p, cramer, power = 0.0, 1.0, np.nan, np.nan
        else:
            chi2, p = power_divergence(observed, expected, ddof=ddof, axis=None, lambda_=lambda_)
            dof_cramer = min(expected.shape) - 1
            cramer = np.sqrt(chi2 / (n * dof_cramer))
            power = power_chi2(dof=dof, w=cramer, n=n, alpha=0.05)
        rows.append(
            {
                "test": test_name,
                "lambda": lambda_,
                "chi2": chi2,
                "dof": dof,
                "pval": p,
                "cramer": cramer,
                "power": power,
            }
        )

    stats = pd.DataFrame(rows)[["test", "lambda", "chi2", "dof", "pval", "cramer", "power"]]
    return expected, observed, _postprocess_dataframe(stats)
| (data, x, y, correction=True) |
31,981 | pingouin.contingency | chi2_mcnemar |
Performs the exact and approximated versions of McNemar's test.
Parameters
----------
data : :py:class:`pandas.DataFrame`
 The dataframe containing the occurrences for the test. Each row must
represent either a subject or a pair of subjects.
x, y : string
The variables names for the McNemar's test. Must be names of columns
in ``data``.
If each row of ``data`` represents a subject, then ``x`` and ``y`` must
be columns containing dichotomous measurements in two different
contexts. For instance: the presence of pain before and after a certain
treatment.
If each row of ``data`` represents a pair of subjects, then ``x`` and
``y`` must be columns containing dichotomous measurements for each of
the subjects. For instance: a positive response to a certain drug in
the control group and in the test group, supposing that each pair
contains a subject in each group.
The 2x2 crosstab is created using the
:py:func:`pingouin.dichotomous_crosstab` function.
.. warning:: Missing values are not allowed.
correction : bool
Whether to apply the correction for continuity (Edwards, A. 1948).
Returns
-------
observed : :py:class:`pandas.DataFrame`
The observed contingency table of frequencies.
stats : :py:class:`pandas.DataFrame`
The test summary:
* ``'chi2'``: The test statistic
* ``'dof'``: The degree of freedom
* ``'p-approx'``: The approximated p-value
* ``'p-exact'``: The exact p-value
Notes
-----
The McNemar's test is compatible with dichotomous paired data, generally
used to assert the effectiveness of a certain procedure, such as a
treatment or the use of a drug. "Dichotomous" means that the values of the
measurements are binary. "Paired data" means that each measurement is done
twice, either on the same subject in two different moments or in two
similar (paired) subjects from different groups (e.g.: control/test). In
order to better understand the idea behind McNemar's test, let's illustrate
it with an example.
Suppose that we wanted to compare the effectiveness of two different
treatments (X and Y) for athlete's foot on a certain group of `n` people.
To achieve this, we measured their responses to such treatments on each
foot. The observed data summary was:
* Number of people with good responses to X and Y: `a`
* Number of people with good response to X and bad response to Y: `b`
* Number of people with bad response to X and good response to Y: `c`
* Number of people with bad responses to X and Y: `d`
Now consider the two groups:
1. The group of people who had good response to X (`a` + `b` subjects)
2. The group of people who had good response to Y (`a` + `c` subjects)
If the treatments have the same effectiveness, we should expect the
probabilities of having good responses to be the same, regardless of the
treatment. Mathematically, such statement can be translated into the
following equation:
.. math::
\frac{a+b}{n} = \frac{a+c}{n} \Rightarrow b = c
Thus, this test should indicate higher statistical significances for higher
distances between `b` and `c` (McNemar, Q. 1947):
.. math::
\chi^2 = \frac{(b - c)^2}{b + c}
References
----------
* Edwards, A. L. (1948). Note on the "correction for continuity" in
testing the significance of the difference between correlated
proportions. Psychometrika, 13(3), 185-187.
* McNemar, Q. (1947). Note on the sampling error of the difference
between correlated proportions or percentages. Psychometrika, 12(2),
153-157.
Examples
--------
>>> import pingouin as pg
>>> data = pg.read_dataset('chi2_mcnemar')
>>> observed, stats = pg.chi2_mcnemar(data, 'treatment_X', 'treatment_Y')
>>> observed
treatment_Y 0 1
treatment_X
0 20 40
1 8 12
 In this case, `c` (40) seems to be significantly greater than `b` (8).
The McNemar test should be sensitive to this.
>>> stats
chi2 dof p-approx p-exact
mcnemar 20.020833 1 0.000008 0.000003
def chi2_mcnemar(data, x, y, correction=True):
    """Perform the exact and approximated versions of McNemar's test.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Dataframe with one dichotomous measurement pair per row (either the
        same subject measured in two contexts, or a matched pair of
        subjects). Missing values are not allowed.
    x, y : string
        Names of the two columns of ``data`` holding the dichotomous
        measurements. The 2x2 contingency table is built with
        :py:func:`pingouin.dichotomous_crosstab`.
    correction : bool
        Whether to apply Edwards' (1948) correction for continuity.

    Returns
    -------
    observed : :py:class:`pandas.DataFrame`
        The observed 2x2 contingency table of frequencies.
    stats : :py:class:`pandas.DataFrame`
        Test summary: ``'chi2'`` (test statistic), ``'dof'`` (degrees of
        freedom), ``'p-approx'`` (chi-square approximation) and
        ``'p-exact'`` (exact binomial p-value).

    Raises
    ------
    ValueError
        If ``x`` or ``y`` contains null values, or if both discordant
        cells of the contingency table are zero.

    References
    ----------
    * Edwards, A. L. (1948). Psychometrika, 13(3), 185-187.
    * McNemar, Q. (1947). Psychometrika, 12(2), 153-157.
    """
    # Input validation.
    assert isinstance(data, pd.DataFrame), "data must be a pandas DataFrame."
    assert all(
        isinstance(column, (str, int)) for column in (x, y)
    ), "column names must be string or int."
    assert all(column in data.columns for column in (x, y)), "columns are not in dataframe."
    for column in (x, y):
        if data[column].isna().any():
            raise ValueError("Null values are not allowed.")

    observed = dichotomous_crosstab(data, x, y)
    # The crosstab rows/columns are ordered [0, 1] (not [1, 0] as on
    # Wikipedia), so b and c are read from the swapped cells.
    c, b = observed.at[0, 1], observed.at[1, 0]
    n_discordants = b + c
    if (b, c) == (0, 0):
        raise ValueError(
            "McNemar's test does not work if the secondary "
            + "diagonal of the observed data summary does not "
            + "have values different from 0."
        )

    # Chi-square statistic on the discordant pairs, optionally with
    # Edwards' continuity correction (subtract 1 from |b - c|).
    chi2 = (abs(b - c) - int(correction)) ** 2 / n_discordants
    # Exact two-sided binomial p-value, capped at 1.
    pexact = min(1, 2 * binom.cdf(min(b, c), n_discordants, 0.5))
    summary = pd.DataFrame(
        {
            "chi2": chi2,
            "dof": 1,
            "p-approx": sp_chi2.sf(chi2, 1),
            "p-exact": pexact,
        },
        index=["mcnemar"],
    )
    return observed, _postprocess_dataframe(summary)
| (data, x, y, correction=True) |
31,982 | pingouin.circular | circ_axial | Transforms n-axial data to a common scale.
Parameters
----------
angles : array
Sample of angles in radians
n : int
Number of modes
Returns
-------
angles : float
Transformed angles
Notes
-----
 Transform data with multiple modes (known as axial data) to a unimodal
sample, for the purpose of certain analysis such as computation of a
mean resultant vector (see Berens 2009).
Examples
--------
Transform degrees to unimodal radians in the Berens 2009 neuro dataset.
>>> import numpy as np
>>> from pingouin import read_dataset
>>> from pingouin.circular import circ_axial
>>> df = read_dataset('circular')
>>> angles = df['Orientation'].to_numpy()
>>> angles = circ_axial(np.deg2rad(angles), 2)
def circ_axial(angles, n):
    """Transform n-axial data to a common (unimodal) scale.

    Parameters
    ----------
    angles : array
        Sample of angles in radians.
    n : int
        Number of modes.

    Returns
    -------
    angles : float
        Transformed angles.

    Notes
    -----
    Multiplying axial angles by the number of modes and wrapping the result
    onto [0, 2*pi) maps data with multiple modes (axial data) onto a
    unimodal sample, e.g. before computing a mean resultant vector
    (see Berens 2009).

    Examples
    --------
    Transform degrees to unimodal radians in the Berens 2009 neuro dataset.
    >>> import numpy as np
    >>> from pingouin import read_dataset
    >>> from pingouin.circular import circ_axial
    >>> df = read_dataset('circular')
    >>> angles = df['Orientation'].to_numpy()
    >>> angles = circ_axial(np.deg2rad(angles), 2)
    """
    # Scale by the number of modes, then wrap onto the unit circle.
    scaled = np.asarray(angles) * n
    return np.remainder(scaled, 2 * np.pi)
| (angles, n) |
31,983 | pingouin.circular | circ_corrcc | Correlation coefficient between two circular variables.
Parameters
----------
x : 1-D array_like
First circular variable (expressed in radians).
y : 1-D array_like
Second circular variable (expressed in radians).
correction_uniform : bool
Use correction for uniform marginals.
Returns
-------
r : float
Correlation coefficient.
pval : float
Uncorrected p-value.
Notes
-----
Adapted from the CircStats MATLAB toolbox [1]_.
The range of ``x`` and ``y`` must be either
:math:`[0, 2\pi]` or :math:`[-\pi, \pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
Please note that NaN are automatically removed.
If the ``correction_uniform`` is True, an alternative equation from
[2]_ (p. 177) is used. If the marginal distribution of ``x`` or ``y`` is
uniform, the mean is not well defined, which leads to wrong estimates
of the circular correlation. The alternative equation corrects for this
by choosing the means in a way that maximizes the positive or negative
correlation.
References
----------
.. [1] Berens, P. (2009). CircStat: A MATLAB Toolbox for Circular
Statistics. Journal of Statistical Software, Articles, 31(10), 1–21.
https://doi.org/10.18637/jss.v031.i10
.. [2] Jammalamadaka, S. R., & Sengupta, A. (2001). Topics in circular
statistics (Vol. 5). world scientific.
Examples
--------
Compute the r and p-value of two circular variables
>>> from pingouin import circ_corrcc
>>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
>>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
>>> r, pval = circ_corrcc(x, y)
>>> print(round(r, 3), round(pval, 4))
0.942 0.0658
With the correction for uniform marginals
>>> r, pval = circ_corrcc(x, y, correction_uniform=True)
>>> print(round(r, 3), round(pval, 4))
0.547 0.2859
def circ_corrcc(x, y, correction_uniform=False):
    """Correlation coefficient between two circular variables.

    Parameters
    ----------
    x : 1-D array_like
        First circular variable, in radians. The range must be either
        [0, 2*pi] or [-pi, pi]; use :py:func:`pingouin.convert_angles`
        first if the data are expressed in degrees or hours.
    y : 1-D array_like
        Second circular variable, in radians.
    correction_uniform : bool
        Use the correction for uniform marginals from Jammalamadaka &
        Sengupta (2001, p. 177). When the marginal distribution of ``x``
        or ``y`` is uniform the circular mean is ill-defined, which biases
        the standard estimator; the corrected equation picks the means so
        as to maximize the positive or negative correlation.

    Returns
    -------
    r : float
        Correlation coefficient.
    pval : float
        Uncorrected p-value (normal approximation).

    Notes
    -----
    Adapted from the CircStat MATLAB toolbox (Berens 2009). NaN values
    are removed pairwise before the computation.

    Examples
    --------
    >>> from pingouin import circ_corrcc
    >>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
    >>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
    >>> r, pval = circ_corrcc(x, y)
    >>> print(round(r, 3), round(pval, 4))
    0.942 0.0658
    """
    x = np.asarray(x)
    y = np.asarray(y)
    assert x.size == y.size, "x and y must have the same length."
    # Pairwise removal of missing values.
    x, y = remove_na(x, y, paired=True)
    n = x.size
    # Sine deviations from the circular means of each variable.
    sin_x = np.sin(x - circ_mean(x))
    sin_y = np.sin(y - circ_mean(y))
    denom = np.sqrt(np.sum(sin_x**2) * np.sum(sin_y**2))
    if correction_uniform:
        # Jammalamadaka & Sengupta (2001) estimator, robust to uniform
        # marginals.
        r_minus = np.abs(np.sum(np.exp((x - y) * 1j)))
        r_plus = np.abs(np.sum(np.exp((x + y) * 1j)))
        r = (r_minus - r_plus) / (2 * denom)
    else:
        # Equivalent to np.corrcoef(sin_x, sin_y)[0][1]
        r = np.sum(sin_x * sin_y) / denom
    # Test statistic; approximately standard normal under H0.
    tval = (
        np.sqrt((n * (sin_x**2).mean() * (sin_y**2).mean()) / np.mean(sin_x**2 * sin_y**2)) * r
    )
    pval = 2 * norm.sf(abs(tval))
    return r, pval
| (x, y, correction_uniform=False) |
31,984 | pingouin.circular | circ_corrcl | Correlation coefficient between one circular and one linear variable
random variables.
Parameters
----------
x : 1-D array_like
First circular variable (expressed in radians).
The range of ``x`` must be either :math:`[0, 2\pi]` or
:math:`[-\pi, \pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
y : 1-D array_like
Second circular variable (linear)
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Please note that NaN are automatically removed from datasets.
Examples
--------
Compute the r and p-value between one circular and one linear variables.
>>> from pingouin import circ_corrcl
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> y = [1.593, 1.291, -0.248, -2.892, 0.102]
>>> r, pval = circ_corrcl(x, y)
>>> print(round(r, 3), round(pval, 3))
0.109 0.971
def circ_corrcl(x, y):
    """Correlation coefficient between one circular and one linear variable.

    Parameters
    ----------
    x : 1-D array_like
        Circular variable, in radians. The range must be either
        [0, 2*pi] or [-pi, pi]; use :py:func:`pingouin.convert_angles`
        first if the data are expressed in degrees or hours.
    y : 1-D array_like
        Linear variable.

    Returns
    -------
    r : float
        Correlation coefficient.
    pval : float
        Uncorrected p-value.

    Notes
    -----
    NaN values are removed pairwise before the computation.

    Examples
    --------
    >>> from pingouin import circ_corrcl
    >>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
    >>> y = [1.593, 1.291, -0.248, -2.892, 0.102]
    >>> r, pval = circ_corrcl(x, y)
    >>> print(round(r, 3), round(pval, 3))
    0.109 0.971
    """
    from scipy.stats import pearsonr, chi2

    x = np.asarray(x)
    y = np.asarray(y)
    assert x.size == y.size, "x and y must have the same length."
    # Pairwise removal of missing values.
    x, y = remove_na(x, y, paired=True)
    n = x.size
    # Pearson correlations of the linear variable with the sin and cos
    # components of the circular variable, and between the components.
    r_sin = pearsonr(y, np.sin(x))[0]
    r_cos = pearsonr(y, np.cos(x))[0]
    r_cs = pearsonr(np.sin(x), np.cos(x))[0]
    # Angular-linear correlation (equ. 27.47).
    r = np.sqrt((r_cos**2 + r_sin**2 - 2 * r_cos * r_sin * r_cs) / (1 - r_cs**2))
    # p-value from the chi-square distribution with 2 dof.
    pval = chi2.sf(n * r**2, 2)
    return r, pval
| (x, y) |
31,985 | pingouin.circular | circ_mean | Mean direction for (binned) circular data.
Parameters
----------
angles : array_like
Samples of angles in radians. The range of ``angles`` must be either
:math:`[0, 2\pi]` or :math:`[-\pi, \pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
w : array_like
Number of incidences per bins (i.e. "weights"), in case of binned angle
data.
axis : int or None
Compute along this dimension. Default is the first axis (0).
Returns
-------
mu : float
Circular mean, in radians.
See also
--------
scipy.stats.circmean, scipy.stats.circstd, pingouin.circ_r
Notes
-----
From Wikipedia:
*In mathematics, a mean of circular quantities is a mean which is sometimes
better-suited for quantities like angles, daytimes, and fractional parts
of real numbers. This is necessary since most of the usual means may not be
appropriate on circular quantities. For example, the arithmetic mean of 0°
and 360° is 180°, which is misleading because for most purposes 360° is
the same thing as 0°.
As another example, the "average time" between 11 PM and 1 AM is either
midnight or noon, depending on whether the two times are part of a single
night or part of a single calendar day.*
The circular mean of a set of angles :math:`\alpha` is defined by:
.. math::
\bar{\alpha} = \text{angle} \left ( \sum_{j=1}^n \exp(i \cdot
\alpha_j) \right )
For binned angles with weights :math:`w`, this becomes:
.. math::
\bar{\alpha} = \text{angle} \left ( \sum_{j=1}^n w \cdot
\exp(i \cdot \alpha_j) \right )
Missing values in ``angles`` are omitted from the calculations.
References
----------
* https://en.wikipedia.org/wiki/Mean_of_circular_quantities
* Berens, P. (2009). CircStat: A MATLAB Toolbox for Circular
Statistics. Journal of Statistical Software, Articles, 31(10),
1–21. https://doi.org/10.18637/jss.v031.i10
Examples
--------
1. Circular mean of a 1-D array of angles, in radians
>>> import pingouin as pg
>>> angles = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> round(pg.circ_mean(angles), 4)
1.013
Compare with SciPy:
>>> from scipy.stats import circmean
>>> import numpy as np
>>> round(circmean(angles, low=0, high=2*np.pi), 4)
1.013
2. Using a 2-D array of angles in degrees
>>> np.random.seed(123)
>>> deg = np.random.randint(low=0, high=360, size=(3, 5))
>>> deg
array([[322, 98, 230, 17, 83],
[106, 123, 57, 214, 225],
[ 96, 113, 126, 47, 73]])
We first need to convert from degrees to radians:
>>> rad = np.round(pg.convert_angles(deg, low=0, high=360), 4)
>>> rad
array([[-0.6632, 1.7104, -2.2689, 0.2967, 1.4486],
[ 1.85 , 2.1468, 0.9948, -2.5482, -2.3562],
[ 1.6755, 1.9722, 2.1991, 0.8203, 1.2741]])
>>> pg.circ_mean(rad) # On the first axis (default)
array([1.27532162, 1.94336576, 2.23195927, 0.52110503, 1.80240563])
>>> pg.circ_mean(rad, axis=-1) # On the last axis (default)
array([0.68920819, 2.49334852, 1.5954149 ])
>>> round(pg.circ_mean(rad, axis=None), 4) # Across the entire array
1.6954
Missing values are omitted from the calculations:
>>> rad[0, 0] = np.nan
>>> pg.circ_mean(rad)
array([1.76275 , 1.94336576, 2.23195927, 0.52110503, 1.80240563])
3. Using binned angles
>>> np.random.seed(123)
>>> nbins = 18 # Number of bins to divide the unit circle
>>> angles_bins = np.linspace(0, 2 * np.pi, nbins)
>>> # w represents the number of incidences per bins, or "weights".
>>> w = np.random.randint(low=0, high=5, size=angles_bins.size)
>>> round(pg.circ_mean(angles_bins, w), 4)
0.606
def circ_mean(angles, w=None, axis=0):
    """Mean direction for (binned) circular data.

    Parameters
    ----------
    angles : array_like
        Samples of angles in radians. The range must be either
        [0, 2*pi] or [-pi, pi]; use :py:func:`pingouin.convert_angles`
        first if the data are expressed in degrees or hours.
    w : array_like
        Number of incidences per bin (i.e. "weights"), for binned angle
        data. Must have the same shape as ``angles``. Defaults to equal
        weights.
    axis : int or None
        Compute along this dimension. Default is the first axis (0).

    Returns
    -------
    mu : float
        Circular mean, in radians.

    See also
    --------
    scipy.stats.circmean, scipy.stats.circstd, pingouin.circ_r

    Notes
    -----
    The circular mean is the angle of the (weighted) sum of the unit
    vectors exp(i * angle). Missing values in ``angles`` are omitted
    from the calculations.

    Examples
    --------
    >>> import pingouin as pg
    >>> angles = [0.785, 1.570, 3.141, 0.839, 5.934]
    >>> round(pg.circ_mean(angles), 4)
    1.013
    """
    angles = np.asarray(angles)
    _checkangles(angles)  # Angles must already be expressed in radians.
    if w is None:
        w = np.ones(angles.shape)
    else:
        w = np.asarray(w)
    assert angles.shape == w.shape, "Input dimensions do not match"
    # Angle of the weighted sum of unit vectors; nansum omits missing
    # angles from the calculation.
    return np.angle(np.nansum(np.multiply(w, np.exp(1j * angles)), axis=axis))
| (angles, w=None, axis=0) |
31,986 | pingouin.circular | circ_r | Mean resultant vector length for circular data.
Parameters
----------
angles : array_like
Samples of angles in radians. The range of ``angles`` must be either
:math:`[0, 2\pi]` or :math:`[-\pi, \pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
w : array_like
Number of incidences per bins (i.e. "weights"), in case of binned angle
data.
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
axis : int or None
Compute along this dimension. Default is the first axis (0).
Returns
-------
r : float
Circular mean vector length.
See also
--------
pingouin.circ_mean
Notes
-----
The length of the mean resultant vector is a crucial quantity for the
measurement of circular spread or hypothesis testing in directional
statistics. The closer it is to one, the more concentrated the data
sample is around the mean direction (Berens 2009).
The circular vector length of a set of angles :math:`\alpha` is defined
by:
.. math::
\bar{\alpha} = \frac{1}{N}\left \| \sum_{j=1}^n
\exp(i \cdot \alpha_j) \right \|
Missing values in ``angles`` are omitted from the calculations.
References
----------
* https://en.wikipedia.org/wiki/Mean_of_circular_quantities
* Berens, P. (2009). CircStat: A MATLAB Toolbox for Circular
Statistics. Journal of Statistical Software, Articles, 31(10),
1–21. https://doi.org/10.18637/jss.v031.i10
Examples
--------
1. Mean resultant vector length of a 1-D array of angles, in radians
>>> import pingouin as pg
>>> angles = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> r = pg.circ_r(angles)
>>> round(r, 4)
0.4972
Note that there is a close relationship between the vector length and the
circular standard deviation, i.e. :math:`\sigma = \sqrt{-2 \ln R}`:
>>> import numpy as np
>>> round(np.sqrt(-2 * np.log(r)), 4)
1.1821
which gives similar result as SciPy built-in function:
>>> from scipy.stats import circstd
>>> round(circstd(angles), 4)
1.1821
Sanity check: if all angles are the same, the vector length should be one:
>>> angles = [3.14, 3.14, 3.14, 3.14]
>>> round(pg.circ_r(angles), 4)
1.0
2. Using a 2-D array of angles in degrees
>>> np.random.seed(123)
>>> deg = np.random.randint(low=0, high=360, size=(3, 5))
>>> deg
array([[322, 98, 230, 17, 83],
[106, 123, 57, 214, 225],
[ 96, 113, 126, 47, 73]])
We first need to convert from degrees to radians:
>>> rad = np.round(pg.convert_angles(deg, low=0, high=360), 4)
>>> rad
array([[-0.6632, 1.7104, -2.2689, 0.2967, 1.4486],
[ 1.85 , 2.1468, 0.9948, -2.5482, -2.3562],
[ 1.6755, 1.9722, 2.1991, 0.8203, 1.2741]])
>>> pg.circ_r(rad) # On the first axis (default)
array([0.46695499, 0.98398294, 0.3723287 , 0.31103746, 0.42527149])
>>> pg.circ_r(rad, axis=-1) # On the last axis (default)
array([0.28099998, 0.45456096, 0.88261161])
>>> round(pg.circ_r(rad, axis=None), 4) # Across the entire array
0.4486
Missing values are omitted from the calculations:
>>> rad[0, 0] = np.nan
>>> pg.circ_r(rad)
array([0.99619613, 0.98398294, 0.3723287 , 0.31103746, 0.42527149])
3. Using binned angles
>>> np.random.seed(123)
>>> nbins = 18 # Number of bins to divide the unit circle
>>> angles_bins = np.linspace(0, 2 * np.pi, nbins)
>>> # w represents the number of incidences per bins, or "weights".
>>> w = np.random.randint(low=0, high=5, size=angles_bins.size)
>>> round(pg.circ_r(angles_bins, w), 4)
0.3642
def circ_r(angles, w=None, d=None, axis=0):
    """Mean resultant vector length for circular data.

    Parameters
    ----------
    angles : array_like
        Samples of angles in radians. The range must be either
        [0, 2*pi] or [-pi, pi]; use :py:func:`pingouin.convert_angles`
        first if the data are expressed in degrees or hours.
    w : array_like
        Number of incidences per bin (i.e. "weights"), for binned angle
        data. Must have the same shape as ``angles``. Defaults to equal
        weights.
    d : float
        Spacing (in radians) of bin centers for binned data. If supplied,
        a correction factor is applied to reduce bias in the estimate of r.
    axis : int or None
        Compute along this dimension. Default is the first axis (0).

    Returns
    -------
    r : float
        Circular mean vector length, in [0, 1]. The closer to one, the
        more concentrated the sample is around the mean direction
        (Berens 2009).

    See also
    --------
    pingouin.circ_mean

    Notes
    -----
    The vector length is the norm of the (weighted) mean of the unit
    vectors exp(i * angle). Missing values in ``angles`` are omitted
    from the calculations.

    Examples
    --------
    >>> import pingouin as pg
    >>> angles = [0.785, 1.570, 3.141, 0.839, 5.934]
    >>> round(pg.circ_r(angles), 4)
    0.4972
    """
    angles = np.asarray(angles)
    _checkangles(angles)  # Angles must already be expressed in radians.
    if w is None:
        w = np.ones(angles.shape)
    else:
        w = np.asarray(w)
    assert angles.shape == w.shape, "Input dimensions do not match."
    # Propagate NaNs from angles into the weights, otherwise nansum(w)
    # would count weights attached to missing angles.
    w = w.astype(float)
    w[np.isnan(angles)] = np.nan
    # Weighted resultant vector, normalized by the total weight.
    resultant = np.nansum(np.multiply(w, np.exp(1j * angles)), axis=axis)
    r = np.abs(resultant) / np.nansum(w, axis=axis)
    # For binned data with known spacing, apply the bias-correction factor.
    if d is not None:
        r = (d / 2 / np.sin(d / 2)) * r
    return r
| (angles, w=None, d=None, axis=0) |
31,987 | pingouin.circular | circ_rayleigh | Rayleigh test for non-uniformity of circular data.
Parameters
----------
angles : 1-D array_like
Samples of angles in radians. The range of ``angles`` must be either
:math:`[0, 2\pi]` or :math:`[-\pi, \pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
w : array_like
Number of incidences per bins (i.e. "weights"), in case of binned angle
data.
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
Returns
-------
z : float
Z-statistic
pval : float
P-value
Notes
-----
The Rayleigh test asks how large the resultant vector length R must be
to indicate a non-uniform distribution (Fisher 1995).
H0: the population is uniformly distributed around the circle
HA: the population is not distributed uniformly around the circle
The assumptions for the Rayleigh test are that (1) the distribution has
only one mode and (2) the data is sampled from a von Mises distribution.
Examples
--------
1. Simple Rayleigh test for non-uniformity of circular data.
>>> from pingouin import circ_rayleigh
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> z, pval = circ_rayleigh(x)
>>> print(round(z, 3), round(pval, 6))
1.236 0.304844
2. Specifying w and d
>>> z, pval = circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2)
>>> print(round(z, 3), round(pval, 6))
0.278 0.806997
def circ_rayleigh(angles, w=None, d=None):
    """Rayleigh test for non-uniformity of circular data.

    Parameters
    ----------
    angles : 1-D array_like
        Samples of angles in radians. The range of ``angles`` must be either
        :math:`[0, 2\\pi]` or :math:`[-\\pi, \\pi]`. If ``angles`` is not
        expressed in radians (e.g. degrees or 24-hours), please use the
        :py:func:`pingouin.convert_angles` function prior to using the present
        function.
    w : array_like
        Number of incidences per bins (i.e. "weights"), in case of binned angle
        data.
    d : float
        Spacing (in radians) of bin centers for binned data. If supplied,
        a correction factor is used to correct for bias in the estimation
        of r.

    Returns
    -------
    z : float
        Z-statistic
    pval : float
        P-value

    Notes
    -----
    The Rayleigh test asks how large the resultant vector length R must be
    to indicate a non-uniform distribution (Fisher 1995).

    H0: the population is uniformly distributed around the circle

    HA: the population is not distributed uniformly around the circle

    The assumptions for the Rayleigh test are that (1) the distribution has
    only one mode and (2) the data is sampled from a von Mises distribution.

    Examples
    --------
    1. Simple Rayleigh test for non-uniformity of circular data.

    >>> from pingouin import circ_rayleigh
    >>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
    >>> z, pval = circ_rayleigh(x)
    >>> print(round(z, 3), round(pval, 6))
    1.236 0.304844

    2. Specifying w and d

    >>> z, pval = circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2)
    >>> print(round(z, 3), round(pval, 6))
    0.278 0.806997
    """
    angles = np.asarray(angles)
    _checkangles(angles)  # Check that angles is in radians
    if w is None:
        r = circ_r(angles)
        n = len(angles)
    else:
        # Binned data: the effective sample size is the total weight.
        assert len(angles) == len(w), "Input dimensions do not match"
        r = circ_r(angles, w, d)
        n = np.sum(w)
    # Compute Rayleigh's statistic
    R = n * r
    z = (R**2) / n
    # Compute p value using approximation in Zar (1999), p. 617
    pval = np.exp(np.sqrt(1 + 4 * n + 4 * (n**2 - R**2)) - (1 + 2 * n))
    return z, pval
| (angles, w=None, d=None) |
31,988 | pingouin.circular | circ_vtest | V test for non-uniformity of circular data with a specified
mean direction.
Parameters
----------
angles : 1-D array_like
Samples of angles in radians. The range of ``angles`` must be either
:math:`[0, 2\pi]` or :math:`[-\pi, \pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
dir : float
Suspected mean direction (angle in radians).
w : array_like
Number of incidences per bins (i.e. "weights"), in case of binned angle
data.
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
Returns
-------
V : float
V-statistic
pval : float
P-value
Notes
-----
H0: the population is uniformly distributed around the circle.
HA: the population is not distributed uniformly around the circle but
has a mean of dir.
Note: Not rejecting H0 may mean that the population is uniformly
distributed around the circle OR that it has a mode but that this mode
is not centered at dir.
The V test has more power than the Rayleigh test and is preferred if
there is reason to believe in a specific mean direction.
Adapted from the Matlab Circular Statistics Toolbox.
Examples
--------
1. V-test for non-uniformity of circular data.
>>> from pingouin import circ_vtest
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> v, pval = circ_vtest(x, dir=1)
>>> print(round(v, 3), pval)
2.486 0.05794648732225438
2. Specifying w and d
>>> v, pval = circ_vtest(x, dir=0.5, w=[.1, .2, .3, .4, .5], d=0.2)
>>> print(round(v, 3), round(pval, 5))
0.637 0.23086
def circ_vtest(angles, dir=0.0, w=None, d=None):
    """V test for non-uniformity of circular data with a specified
    mean direction.

    Parameters
    ----------
    angles : 1-D array_like
        Samples of angles in radians. The range of ``angles`` must be either
        :math:`[0, 2\\pi]` or :math:`[-\\pi, \\pi]`. If ``angles`` is not
        expressed in radians (e.g. degrees or 24-hours), please use the
        :py:func:`pingouin.convert_angles` function prior to using the present
        function.
    dir : float
        Suspected mean direction (angle in radians).
    w : array_like
        Number of incidences per bins (i.e. "weights"), in case of binned angle
        data.
    d : float
        Spacing (in radians) of bin centers for binned data. If supplied,
        a correction factor is used to correct for bias in the estimation
        of r.

    Returns
    -------
    V : float
        V-statistic
    pval : float
        P-value

    Notes
    -----
    H0: the population is uniformly distributed around the circle.

    HA: the population is not distributed uniformly around the circle but
    has a mean of dir.

    Note: Not rejecting H0 may mean that the population is uniformly
    distributed around the circle OR that it has a mode but that this mode
    is not centered at dir.

    The V test has more power than the Rayleigh test and is preferred if
    there is reason to believe in a specific mean direction.

    Adapted from the Matlab Circular Statistics Toolbox.

    Examples
    --------
    1. V-test for non-uniformity of circular data.

    >>> from pingouin import circ_vtest
    >>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
    >>> v, pval = circ_vtest(x, dir=1)
    >>> print(round(v, 3), pval)
    2.486 0.05794648732225438

    2. Specifying w and d

    >>> v, pval = circ_vtest(x, dir=0.5, w=[.1, .2, .3, .4, .5], d=0.2)
    >>> print(round(v, 3), round(pval, 5))
    0.637 0.23086
    """
    angles = np.asarray(angles)

    # Resultant vector length, mean direction and effective sample size,
    # with or without binned weights.
    if w is None:
        n = len(angles)
        r = circ_r(angles)
        mu = circ_mean(angles)
    else:
        assert len(angles) == len(w), "Input dimensions do not match"
        n = np.sum(w)
        r = circ_r(angles, w, d)
        mu = circ_mean(angles, w)

    # Project the resultant vector R = n * r onto the suspected direction.
    v = n * r * np.cos(mu - dir)

    # One-sided p-value from the standard normal approximation.
    pval = 1 - norm.cdf(v * np.sqrt(2 / n))
    return v, pval
| (angles, dir=0.0, w=None, d=None) |
31,990 | pingouin.nonparametric | cochran | Cochran Q test. A special case of the Friedman test when the dependent
variable is binary.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Both wide and long-format dataframe are supported for this test.
dv : string
Name of column containing the dependent variable (only required if ``data`` is in
long format).
within : string
Name of column containing the within-subject factor (only required if ``data`` is in
long format). Two or more within-factor are not currently supported.
subject : string
Name of column containing the subject/rater identifier (only required if ``data`` is in
long format).
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'Q'``: The Cochran Q statistic
* ``'p-unc'``: Uncorrected p-value
* ``'dof'``: degrees of freedom
Notes
-----
The Cochran Q test [1]_ is a non-parametric test for ANOVA with repeated
measures where the dependent variable is binary.
The Q statistics is defined as:
.. math:: Q = \frac{(r-1)(r\sum_j^rx_j^2-N^2)}{rN-\sum_i^nx_i^2}
where :math:`N` is the total sum of all observations, :math:`j=1,...,r`
where :math:`r` is the number of repeated measures, :math:`i=1,...,n` where
:math:`n` is the number of observations per condition.
The p-value is then approximated using a chi-square distribution with
:math:`r-1` degrees of freedom:
.. math:: Q \sim \chi^2(r-1)
Data are expected to be in long-format. Missing values are automatically removed using a
strict listwise approach (= complete-case analysis). In other words, any subject with one or
more missing value(s) is completely removed from the dataframe prior to running the
test.
References
----------
.. [1] Cochran, W.G., 1950. The comparison of percentages in matched
samples. Biometrika 37, 256–266.
https://doi.org/10.1093/biomet/37.3-4.256
Examples
--------
Compute the Cochran Q test for repeated measurements.
>>> from pingouin import cochran, read_dataset
>>> df = read_dataset('cochran')
>>> cochran(data=df, dv='Energetic', within='Time', subject='Subject')
Source dof Q p-unc
cochran Time 2 6.705882 0.034981
Same but using a wide-format dataframe
>>> df_wide = df.pivot_table(index="Subject", columns="Time", values="Energetic")
>>> cochran(df_wide)
Source dof Q p-unc
cochran Within 2 6.705882 0.034981
def cochran(data=None, dv=None, within=None, subject=None):
    """Cochran Q test. A special case of the Friedman test when the dependent
    variable is binary.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Both wide and long-format dataframe are supported for this test.
    dv : string
        Name of column containing the dependent variable (only required if ``data`` is in
        long format).
    within : string
        Name of column containing the within-subject factor (only required if ``data`` is in
        long format). Two or more within-factor are not currently supported.
    subject : string
        Name of column containing the subject/rater identifier (only required if ``data`` is in
        long format).

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'Q'``: The Cochran Q statistic
        * ``'p-unc'``: Uncorrected p-value
        * ``'dof'``: degrees of freedom

    Notes
    -----
    The Cochran Q test [1]_ is a non-parametric test for ANOVA with repeated
    measures where the dependent variable is binary.

    The Q statistics is defined as:

    .. math:: Q = \\frac{(r-1)(r\\sum_j^rx_j^2-N^2)}{rN-\\sum_i^nx_i^2}

    where :math:`N` is the total sum of all observations, :math:`j=1,...,r`
    where :math:`r` is the number of repeated measures, :math:`i=1,...,n` where
    :math:`n` is the number of observations per condition.

    The p-value is then approximated using a chi-square distribution with
    :math:`r-1` degrees of freedom:

    .. math:: Q \\sim \\chi^2(r-1)

    Data are expected to be in long-format. Missing values are automatically removed using a
    strict listwise approach (= complete-case analysis). In other words, any subject with one or
    more missing value(s) is completely removed from the dataframe prior to running the
    test.

    References
    ----------
    .. [1] Cochran, W.G., 1950. The comparison of percentages in matched
           samples. Biometrika 37, 256–266.
           https://doi.org/10.1093/biomet/37.3-4.256

    Examples
    --------
    Compute the Cochran Q test for repeated measurements.

    >>> from pingouin import cochran, read_dataset
    >>> df = read_dataset('cochran')
    >>> cochran(data=df, dv='Energetic', within='Time', subject='Subject')
            Source  dof         Q     p-unc
    cochran   Time    2  6.705882  0.034981

    Same but using a wide-format dataframe

    >>> df_wide = df.pivot_table(index="Subject", columns="Time", values="Energetic")
    >>> cochran(df_wide)
             Source  dof         Q     p-unc
    cochran  Within    2  6.705882  0.034981
    """
    # Convert from wide to long-format, if needed
    # (wide format is assumed when dv / within / subject are all left to None:
    # each column is then treated as one level of the within factor, and each
    # row as one subject).
    if all([v is None for v in [dv, within, subject]]):
        assert isinstance(data, pd.DataFrame)
        data = data._get_numeric_data().dropna()  # Listwise deletion of missing values
        assert data.shape[0] > 2, "Data must have at least 3 non-missing rows."
        assert data.shape[1] > 1, "Data must contain at least two columns."
        data["Subj"] = np.arange(data.shape[0])
        data = data.melt(id_vars="Subj", var_name="Within", value_name="DV")
        subject, within, dv = "Subj", "Within", "DV"
    # Check data
    data = _check_dataframe(dv=dv, within=within, data=data, subject=subject, effects="within")
    assert not data[within].isnull().any(), "Cannot have missing values in `within`."
    assert not data[subject].isnull().any(), "Cannot have missing values in `subject`."
    # Pivot and melt the table. This has several effects:
    # 1) Force missing values to be explicit (a NaN cell is created)
    # 2) Automatic collapsing to the mean if multiple within factors are present
    # 3) If using dropna, remove rows with missing values (listwise deletion).
    # The latter is the same behavior as JASP (= strict complete-case analysis).
    data_piv = data.pivot_table(index=subject, columns=within, values=dv, observed=True)
    data_piv = data_piv.dropna()
    data = data_piv.melt(ignore_index=False, value_name=dv).reset_index()
    # Groupby and extract size
    # grp sums are the per-condition totals (x_j in the formula);
    # grp_s sums are the per-subject totals (x_i in the formula).
    grp = data.groupby(within, observed=True)[dv]
    grp_s = data.groupby(subject, observed=True)[dv]
    k = data[within].nunique()
    dof = k - 1
    # n = grp.count().unique()[0]
    # Q statistic and p-value
    # Q = (r-1) * (r * sum_j(x_j^2) - N^2) / (r * N - sum_i(x_i^2)),
    # then approximated by a chi-square distribution with k-1 dof.
    q = (dof * (k * np.sum(grp.sum() ** 2) - grp.sum().sum() ** 2)) / (
        k * grp.sum().sum() - np.sum(grp_s.sum() ** 2)
    )
    p_unc = scipy.stats.chi2.sf(q, dof)
    # Create output dataframe
    stats = pd.DataFrame({"Source": within, "dof": dof, "Q": q, "p-unc": p_unc}, index=["cochran"])
    return _postprocess_dataframe(stats)
| (data=None, dv=None, within=None, subject=None) |
31,991 | pingouin.effsize | compute_bootci | Bootstrapped confidence intervals of univariate and bivariate functions.
Parameters
----------
x : 1D-array or list
First sample. Required for both bivariate and univariate functions.
y : 1D-array, list, or None
Second sample. Required only for bivariate functions.
func : str or custom function
Function to compute the bootstrapped statistic. Accepted string values are:
* ``'pearson'``: Pearson correlation (bivariate, paired x and y)
* ``'spearman'``: Spearman correlation (bivariate, paired x and y)
* ``'cohen'``: Cohen d effect size (bivariate, paired or unpaired x and y)
* ``'hedges'``: Hedges g effect size (bivariate, paired or unpaired x and y)
* ``'mean'``: Mean (univariate = only x)
* ``'std'``: Standard deviation (univariate)
* ``'var'``: Variance (univariate)
method : str
Method to compute the confidence intervals (see Notes):
* ``'cper'``: Bias-corrected percentile method (default)
* ``'norm'``: Normal approximation with bootstrapped bias and standard error
* ``'per'``: Simple percentile
paired : boolean
Indicates whether x and y are paired or not. For example, for correlation functions or
paired T-test, x and y are assumed to be paired. Pingouin will resample the pairs
(x_i, y_i) when paired=True, and resample x and y separately when paired=False.
If paired=True, x and y must have the same number of elements.
confidence : float
Confidence level (0.95 = 95%)
n_boot : int
Number of bootstrap iterations. The higher, the better, the slower.
decimals : int
Number of rounded decimals.
seed : int or None
Random seed for generating bootstrap samples.
return_dist : boolean
If True, return the confidence intervals and the bootstrapped distribution (e.g. for
plotting purposes).
Returns
-------
ci : array
Bootstrapped confidence intervals.
Notes
-----
Results have been tested against the
`bootci <https://www.mathworks.com/help/stats/bootci.html>`_ Matlab function.
Since version 1.7, SciPy also includes a built-in bootstrap function
:py:func:`scipy.stats.bootstrap`. The SciPy implementation has two advantages over Pingouin: it
is faster when using ``vectorized=True``, and it supports the bias-corrected and accelerated
(BCa) confidence intervals for univariate functions. However, unlike Pingouin, it does not
return the bootstrap distribution.
The percentile bootstrap method (``per``) is defined as the
:math:`100 \times \frac{\alpha}{2}` and :math:`100 \times (1 - \frac{\alpha}{2})`
percentiles of the distribution of :math:`\theta` estimates obtained from resampling, where
:math:`\alpha` is the level of significance (1 - confidence, default = 0.05 for 95% CIs).
The bias-corrected percentile method (``cper``) corrects for bias of the bootstrap
distribution. This method is different from the BCa method — the default in Matlab and SciPy —
which corrects for both bias and skewness of the bootstrap distribution using jackknife
resampling.
The normal approximation method (``norm``) calculates the confidence intervals with the
standard normal distribution using bootstrapped bias and standard error.
References
----------
* DiCiccio, T. J., & Efron, B. (1996). Bootstrap confidence intervals. Statistical science,
189-212.
* Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their application (Vol. 1).
Cambridge university press.
* Jung, Lee, Gupta, & Cho (2019). Comparison of bootstrap confidence interval methods for
GSCA using a Monte Carlo simulation. Frontiers in psychology, 10, 2215.
Examples
--------
1. Bootstrapped 95% confidence interval of a Pearson correlation
>>> import pingouin as pg
>>> import numpy as np
>>> rng = np.random.default_rng(42)
>>> x = rng.normal(loc=4, scale=2, size=100)
>>> y = rng.normal(loc=3, scale=1, size=100)
>>> stat = np.corrcoef(x, y)[0][1]
>>> ci = pg.compute_bootci(x, y, func='pearson', paired=True, seed=42, decimals=4)
>>> print(round(stat, 4), ci)
0.0945 [-0.098 0.2738]
Let's compare to SciPy's built-in bootstrap function
>>> from scipy.stats import bootstrap
>>> bt_scipy = bootstrap(
... data=(x, y), statistic=lambda x, y: np.corrcoef(x, y)[0][1],
... method="basic", vectorized=False, n_resamples=2000, paired=True, random_state=42)
>>> np.round(bt_scipy.confidence_interval, 4)
array([-0.0952, 0.2883])
2. Bootstrapped 95% confidence interval of a Cohen d
>>> stat = pg.compute_effsize(x, y, eftype='cohen')
>>> ci = pg.compute_bootci(x, y, func='cohen', seed=42, decimals=3)
>>> print(round(stat, 4), ci)
0.7009 [0.403 1.009]
3. Bootstrapped confidence interval of a standard deviation (univariate)
>>> import numpy as np
>>> stat = np.std(x, ddof=1)
>>> ci = pg.compute_bootci(x, func='std', seed=123)
>>> print(round(stat, 4), ci)
1.5534 [1.38 1.8 ]
Compare to SciPy's built-in bootstrap function, which returns the bias-corrected and
accelerated CIs (see Notes).
>>> def std(x, axis):
... return np.std(x, ddof=1, axis=axis)
>>> bt_scipy = bootstrap(data=(x, ), statistic=std, n_resamples=2000, random_state=123)
>>> np.round(bt_scipy.confidence_interval, 2)
array([1.39, 1.81])
Changing the confidence intervals type in Pingouin
>>> pg.compute_bootci(x, func='std', seed=123, method="norm")
array([1.37, 1.76])
>>> pg.compute_bootci(x, func='std', seed=123, method="percentile")
array([1.35, 1.75])
4. Bootstrapped confidence interval using a custom univariate function
>>> from scipy.stats import skew
>>> round(skew(x), 4), pg.compute_bootci(x, func=skew, n_boot=10000, seed=123)
(-0.137, array([-0.55, 0.32]))
5. Bootstrapped confidence interval using a custom bivariate function. Here, x and y are not
paired and can therefore have different sizes.
>>> def mean_diff(x, y):
... return np.mean(x) - np.mean(y)
>>> y2 = rng.normal(loc=3, scale=1, size=200) # y2 has 200 samples, x has 100
>>> ci = pg.compute_bootci(x, y2, func=mean_diff, n_boot=10000, seed=123)
>>> print(round(mean_diff(x, y2), 2), ci)
0.88 [0.54 1.21]
We can also get the bootstrapped distribution
>>> ci, bt = pg.compute_bootci(x, y2, func=mean_diff, n_boot=10000, return_dist=True, seed=9)
>>> print(f"The bootstrap distribution has {bt.size} samples. The mean and standard "
... f"{bt.mean():.4f} ± {bt.std():.4f}")
The bootstrap distribution has 10000 samples. The mean and standard 0.8807 ± 0.1704
def compute_bootci(
    x,
    y=None,
    func=None,
    method="cper",
    paired=False,
    confidence=0.95,
    n_boot=2000,
    decimals=2,
    seed=None,
    return_dist=False,
):
    """Bootstrapped confidence intervals of univariate and bivariate functions.

    Parameters
    ----------
    x : 1D-array or list
        First sample. Required for both bivariate and univariate functions.
    y : 1D-array, list, or None
        Second sample. Required only for bivariate functions.
    func : str or custom function
        Function to compute the bootstrapped statistic. Accepted string values are:

        * ``'pearson'``: Pearson correlation (bivariate, paired x and y)
        * ``'spearman'``: Spearman correlation (bivariate, paired x and y)
        * ``'cohen'``: Cohen d effect size (bivariate, paired or unpaired x and y)
        * ``'hedges'``: Hedges g effect size (bivariate, paired or unpaired x and y)
        * ``'mean'``: Mean (univariate = only x)
        * ``'std'``: Standard deviation (univariate)
        * ``'var'``: Variance (univariate)
    method : str
        Method to compute the confidence intervals (see Notes):

        * ``'cper'``: Bias-corrected percentile method (default)
        * ``'norm'``: Normal approximation with bootstrapped bias and standard error
        * ``'per'``: Simple percentile
    paired : boolean
        Indicates whether x and y are paired or not. For example, for correlation functions or
        paired T-test, x and y are assumed to be paired. Pingouin will resample the pairs
        (x_i, y_i) when paired=True, and resample x and y separately when paired=False.
        If paired=True, x and y must have the same number of elements.
    confidence : float
        Confidence level (0.95 = 95%)
    n_boot : int
        Number of bootstrap iterations. The higher, the better, the slower.
    decimals : int
        Number of rounded decimals.
    seed : int or None
        Random seed for generating bootstrap samples.
    return_dist : boolean
        If True, return the confidence intervals and the bootstrapped distribution (e.g. for
        plotting purposes).

    Returns
    -------
    ci : array
        Bootstrapped confidence intervals.

    Notes
    -----
    Results have been tested against the
    `bootci <https://www.mathworks.com/help/stats/bootci.html>`_ Matlab function.

    Since version 1.7, SciPy also includes a built-in bootstrap function
    :py:func:`scipy.stats.bootstrap`. The SciPy implementation has two advantages over Pingouin: it
    is faster when using ``vectorized=True``, and it supports the bias-corrected and accelerated
    (BCa) confidence intervals for univariate functions. However, unlike Pingouin, it does not
    return the bootstrap distribution.

    The percentile bootstrap method (``per``) is defined as the
    :math:`100 \\times \\frac{\\alpha}{2}` and :math:`100 \\times (1 - \\frac{\\alpha}{2})`
    percentiles of the distribution of :math:`\\theta` estimates obtained from resampling, where
    :math:`\\alpha` is the level of significance (1 - confidence, default = 0.05 for 95% CIs).

    The bias-corrected percentile method (``cper``) corrects for bias of the bootstrap
    distribution. This method is different from the BCa method — the default in Matlab and SciPy —
    which corrects for both bias and skewness of the bootstrap distribution using jackknife
    resampling.

    The normal approximation method (``norm``) calculates the confidence intervals with the
    standard normal distribution using bootstrapped bias and standard error.

    References
    ----------
    * DiCiccio, T. J., & Efron, B. (1996). Bootstrap confidence intervals. Statistical science,
      189-212.

    * Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their application (Vol. 1).
      Cambridge university press.

    * Jung, Lee, Gupta, & Cho (2019). Comparison of bootstrap confidence interval methods for
      GSCA using a Monte Carlo simulation. Frontiers in psychology, 10, 2215.

    Examples
    --------
    1. Bootstrapped 95% confidence interval of a Pearson correlation

    >>> import pingouin as pg
    >>> import numpy as np
    >>> rng = np.random.default_rng(42)
    >>> x = rng.normal(loc=4, scale=2, size=100)
    >>> y = rng.normal(loc=3, scale=1, size=100)
    >>> stat = np.corrcoef(x, y)[0][1]
    >>> ci = pg.compute_bootci(x, y, func='pearson', paired=True, seed=42, decimals=4)
    >>> print(round(stat, 4), ci)
    0.0945 [-0.098   0.2738]

    Let's compare to SciPy's built-in bootstrap function

    >>> from scipy.stats import bootstrap
    >>> bt_scipy = bootstrap(
    ...     data=(x, y), statistic=lambda x, y: np.corrcoef(x, y)[0][1],
    ...     method="basic", vectorized=False, n_resamples=2000, paired=True, random_state=42)
    >>> np.round(bt_scipy.confidence_interval, 4)
    array([-0.0952,  0.2883])

    2. Bootstrapped 95% confidence interval of a Cohen d

    >>> stat = pg.compute_effsize(x, y, eftype='cohen')
    >>> ci = pg.compute_bootci(x, y, func='cohen', seed=42, decimals=3)
    >>> print(round(stat, 4), ci)
    0.7009 [0.403 1.009]

    3. Bootstrapped confidence interval of a standard deviation (univariate)

    >>> import numpy as np
    >>> stat = np.std(x, ddof=1)
    >>> ci = pg.compute_bootci(x, func='std', seed=123)
    >>> print(round(stat, 4), ci)
    1.5534 [1.38 1.8 ]

    Compare to SciPy's built-in bootstrap function, which returns the bias-corrected and
    accelerated CIs (see Notes).

    >>> def std(x, axis):
    ...     return np.std(x, ddof=1, axis=axis)
    >>> bt_scipy = bootstrap(data=(x, ), statistic=std, n_resamples=2000, random_state=123)
    >>> np.round(bt_scipy.confidence_interval, 2)
    array([1.39, 1.81])

    Changing the confidence intervals type in Pingouin

    >>> pg.compute_bootci(x, func='std', seed=123, method="norm")
    array([1.37, 1.76])

    >>> pg.compute_bootci(x, func='std', seed=123, method="percentile")
    array([1.35, 1.75])

    4. Bootstrapped confidence interval using a custom univariate function

    >>> from scipy.stats import skew
    >>> round(skew(x), 4), pg.compute_bootci(x, func=skew, n_boot=10000, seed=123)
    (-0.137, array([-0.55,  0.32]))

    5. Bootstrapped confidence interval using a custom bivariate function. Here, x and y are not
    paired and can therefore have different sizes.

    >>> def mean_diff(x, y):
    ...     return np.mean(x) - np.mean(y)
    >>> y2 = rng.normal(loc=3, scale=1, size=200)  # y2 has 200 samples, x has 100
    >>> ci = pg.compute_bootci(x, y2, func=mean_diff, n_boot=10000, seed=123)
    >>> print(round(mean_diff(x, y2), 2), ci)
    0.88 [0.54 1.21]

    We can also get the bootstrapped distribution

    >>> ci, bt = pg.compute_bootci(x, y2, func=mean_diff, n_boot=10000, return_dist=True, seed=9)
    >>> print(f"The bootstrap distribution has {bt.size} samples. The mean and standard "
    ...       f"{bt.mean():.4f} ± {bt.std():.4f}")
    The bootstrap distribution has 10000 samples. The mean and standard 0.8807 ± 0.1704
    """
    from inspect import isfunction, isroutine

    from scipy.stats import norm

    # Check other arguments
    assert isinstance(confidence, float)
    assert 0 < confidence < 1, "confidence must be between 0 and 1."
    assert method in ["norm", "normal", "percentile", "per", "cpercentile", "cper"]
    assert isfunction(func) or isinstance(func, str) or isroutine(func), (
        "func must be a function (e.g. np.mean, custom function) or a string (e.g. 'pearson'). "
        "See documentation for more details."
    )
    vectorizable = False

    # Check x
    x = np.asarray(x)
    nx = x.size
    assert x.ndim == 1, "x must be one-dimensional."
    assert nx > 1, "x must have more than one element."

    # Check y
    if y is not None:
        y = np.asarray(y)
        ny = y.size
        assert y.ndim == 1, "y must be one-dimensional."
        assert ny > 1, "y must have more than one element."
        if paired:
            assert nx == ny, "x and y must have the same number of elements when paired=True."

    # Check string functions
    if isinstance(func, str):
        func_str = "%s" % func
        if func == "pearson":
            assert paired, "Paired should be True if using correlation functions."

            def func(x, y):
                return pearsonr(x, y)[0]  # Faster than np.corrcoef

        elif func == "spearman":
            from scipy.stats import spearmanr

            assert paired, "Paired should be True if using correlation functions."

            def func(x, y):
                return spearmanr(x, y)[0]

        elif func in ["cohen", "hedges"]:
            from pingouin.effsize import compute_effsize

            def func(x, y):
                return compute_effsize(x, y, paired=paired, eftype=func_str)

        elif func == "mean":
            vectorizable = True

            def func(x):
                return np.mean(x, axis=0)

        elif func == "std":
            vectorizable = True

            def func(x):
                return np.std(x, ddof=1, axis=0)

        elif func == "var":
            vectorizable = True

            def func(x):
                return np.var(x, ddof=1, axis=0)

        else:
            raise ValueError("Function string not recognized.")

    # Bootstrap
    bootstat = np.empty(n_boot)
    rng = np.random.default_rng(seed)  # Random seed
    boot_x = rng.choice(np.arange(nx), size=(nx, n_boot), replace=True)

    if y is not None:
        reference = func(x, y)
        if paired:
            for i in range(n_boot):
                # Note that here we use a bootstrapping procedure with replacement
                # of all the pairs (Xi, Yi). This is NOT suited for
                # hypothesis testing such as p-value estimation). Instead, for the
                # latter, one must only shuffle the Y values while keeping the X
                # values constant, i.e.:
                # >>> boot_x = rng.random_sample((n_boot, n)).argsort(axis=1)
                # >>> for i in range(n_boot):
                # >>>   bootstat[i] = func(x, y[boot_x[i, :]])
                bootstat[i] = func(x[boot_x[:, i]], y[boot_x[:, i]])
        else:
            boot_y = rng.choice(np.arange(ny), size=(ny, n_boot), replace=True)
            for i in range(n_boot):
                bootstat[i] = func(x[boot_x[:, i]], y[boot_y[:, i]])
    else:
        reference = func(x)
        if vectorizable:
            # Built-in string functions support axis=0 and can consume the full
            # (nx, n_boot) resampling matrix in a single vectorized call.
            bootstat = func(x[boot_x])
        else:
            for i in range(n_boot):
                bootstat[i] = func(x[boot_x[:, i]])

    # CONFIDENCE INTERVALS
    # See Matlab bootci function
    alpha = (1 - confidence) / 2
    if method in ["norm", "normal"]:
        # Normal approximation
        za = norm.ppf(alpha)  # ≈ -1.96 for alpha = 0.025 (95% CI)
        se = np.std(bootstat, ddof=1)
        bias = np.mean(bootstat - reference)
        ci = np.array([reference - bias + se * za, reference - bias - se * za])
    elif method in ["percentile", "per"]:
        # Simple percentile
        interval = 100 * np.array([alpha, 1 - alpha])
        ci = np.percentile(bootstat, interval)
    else:
        # Bias-corrected percentile bootstrap
        from pingouin.regression import _bias_corrected_ci

        ci = _bias_corrected_ci(bootstat, reference, alpha=(1 - confidence))

    ci = np.round(ci, decimals)
    if return_dist:
        return ci, bootstat
    else:
        return ci
| (x, y=None, func=None, method='cper', paired=False, confidence=0.95, n_boot=2000, decimals=2, seed=None, return_dist=False) |
31,992 | pingouin.effsize | compute_effsize | Calculate effect size between two set of observations.
Parameters
----------
x : np.array or list
First set of observations.
y : np.array or list
Second set of observations.
paired : boolean
If True, uses Cohen d-avg formula to correct for repeated measurements
(see Notes).
eftype : string
Desired output effect size.
Available methods are:
* ``'none'``: no effect size
* ``'cohen'``: Unbiased Cohen d
* ``'hedges'``: Hedges g
* ``'r'``: Pearson correlation coefficient
* ``'pointbiserialr'``: Point-biserial correlation
* ``'eta-square'``: Eta-square
* ``'odds-ratio'``: Odds ratio
* ``'AUC'``: Area Under the Curve
* ``'CLES'``: Common Language Effect Size
Returns
-------
ef : float
Effect size
See Also
--------
convert_effsize : Conversion between effect sizes.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
Missing values are automatically removed from the data. If ``x`` and ``y`` are paired, the
entire row is removed.
If ``x`` and ``y`` are independent, the Cohen :math:`d` is:
.. math::
d = \frac{\overline{X} - \overline{Y}}
{\sqrt{\frac{(n_{1} - 1)\sigma_{1}^{2} + (n_{2} - 1)
\sigma_{2}^{2}}{n1 + n2 - 2}}}
If ``x`` and ``y`` are paired, the Cohen :math:`d_{avg}` is computed:
.. math::
d_{avg} = \frac{\overline{X} - \overline{Y}}
{\sqrt{\frac{(\sigma_1^2 + \sigma_2^2)}{2}}}
The Cohen's d is a biased estimate of the population effect size, especially for small samples
(n < 20). It is often preferable to use the corrected Hedges :math:`g` instead:
.. math:: g = d \times (1 - \frac{3}{4(n_1 + n_2) - 9})
The common language effect size is the proportion of pairs where ``x`` is higher than ``y``
(calculated with a brute-force approach where each observation of ``x`` is paired to each
observation of ``y``, see :py:func:`pingouin.wilcoxon` for more details):
.. math:: \text{CL} = P(X > Y) + .5 \times P(X = Y)
For other effect sizes, Pingouin will first calculate a Cohen :math:`d` and then use the
:py:func:`pingouin.convert_effsize` to convert to the desired effect size.
References
----------
* Lakens, D., 2013. Calculating and reporting effect sizes to
facilitate cumulative science: a practical primer for t-tests and
ANOVAs. Front. Psychol. 4, 863. https://doi.org/10.3389/fpsyg.2013.00863
* Cumming, Geoff. Understanding the new statistics: Effect sizes,
confidence intervals, and meta-analysis. Routledge, 2013.
* https://osf.io/vbdah/
Examples
--------
1. Cohen d from two independent samples.
>>> import numpy as np
>>> import pingouin as pg
>>> x = [1, 2, 3, 4]
>>> y = [3, 4, 5, 6, 7]
>>> pg.compute_effsize(x, y, paired=False, eftype='cohen')
-1.707825127659933
The sign of the Cohen d will be opposite if we reverse the order of
``x`` and ``y``:
>>> pg.compute_effsize(y, x, paired=False, eftype='cohen')
1.707825127659933
2. Hedges g from two paired samples.
>>> x = [1, 2, 3, 4, 5, 6, 7]
>>> y = [1, 3, 5, 7, 9, 11, 13]
>>> pg.compute_effsize(x, y, paired=True, eftype='hedges')
-0.8222477210374874
3. Common Language Effect Size.
>>> pg.compute_effsize(x, y, eftype='cles')
0.2857142857142857
In other words, there are ~29% of pairs where ``x`` is higher than ``y``,
which means that there are ~71% of pairs where ``x`` is *lower* than ``y``.
This can be easily verified by changing the order of ``x`` and ``y``:
>>> pg.compute_effsize(y, x, eftype='cles')
0.7142857142857143
def compute_effsize(x, y, paired=False, eftype="cohen"):
    """Calculate the effect size between two sets of observations.

    Parameters
    ----------
    x : np.array or list
        First set of observations.
    y : np.array or list
        Second set of observations.
    paired : boolean
        If True, uses the Cohen d-avg formula to correct for repeated
        measurements (Cumming 2012; Lakens 2013).
    eftype : string
        Desired output effect size. One of ``'none'``, ``'cohen'``,
        ``'hedges'``, ``'r'``, ``'pointbiserialr'``, ``'eta-square'``,
        ``'odds-ratio'``, ``'AUC'`` or ``'CLES'``.

    Returns
    -------
    ef : float
        Effect size.

    Notes
    -----
    Missing values are removed before computing. When the samples are
    paired, the entire row is removed. Non-correlation effect sizes are
    derived from an intermediate Cohen's d via
    :py:func:`pingouin.convert_effsize`.
    """
    # Validate the requested effect-size label.
    if not _check_eftype(eftype):
        raise ValueError(f"Could not interpret input '{eftype}'")
    x, y = np.asarray(x), np.asarray(y)
    # A paired design requires equally-sized samples; fall back otherwise.
    if paired and x.size != y.size:
        warnings.warn("x and y have unequal sizes. Switching to paired == False.")
        paired = False
    # Drop missing values (whole rows when paired).
    x, y = remove_na(x, y, paired=paired)
    nx, ny = x.size, y.size
    if ny == 1:
        # One-sample test: standardized mean difference against a scalar.
        return (x.mean() - y) / x.std(ddof=1)
    label = eftype.lower()
    if label == "r":
        # Pearson correlation coefficient (useful for CI bootstrapping).
        return pearsonr(x, y)[0]
    if label == "cles":
        # Exact common-language effect size: brute-force pairwise comparison,
        # ties count for one half (see pingouin.wilcoxon).
        pairwise_diff = x[:, None] - y
        return np.where(pairwise_diff == 0, 0.5, pairwise_diff > 0).mean()
    # Cohen's d as intermediate, then convert to the requested type.
    if paired:
        # Cohen d-avg (Cumming 2012; Lakens 2013). Note the formula in
        # Lakens 2013 is wrong; Pingouin >= 0.3.4 uses Cumming 2012.
        denom = np.sqrt((x.var(ddof=1) + y.var(ddof=1)) / 2)
    else:
        # Pooled standard deviation (https://en.wikipedia.org/wiki/Effect_size)
        dof = nx + ny - 2
        denom = np.sqrt(((nx - 1) * x.var(ddof=1) + (ny - 1) * y.var(ddof=1)) / dof)
    d = (x.mean() - y.mean()) / denom
    return convert_effsize(d, "cohen", eftype, nx=nx, ny=ny)
| (x, y, paired=False, eftype='cohen') |
31,993 | pingouin.effsize | compute_effsize_from_t | Compute effect size from a T-value.
Parameters
----------
tval : float
T-value
nx, ny : int, optional
Group sample sizes.
N : int, optional
Total sample size (will not be used if nx and ny are specified)
eftype : string, optional
Desired output effect size.
Returns
-------
ef : float
Effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
convert_effsize : Conversion between effect sizes.
Notes
-----
If both nx and ny are specified, the formula to convert from *t* to *d* is:
.. math:: d = t * \sqrt{\frac{1}{n_x} + \frac{1}{n_y}}
If only N (total sample size) is specified, the formula is:
.. math:: d = \frac{2t}{\sqrt{N}}
Examples
--------
1. Compute effect size from a T-value when both sample sizes are known.
>>> from pingouin import compute_effsize_from_t
>>> tval, nx, ny = 2.90, 35, 25
>>> d = compute_effsize_from_t(tval, nx=nx, ny=ny, eftype='cohen')
>>> print(d)
0.7593982580212534
2. Compute effect size when only total sample size is known (nx+ny)
>>> tval, N = 2.90, 60
>>> d = compute_effsize_from_t(tval, N=N, eftype='cohen')
>>> print(d)
0.7487767802667672
def compute_effsize_from_t(tval, nx=None, ny=None, N=None, eftype="cohen"):
    """Compute an effect size from a T-value.

    Parameters
    ----------
    tval : float or int
        T-value.
    nx, ny : int, optional
        Group sample sizes.
    N : int, optional
        Total sample size (not used if ``nx`` and ``ny`` are specified).
    eftype : string, optional
        Desired output effect size.

    Returns
    -------
    ef : float
        Effect size.

    Raises
    ------
    ValueError
        If ``eftype`` is unknown, ``tval`` is not a real number, or neither
        (``nx``, ``ny``) nor ``N`` is provided.

    Notes
    -----
    With both group sizes (Lakens, 2013):

    .. math:: d = t * \\sqrt{\\frac{1}{n_x} + \\frac{1}{n_y}}

    With only the total sample size:

    .. math:: d = \\frac{2t}{\\sqrt{N}}
    """
    if not _check_eftype(eftype):
        err = f"Could not interpret input '{eftype}'"
        raise ValueError(err)
    # BUGFIX: integer T-values (e.g. tval=3) are perfectly valid input and
    # were previously rejected by a strict `isinstance(tval, float)` check.
    if not isinstance(tval, (int, float)):
        err = "T-value must be a float or an int"
        raise ValueError(err)
    # Compute Cohen d (Lakens, 2013)
    if nx is not None and ny is not None:
        d = tval * np.sqrt(1 / nx + 1 / ny)
    elif N is not None:
        d = 2 * tval / np.sqrt(N)
    else:
        raise ValueError("You must specify either nx + ny, or just N")
    return convert_effsize(d, "cohen", eftype, nx=nx, ny=ny)
| (tval, nx=None, ny=None, N=None, eftype='cohen') |
31,994 | pingouin.effsize | compute_esci | Parametric confidence intervals around a Cohen d or a correlation coefficient.
Parameters
----------
stat : float
Original effect size. Must be either a correlation coefficient or a Cohen-type effect size
(Cohen d or Hedges g).
nx, ny : int
Length of vector x and y.
paired : bool
Indicates if the effect size was estimated from a paired sample. This is only relevant for
cohen or hedges effect size.
eftype : string
Effect size type. Must be "r" (correlation) or "cohen" (Cohen d or Hedges g).
confidence : float
Confidence level (0.95 = 95%)
decimals : int
Number of rounded decimals.
alternative : string
Defines the alternative hypothesis, or tail for the correlation coefficient. Must be one of
"two-sided" (default), "greater" or "less". This parameter only has an effect if ``eftype``
is "r".
Returns
-------
ci : array
Desired converted effect size
Notes
-----
To compute the parametric confidence interval around a **Pearson r correlation** coefficient,
one must first apply a Fisher's r-to-z transformation:
.. math:: z = 0.5 \cdot \ln \frac{1 + r}{1 - r} = \text{arctanh}(r)
and compute the standard error:
.. math:: \text{SE} = \frac{1}{\sqrt{n - 3}}
where :math:`n` is the sample size.
The lower and upper confidence intervals - *in z-space* - are then given by:
.. math:: \text{ci}_z = z \pm \text{crit} \cdot \text{SE}
where :math:`\text{crit}` is the critical value of the normal distribution corresponding to
the desired confidence level (e.g. 1.96 in case of a 95% confidence interval).
These confidence intervals can then be easily converted back to *r-space*:
.. math::
\text{ci}_r = \frac{\exp(2 \cdot \text{ci}_z) - 1}
{\exp(2 \cdot \text{ci}_z) + 1} = \text{tanh}(\text{ci}_z)
A formula for calculating the confidence interval for a **Cohen d effect size** is given by
Hedges and Olkin (1985, p86). If the effect size estimate from the sample is :math:`d`, then
it follows a T distribution with standard error:
.. math::
\text{SE} = \sqrt{\frac{n_x + n_y}{n_x \cdot n_y} +
\frac{d^2}{2 (n_x + n_y)}}
where :math:`n_x` and :math:`n_y` are the sample sizes of the two groups.
In one-sample test or paired test, this becomes:
.. math::
\text{SE} = \sqrt{\frac{1}{n_x} + \frac{d^2}{2 n_x}}
The lower and upper confidence intervals are then given by:
.. math:: \text{ci}_d = d \pm \text{crit} \cdot \text{SE}
where :math:`\text{crit}` is the critical value of the T distribution corresponding to the
desired confidence level.
References
----------
* https://en.wikipedia.org/wiki/Fisher_transformation
* Hedges, L., and Ingram Olkin. "Statistical models for meta-analysis." (1985).
* http://www.leeds.ac.uk/educol/documents/00002182.htm
* https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5133225/
Examples
--------
1. Confidence interval of a Pearson correlation coefficient
>>> import pingouin as pg
>>> x = [3, 4, 6, 7, 5, 6, 7, 3, 5, 4, 2]
>>> y = [4, 6, 6, 7, 6, 5, 5, 2, 3, 4, 1]
>>> nx, ny = len(x), len(y)
>>> stat = pg.compute_effsize(x, y, eftype='r')
>>> ci = pg.compute_esci(stat=stat, nx=nx, ny=ny, eftype='r')
>>> print(round(stat, 4), ci)
0.7468 [0.27 0.93]
2. Confidence interval of a Cohen d
>>> stat = pg.compute_effsize(x, y, eftype='cohen')
>>> ci = pg.compute_esci(stat, nx=nx, ny=ny, eftype='cohen', decimals=3)
>>> print(round(stat, 4), ci)
0.1538 [-0.737 1.045]
def compute_esci(
    stat=None,
    nx=None,
    ny=None,
    paired=False,
    eftype="cohen",
    confidence=0.95,
    decimals=2,
    alternative="two-sided",
):
    """Parametric confidence interval around a Cohen d or a correlation coefficient.

    Parameters
    ----------
    stat : float
        Original effect size: a correlation coefficient or a Cohen-type
        effect size (Cohen d or Hedges g).
    nx, ny : int
        Length of vector x and y.
    paired : bool
        Whether the effect size came from a paired sample (only relevant
        for Cohen/Hedges effect sizes).
    eftype : string
        Effect size type: "r" (correlation) or "cohen" (Cohen d / Hedges g).
    confidence : float
        Confidence level (0.95 = 95%).
    decimals : int
        Number of rounded decimals.
    alternative : string
        "two-sided" (default), "greater" or "less". Only relevant when
        ``eftype`` is a correlation.

    Returns
    -------
    ci : array
        Lower and upper confidence bounds.

    Notes
    -----
    Correlations use the Fisher r-to-z transform with standard error
    :math:`1 / \\sqrt{n - 3}` and a normal critical value, then transform
    back with tanh. Cohen-type effect sizes use a T-distribution with the
    standard error given by Hedges & Olkin (1985, p86).
    """
    from scipy.stats import norm, t

    # --- Input validation -------------------------------------------------
    assert eftype.lower() in ["r", "pearson", "spearman", "cohen", "d", "g", "hedges"]
    assert alternative in [
        "two-sided",
        "greater",
        "less",
    ], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    assert stat is not None and nx is not None
    assert isinstance(confidence, float)
    assert 0 < confidence < 1, "confidence must be between 0 and 1."

    if eftype.lower() in ("r", "pearson", "spearman"):
        # Fisher r-to-z transform; CI built in z-space, then mapped back.
        zval = np.arctanh(stat)
        zse = 1 / np.sqrt(nx - 3)
        # See https://github.com/SurajGupta/r-source/blob/master/src/library/stats/R/cor.test.R
        if alternative == "two-sided":
            zcrit = np.abs(norm.ppf((1 - confidence) / 2))
            bounds = np.array([zval - zcrit * zse, zval + zcrit * zse])
        elif alternative == "greater":
            bounds = np.array([zval - norm.ppf(confidence) * zse, np.inf])
        else:  # alternative = "less"
            bounds = np.array([-np.inf, zval + norm.ppf(confidence) * zse])
        ci = np.tanh(bounds)  # transform back to r-space
    else:
        # Cohen d. Results differ from JASP, which uses a non-central T
        # distribution. See github.com/jasp-stats/jasp-issues/issues/525
        if ny == 1 or paired:
            # One-sample or paired design. Formulas per www.real-statistics.com;
            # results vary slightly from the R cohen.d function.
            se = np.sqrt(1 / nx + stat**2 / (2 * nx))
            dof = nx - 1
        else:
            # Independent two-sample design: matches R
            # cohen.d(..., paired = FALSE, noncentral = FALSE).
            se = np.sqrt(((nx + ny) / (nx * ny)) + (stat**2) / (2 * (nx + ny)))
            dof = nx + ny - 2
        tcrit = np.abs(t.ppf((1 - confidence) / 2, dof))
        ci = np.array([stat - tcrit * se, stat + tcrit * se])
    return np.round(ci, decimals)
| (stat=None, nx=None, ny=None, paired=False, eftype='cohen', confidence=0.95, decimals=2, alternative='two-sided') |
31,997 | pingouin.circular | convert_angles | Element-wise conversion of arbitrary-unit circular quantities
to radians.
.. versionadded:: 0.3.4
Parameters
----------
angles : array_like
Circular data.
low : float or int, optional
Low boundary for ``angles`` range. Default is 0.
high : float or int, optional
High boundary for ``angles`` range. Default is 360
(for degrees to radians conversion).
positive : boolean
If True, radians are mapped on the :math:`[0, 2\pi]`. Otherwise,
the resulting angles are mapped from :math:`[-\pi, \pi)` (default).
Returns
-------
radians : array_like
Circular data in radians.
Notes
-----
The formula to convert a set of angles :math:`\alpha` from an arbitrary
range :math:`[\text{high},\text{low}]` to radians
:math:`[0, 2\pi]` is:
.. math::
\alpha_r = \frac{2\pi\alpha}{\text{high} - \text{low}}
If ``positive=False`` (default), the resulting angles in
radians :math:`\alpha_r` are then wrapped to the :math:`[-\pi, \pi)`
range:
.. math::
(\text{angle} + \pi) \mod 2 \pi - \pi
Examples
--------
1. Convert degrees to radians
>>> from pingouin import convert_angles
>>> a = [0, 360, 180, 90, 45, 270]
>>> convert_angles(a, low=0, high=360)
array([ 0. , 0. , -3.14159265, 1.57079633, 0.78539816,
-1.57079633])
with ``positive=True``:
>>> convert_angles(a, low=0, high=360, positive=True)
array([0. , 6.28318531, 3.14159265, 1.57079633, 0.78539816,
4.71238898])
2. Convert hours (24h-format) to radians
>>> sleep_onset = [22.5, 23.25, 24, 0.5, 1]
>>> convert_angles(sleep_onset, low=0, high=24)
array([-0.39269908, -0.19634954, 0. , 0.13089969, 0.26179939])
3. Convert radians from :math:`[0, 2\pi]` to :math:`[-\pi, \pi)`:
>>> import numpy as np
>>> rad = [0.1, 3.14, 5, 2, 6]
>>> convert_angles(rad, low=0, high=2*np.pi)
array([ 0.1 , 3.14 , -1.28318531, 2. , -0.28318531])
4. Convert degrees from a 2-D array
>>> np.random.seed(123)
>>> deg = np.random.randint(low=0, high=360, size=(3, 4))
>>> convert_angles(deg)
array([[-0.66322512, 1.71042267, -2.26892803, 0.29670597],
[ 1.44862328, 1.85004901, 2.14675498, 0.99483767],
[-2.54818071, -2.35619449, 1.67551608, 1.97222205]])
def convert_angles(angles, low=0, high=360, positive=False):
    """Element-wise conversion of arbitrary-unit circular quantities to radians.

    .. versionadded:: 0.3.4

    Parameters
    ----------
    angles : array_like
        Circular data.
    low : float or int, optional
        Low boundary for ``angles`` range. Default is 0.
    high : float or int, optional
        High boundary for ``angles`` range. Default is 360
        (for degrees to radians conversion).
    positive : boolean
        If True, radians are mapped on the :math:`[0, 2\\pi]` range.
        Otherwise, the resulting angles are mapped on the
        :math:`[-\\pi, \\pi)` range (default).

    Returns
    -------
    radians : array_like
        Circular data in radians.

    Raises
    ------
    AssertionError
        If ``high - low`` is not strictly positive, or if any angle falls
        outside the ``[low, high]`` range.

    Notes
    -----
    Angles are first scaled by :math:`2\\pi / (\\text{high} - \\text{low})`,
    mapping the input range onto :math:`[0, 2\\pi]`. When
    ``positive=False``, the result is then wrapped to :math:`[-\\pi, \\pi)`
    with :math:`(\\alpha + \\pi) \\bmod 2\\pi - \\pi`.
    """
    assert isinstance(positive, bool)
    assert isinstance(high, (int, float)), "high must be numeric"
    assert isinstance(low, (int, float)), "low must be numeric"
    ptp = high - low
    assert ptp > 0, "high - low must be strictly positive."
    angles = np.asarray(angles)
    # BUGFIX: the two messages below were inverted ("angles cannot be >= low"
    # and "cannot be <= high"), contradicting the conditions actually checked.
    assert np.nanmin(angles) >= low, "angles cannot be < low."
    assert np.nanmax(angles) <= high, "angles cannot be > high."
    # Map to [0, 2pi] range
    rad = angles * (2 * np.pi) / ptp
    if not positive:
        # https://stackoverflow.com/a/29237626/10581531
        # Map to [-pi, pi) range:
        rad = (rad + np.pi) % (2 * np.pi) - np.pi  # [-pi, pi)
        # Map to (-pi, pi] range:
        # rad = np.angle(np.exp(1j * rad))
        # rad = -1 * ((-rad + np.pi) % (2 * np.pi) - np.pi)
    return rad
| (angles, low=0, high=360, positive=False) |
31,998 | pingouin.effsize | convert_effsize | Conversion between effect sizes.
Parameters
----------
ef : float
Original effect size.
input_type : string
Effect size type of ef. Must be ``'cohen'`` or ``'pointbiserialr'``.
output_type : string
Desired effect size type. Available methods are:
* ``'cohen'``: Unbiased Cohen d
* ``'hedges'``: Hedges g
* ``'pointbiserialr'``: Point-biserial correlation
* ``'eta-square'``: Eta-square
* ``'odds-ratio'``: Odds ratio
* ``'AUC'``: Area Under the Curve
* ``'none'``: pass-through (return ``ef``)
nx, ny : int, optional
Length of vector x and y. Required to convert to Hedges g.
Returns
-------
ef : float
Desired converted effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
The formula to convert from a `point-biserial correlation
<https://en.wikipedia.org/wiki/Point-biserial_correlation_coefficient>`_ **r** to **d** is
given in [1]_:
.. math:: d = \frac{2r_{pb}}{\sqrt{1 - r_{pb}^2}}
The formula to convert **d** to a point-biserial correlation **r** is given in [2]_:
.. math::
r_{pb} = \frac{d}{\sqrt{d^2 + \frac{(n_x + n_y)^2 - 2(n_x + n_y)}
{n_xn_y}}}
The formula to convert **d** to :math:`\eta^2` is given in [3]_:
.. math:: \eta^2 = \frac{(0.5 d)^2}{1 + (0.5 d)^2}
The formula to convert **d** to an odds-ratio is given in [4]_:
.. math:: \text{OR} = \exp (\frac{d \pi}{\sqrt{3}})
The formula to convert **d** to area under the curve is given in [5]_:
.. math:: \text{AUC} = \mathcal{N}_{cdf}(\frac{d}{\sqrt{2}})
References
----------
.. [1] Rosenthal, Robert. "Parametric measures of effect size."
The handbook of research synthesis 621 (1994): 231-244.
.. [2] McGrath, Robert E., and Gregory J. Meyer. "When effect sizes
disagree: the case of r and d." Psychological methods 11.4 (2006): 386.
.. [3] Cohen, Jacob. "Statistical power analysis for the behavioral
sciences. 2nd." (1988).
.. [4] Borenstein, Michael, et al. "Effect sizes for continuous data."
The handbook of research synthesis and meta-analysis 2 (2009): 221-235.
.. [5] Ruscio, John. "A probability-based measure of effect size:
Robustness to base rates and other factors." Psychological methods
13.1 (2008): 19.
Examples
--------
1. Convert from Cohen d to eta-square
>>> import pingouin as pg
>>> d = .45
>>> eta = pg.convert_effsize(d, 'cohen', 'eta-square')
>>> print(eta)
0.048185603807257595
2. Convert from Cohen d to Hedges g (requires the sample sizes of each
group)
>>> pg.convert_effsize(.45, 'cohen', 'hedges', nx=10, ny=10)
0.4309859154929578
3. Convert a point-biserial correlation to Cohen d
>>> rpb = 0.40
>>> d = pg.convert_effsize(rpb, 'pointbiserialr', 'cohen')
>>> print(d)
0.8728715609439696
4. Reverse operation: convert Cohen d to a point-biserial correlation
>>> pg.convert_effsize(d, 'cohen', 'pointbiserialr')
0.4000000000000001
def convert_effsize(ef, input_type, output_type, nx=None, ny=None):
    """Convert an effect size from one type to another.

    Parameters
    ----------
    ef : float
        Original effect size.
    input_type : string
        Effect size type of ``ef``. Must be ``'cohen'`` or
        ``'pointbiserialr'``.
    output_type : string
        Desired effect size type: ``'cohen'``, ``'hedges'``,
        ``'pointbiserialr'``, ``'eta-square'``, ``'odds-ratio'``, ``'AUC'``
        or ``'none'`` (pass-through).
    nx, ny : int, optional
        Length of vector x and y. Required to compute an exact Hedges g
        and an exact point-biserial correlation.

    Returns
    -------
    ef : float
        Converted effect size.

    Notes
    -----
    All conversions go through Cohen's d as the common intermediate:
    r-to-d per Rosenthal (1994), d-to-r per McGrath & Meyer (2006),
    d-to-eta-square per Cohen (1988), d-to-odds-ratio per Borenstein
    et al. (2009) and d-to-AUC per Ruscio (2008).
    """
    it, ot = input_type.lower(), output_type.lower()
    # Both labels must be recognized effect-size names.
    for label in (it, ot):
        if not _check_eftype(label):
            raise ValueError(f"Could not interpret input '{label}'")
    if it not in ("pointbiserialr", "cohen"):
        raise ValueError("Input type must be 'cohen' or 'pointbiserialr'")
    # Pass-through option
    if it == ot or ot == "none":
        return ef
    # Convert point-biserial r to Cohen d (Rosenthal 1994)
    d = ef if it == "cohen" else (2 * ef) / np.sqrt(1 - ef**2)
    # Then convert to the desired output type.
    if ot == "cohen":
        return d
    if ot == "hedges":
        if nx is None or ny is None:
            # Without both sample sizes the small-sample correction factor
            # cannot be computed; fall back to the uncorrected d.
            warnings.warn(
                "You need to pass nx and ny arguments to compute "
                "Hedges g. Returning Cohen's d instead"
            )
            return d
        return d * (1 - (3 / (4 * (nx + ny) - 9)))
    if ot == "pointbiserialr":
        # McGrath and Meyer 2006; a = 4 assumes equal group sizes.
        if nx is not None and ny is not None:
            a = ((nx + ny) ** 2 - 2 * (nx + ny)) / (nx * ny)
        else:
            a = 4
        return d / np.sqrt(d**2 + a)
    if ot == "eta-square":
        # Cohen 1988
        half_d = d / 2
        return half_d**2 / (1 + half_d**2)
    if ot == "odds-ratio":
        # Borenstein et al. 2009
        return np.exp(d * np.pi / np.sqrt(3))
    if ot == "r":
        # https://github.com/raphaelvallat/pingouin/issues/302
        raise ValueError(
            "Using effect size 'r' in `pingouin.convert_effsize` has been deprecated. "
            "Please use 'pointbiserialr' instead."
        )
    # Remaining option: 'auc' (Ruscio 2008).
    from scipy.stats import norm
    return norm.cdf(d / np.sqrt(2))
| (ef, input_type, output_type, nx=None, ny=None) |
31,999 | pingouin.correlation | corr | (Robust) correlation between two variables.
Parameters
----------
x, y : array_like
First and second set of observations. ``x`` and ``y`` must be
independent.
alternative : string
Defines the alternative hypothesis, or tail of the correlation. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
p-value. "greater" tests against the alternative hypothesis that the correlation is
positive (greater than zero), "less" tests against the hypothesis that the correlation is
negative.
method : string
Correlation type:
* ``'pearson'``: Pearson :math:`r` product-moment correlation
* ``'spearman'``: Spearman :math:`\rho` rank-order correlation
* ``'kendall'``: Kendall's :math:`\tau_B` correlation (for ordinal data)
* ``'bicor'``: Biweight midcorrelation (robust)
* ``'percbend'``: Percentage bend correlation (robust)
* ``'shepherd'``: Shepherd's pi correlation (robust)
* ``'skipped'``: Skipped correlation (robust)
**kwargs : optional
Optional argument(s) passed to the lower-level correlation functions.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'n'``: Sample size (after removal of missing values)
* ``'outliers'``: number of outliers, only if a robust method was used
* ``'r'``: Correlation coefficient
* ``'CI95%'``: 95% parametric confidence intervals around :math:`r`
* ``'p-val'``: p-value
* ``'BF10'``: Bayes Factor of the alternative hypothesis (only for Pearson correlation)
* ``'power'``: achieved power of the test with an alpha of 0.05.
See also
--------
pairwise_corr : Pairwise correlation between columns of a pandas DataFrame
partial_corr : Partial correlation
rm_corr : Repeated measures correlation
Notes
-----
The `Pearson correlation coefficient
<https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
measures the linear relationship between two datasets. Strictly speaking,
Pearson's correlation requires that each dataset be normally distributed.
Correlations of -1 or +1 imply a perfect negative and positive linear
relationship, respectively, with 0 indicating the absence of association.
.. math::
r_{xy} = \frac{\sum_i(x_i - \bar{x})(y_i - \bar{y})}
{\sqrt{\sum_i(x_i - \bar{x})^2} \sqrt{\sum_i(y_i - \bar{y})^2}}
= \frac{\text{cov}(x, y)}{\sigma_x \sigma_y}
where :math:`\text{cov}` is the sample covariance and :math:`\sigma`
is the sample standard deviation.
If ``method='pearson'``, The Bayes Factor is calculated using the
:py:func:`pingouin.bayesfactor_pearson` function.
The `Spearman correlation coefficient
<https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
is a non-parametric measure of the monotonicity of the relationship between
two datasets. Unlike the Pearson correlation, the Spearman correlation does
not assume that both datasets are normally distributed. Correlations of -1
or +1 imply an exact negative and positive monotonic relationship,
respectively. Mathematically, the Spearman correlation coefficient is
defined as the Pearson correlation coefficient between the
`rank variables <https://en.wikipedia.org/wiki/Ranking>`_.
The `Kendall correlation coefficient
<https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
is a measure of the correspondence between two rankings. Values also range
from -1 (perfect disagreement) to 1 (perfect agreement), with 0 indicating
the absence of association. Consistent with
:py:func:`scipy.stats.kendalltau`, Pingouin returns the Tau-b coefficient,
which adjusts for ties:
.. math:: \tau_B = \frac{(P - Q)}{\sqrt{(P + Q + T) (P + Q + U)}}
where :math:`P` is the number of concordant pairs, :math:`Q` the number of
discordant pairs, :math:`T` the number of ties in x, and :math:`U`
the number of ties in y.
The `biweight midcorrelation
<https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ and
percentage bend correlation [1]_ are both robust methods that
protect against *univariate* outliers by down-weighting observations that
deviate too much from the median.
The Shepherd pi [2]_ correlation and skipped [3]_, [4]_ correlation are
both robust methods that return the Spearman correlation coefficient after
removing *bivariate* outliers. Briefly, the Shepherd pi uses a
bootstrapping of the Mahalanobis distance to identify outliers, while the
skipped correlation is based on the minimum covariance determinant
(which requires scikit-learn). Note that these two methods are
significantly slower than the previous ones.
The confidence intervals for the correlation coefficient are estimated
using the Fisher transformation.
.. important:: Rows with missing values (NaN) are automatically removed.
References
----------
.. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395
.. [2] Schwarzkopf, D.S., De Haas, B., Rees, G., 2012. Better ways to
improve standards in brain-behavior correlation analysis. Front.
Hum. Neurosci. 6, 200. https://doi.org/10.3389/fnhum.2012.00200
.. [3] Rousselet, G.A., Pernet, C.R., 2012. Improving standards in
brain-behavior correlation analyses. Front. Hum. Neurosci. 6, 119.
https://doi.org/10.3389/fnhum.2012.00119
.. [4] Pernet, C.R., Wilcox, R., Rousselet, G.A., 2012. Robust correlation
analyses: false positive and power validation using a new open
source matlab toolbox. Front. Psychol. 3, 606.
https://doi.org/10.3389/fpsyg.2012.00606
Examples
--------
1. Pearson correlation
>>> import numpy as np
>>> import pingouin as pg
>>> # Generate random correlated samples
>>> np.random.seed(123)
>>> mean, cov = [4, 6], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 30).T
>>> # Compute Pearson correlation
>>> pg.corr(x, y).round(3)
n r CI95% p-val BF10 power
pearson 30 0.491 [0.16, 0.72] 0.006 8.55 0.809
2. Pearson correlation with two outliers
>>> x[3], y[5] = 12, -8
>>> pg.corr(x, y).round(3)
n r CI95% p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.439 0.302 0.121
3. Spearman correlation (robust to outliers)
>>> pg.corr(x, y, method="spearman").round(3)
n r CI95% p-val power
spearman 30 0.401 [0.05, 0.67] 0.028 0.61
4. Biweight midcorrelation (robust)
>>> pg.corr(x, y, method="bicor").round(3)
n r CI95% p-val power
bicor 30 0.393 [0.04, 0.66] 0.031 0.592
5. Percentage bend correlation (robust)
>>> pg.corr(x, y, method='percbend').round(3)
n r CI95% p-val power
percbend 30 0.389 [0.03, 0.66] 0.034 0.581
6. Shepherd's pi correlation (robust)
>>> pg.corr(x, y, method='shepherd').round(3)
n outliers r CI95% p-val power
shepherd 30 2 0.437 [0.08, 0.7] 0.02 0.662
7. Skipped spearman correlation (robust)
>>> pg.corr(x, y, method='skipped').round(3)
n outliers r CI95% p-val power
skipped 30 2 0.437 [0.08, 0.7] 0.02 0.662
8. One-tailed Pearson correlation
>>> pg.corr(x, y, alternative="greater", method='pearson').round(3)
n r CI95% p-val BF10 power
pearson 30 0.147 [-0.17, 1.0] 0.22 0.467 0.194
>>> pg.corr(x, y, alternative="less", method='pearson').round(3)
n r CI95% p-val BF10 power
pearson 30 0.147 [-1.0, 0.43] 0.78 0.137 0.008
9. Perfect correlation
>>> pg.corr(x, -x).round(3)
n r CI95% p-val BF10 power
pearson 30 -1.0 [-1.0, -1.0] 0.0 inf 1
10. Using columns of a pandas dataframe
>>> import pandas as pd
>>> data = pd.DataFrame({'x': x, 'y': y})
>>> pg.corr(data['x'], data['y']).round(3)
n r CI95% p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.439 0.302 0.121
def corr(x, y, alternative="two-sided", method="pearson", **kwargs):
    """(Robust) correlation between two variables.

    Parameters
    ----------
    x, y : array_like
        First and second set of observations. ``x`` and ``y`` must be
        independent.
    alternative : string
        Defines the alternative hypothesis, or tail of the correlation. Must be one of
        "two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
        p-value. "greater" tests against the alternative hypothesis that the correlation is
        positive (greater than zero), "less" tests against the hypothesis that the correlation is
        negative.
    method : string
        Correlation type:

        * ``'pearson'``: Pearson :math:`r` product-moment correlation
        * ``'spearman'``: Spearman :math:`\\rho` rank-order correlation
        * ``'kendall'``: Kendall's :math:`\\tau_B` correlation (for ordinal data)
        * ``'bicor'``: Biweight midcorrelation (robust)
        * ``'percbend'``: Percentage bend correlation (robust)
        * ``'shepherd'``: Shepherd's pi correlation (robust)
        * ``'skipped'``: Skipped correlation (robust)
    **kwargs : optional
        Optional argument(s) passed to the lower-level correlation functions.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'n'``: Sample size (after removal of missing values)
        * ``'outliers'``: number of outliers, only if a robust method was used
        * ``'r'``: Correlation coefficient
        * ``'CI95%'``: 95% parametric confidence intervals around :math:`r`
        * ``'p-val'``: p-value
        * ``'BF10'``: Bayes Factor of the alternative hypothesis (only for Pearson correlation)
        * ``'power'``: achieved power of the test with an alpha of 0.05.

    See also
    --------
    pairwise_corr : Pairwise correlation between columns of a pandas DataFrame
    partial_corr : Partial correlation
    rm_corr : Repeated measures correlation

    Notes
    -----
    The `Pearson correlation coefficient
    <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
    measures the linear relationship between two datasets. Strictly speaking,
    Pearson's correlation requires that each dataset be normally distributed.
    Correlations of -1 or +1 imply a perfect negative and positive linear
    relationship, respectively, with 0 indicating the absence of association.

    .. math::
        r_{xy} = \\frac{\\sum_i(x_i - \\bar{x})(y_i - \\bar{y})}
        {\\sqrt{\\sum_i(x_i - \\bar{x})^2} \\sqrt{\\sum_i(y_i - \\bar{y})^2}}
        = \\frac{\\text{cov}(x, y)}{\\sigma_x \\sigma_y}

    where :math:`\\text{cov}` is the sample covariance and :math:`\\sigma`
    is the sample standard deviation.

    If ``method='pearson'``, The Bayes Factor is calculated using the
    :py:func:`pingouin.bayesfactor_pearson` function.

    The `Spearman correlation coefficient
    <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
    is a non-parametric measure of the monotonicity of the relationship between
    two datasets. Unlike the Pearson correlation, the Spearman correlation does
    not assume that both datasets are normally distributed. Correlations of -1
    or +1 imply an exact negative and positive monotonic relationship,
    respectively. Mathematically, the Spearman correlation coefficient is
    defined as the Pearson correlation coefficient between the
    `rank variables <https://en.wikipedia.org/wiki/Ranking>`_.

    The `Kendall correlation coefficient
    <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
    is a measure of the correspondence between two rankings. Values also range
    from -1 (perfect disagreement) to 1 (perfect agreement), with 0 indicating
    the absence of association. Consistent with
    :py:func:`scipy.stats.kendalltau`, Pingouin returns the Tau-b coefficient,
    which adjusts for ties:

    .. math:: \\tau_B = \\frac{(P - Q)}{\\sqrt{(P + Q + T) (P + Q + U)}}

    where :math:`P` is the number of concordant pairs, :math:`Q` the number of
    discordant pairs, :math:`T` the number of ties in x, and :math:`U`
    the number of ties in y.

    The `biweight midcorrelation
    <https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ and
    percentage bend correlation [1]_ are both robust methods that
    protect against *univariate* outliers by down-weighting observations that
    deviate too much from the median.

    The Shepherd pi [2]_ correlation and skipped [3]_, [4]_ correlation are
    both robust methods that return the Spearman correlation coefficient after
    removing *bivariate* outliers. Briefly, the Shepherd pi uses a
    bootstrapping of the Mahalanobis distance to identify outliers, while the
    skipped correlation is based on the minimum covariance determinant
    (which requires scikit-learn). Note that these two methods are
    significantly slower than the previous ones.

    The confidence intervals for the correlation coefficient are estimated
    using the Fisher transformation.

    .. important:: Rows with missing values (NaN) are automatically removed.

    References
    ----------
    .. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
       Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395

    .. [2] Schwarzkopf, D.S., De Haas, B., Rees, G., 2012. Better ways to
       improve standards in brain-behavior correlation analysis. Front.
       Hum. Neurosci. 6, 200. https://doi.org/10.3389/fnhum.2012.00200

    .. [3] Rousselet, G.A., Pernet, C.R., 2012. Improving standards in
       brain-behavior correlation analyses. Front. Hum. Neurosci. 6, 119.
       https://doi.org/10.3389/fnhum.2012.00119

    .. [4] Pernet, C.R., Wilcox, R., Rousselet, G.A., 2012. Robust correlation
       analyses: false positive and power validation using a new open
       source matlab toolbox. Front. Psychol. 3, 606.
       https://doi.org/10.3389/fpsyg.2012.00606

    Examples
    --------
    1. Pearson correlation

    >>> import numpy as np
    >>> import pingouin as pg
    >>> # Generate random correlated samples
    >>> np.random.seed(123)
    >>> mean, cov = [4, 6], [(1, .5), (.5, 1)]
    >>> x, y = np.random.multivariate_normal(mean, cov, 30).T
    >>> # Compute Pearson correlation
    >>> pg.corr(x, y).round(3)
              n      r         CI95%  p-val   BF10  power
    pearson  30  0.491  [0.16, 0.72]  0.006   8.55  0.809

    2. Pearson correlation with two outliers

    >>> x[3], y[5] = 12, -8
    >>> pg.corr(x, y).round(3)
              n      r          CI95%  p-val   BF10  power
    pearson  30  0.147  [-0.23, 0.48]  0.439  0.302  0.121

    3. Spearman correlation (robust to outliers)

    >>> pg.corr(x, y, method="spearman").round(3)
               n      r         CI95%  p-val  power
    spearman  30  0.401  [0.05, 0.67]  0.028   0.61

    4. Biweight midcorrelation (robust)

    >>> pg.corr(x, y, method="bicor").round(3)
            n      r         CI95%  p-val  power
    bicor  30  0.393  [0.04, 0.66]  0.031  0.592

    5. Percentage bend correlation (robust)

    >>> pg.corr(x, y, method='percbend').round(3)
               n      r         CI95%  p-val  power
    percbend  30  0.389  [0.03, 0.66]  0.034  0.581

    6. Shepherd's pi correlation (robust)

    >>> pg.corr(x, y, method='shepherd').round(3)
               n  outliers      r        CI95%  p-val  power
    shepherd  30         2  0.437  [0.08, 0.7]   0.02  0.662

    7. Skipped spearman correlation (robust)

    >>> pg.corr(x, y, method='skipped').round(3)
              n  outliers      r        CI95%  p-val  power
    skipped  30         2  0.437  [0.08, 0.7]   0.02  0.662

    8. One-tailed Pearson correlation

    >>> pg.corr(x, y, alternative="greater", method='pearson').round(3)
              n      r         CI95%  p-val   BF10  power
    pearson  30  0.147  [-0.17, 1.0]   0.22  0.467  0.194

    >>> pg.corr(x, y, alternative="less", method='pearson').round(3)
              n      r         CI95%  p-val   BF10  power
    pearson  30  0.147  [-1.0, 0.43]   0.78  0.137  0.008

    9. Perfect correlation

    >>> pg.corr(x, -x).round(3)
              n    r         CI95%  p-val BF10  power
    pearson  30 -1.0  [-1.0, -1.0]    0.0  inf      1

    10. Using columns of a pandas dataframe

    >>> import pandas as pd
    >>> data = pd.DataFrame({'x': x, 'y': y})
    >>> pg.corr(data['x'], data['y']).round(3)
              n      r          CI95%  p-val   BF10  power
    pearson  30  0.147  [-0.23, 0.48]  0.439  0.302  0.121
    """
    # Safety check
    x = np.asarray(x)
    y = np.asarray(y)
    assert x.ndim == y.ndim == 1, "x and y must be 1D array."
    assert x.size == y.size, "x and y must have the same length."
    assert alternative in [
        "two-sided",
        "greater",
        "less",
    ], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    # 'tail' was the pre-0.4.0 name of the 'alternative' keyword; fail loudly
    # rather than silently ignoring it in **kwargs.
    if "tail" in kwargs:
        raise ValueError(
            "Since Pingouin 0.4.0, the 'tail' argument has been renamed to 'alternative'."
        )
    # Remove rows with missing values (pairwise deletion of NaN)
    x, y = remove_na(x, y, paired=True)
    n = x.size
    # Compute correlation coefficient and two-sided p-value. The robust
    # 'shepherd' and 'skipped' methods additionally return a boolean mask of
    # bivariate outliers.
    if method == "pearson":
        r, pval = pearsonr(x, y)
    elif method == "spearman":
        r, pval = spearmanr(x, y, **kwargs)
    elif method == "kendall":
        r, pval = kendalltau(x, y, **kwargs)
    elif method == "bicor":
        r, pval = bicor(x, y, **kwargs)
    elif method == "percbend":
        r, pval = percbend(x, y, **kwargs)
    elif method == "shepherd":
        r, pval, outliers = shepherd(x, y, **kwargs)
    elif method == "skipped":
        r, pval, outliers = skipped(x, y, **kwargs)
    else:
        raise ValueError(f'Method "{method}" not recognized.')
    if np.isnan(r):
        # Correlation failed -- new in version v0.3.4, instead of raising an
        # error we just return a dataframe full of NaN (except sample size).
        # This avoids a sudden stop in pingouin.pairwise_corr.
        return pd.DataFrame(
            {
                "n": n,
                "r": np.nan,
                "CI95%": np.nan,
                "p-val": np.nan,
                "BF10": np.nan,
                "power": np.nan,
            },
            index=[method],
        )
    # Sample size after outlier removal. `outliers` is only bound when a
    # robust method ('shepherd' or 'skipped') was used, hence the locals()
    # introspection -- do not rename that variable.
    n_outliers = sum(outliers) if "outliers" in locals() else 0
    n_clean = n - n_outliers
    # Rounding errors caused an r value marginally beyond 1
    if abs(r) > 1 and np.isclose(abs(r), 1):
        r = np.clip(r, -1, 1)
    # Compute the parametric 95% confidence interval and power
    if abs(r) == 1:
        # Degenerate case: CI collapses to a point and power is exactly 1.
        ci = [r, r]
        pr = 1
    else:
        ci = compute_esci(
            stat=r, nx=n_clean, ny=n_clean, eftype="r", decimals=6, alternative=alternative
        )
        pr = power_corr(r=r, n=n_clean, power=None, alpha=0.05, alternative=alternative)
    # Recompute p-value if tail is one-sided
    if alternative != "two-sided":
        pval = _correl_pvalue(r, n_clean, k=0, alternative=alternative)
    # Create dictionary of results
    stats = {"n": n, "r": r, "CI95%": [ci], "p-val": pval, "power": pr}
    if method in ["shepherd", "skipped"]:
        stats["outliers"] = n_outliers
    # Compute the BF10 for Pearson correlation only
    if method == "pearson":
        stats["BF10"] = bayesfactor_pearson(r, n_clean, alternative=alternative)
    # Convert to DataFrame
    stats = pd.DataFrame(stats, index=[method])
    # Define column order (only keep the columns actually present)
    col_keep = ["n", "outliers", "r", "CI95%", "p-val", "BF10", "power"]
    col_order = [k for k in col_keep if k in stats.keys().tolist()]
    return _postprocess_dataframe(stats)[col_order]
| (x, y, alternative='two-sided', method='pearson', **kwargs) |
32,001 | pingouin.reliability | cronbach_alpha | Cronbach's alpha reliability measure.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Wide or long-format dataframe.
items : str
Column in ``data`` with the items names (long-format only).
scores : str
Column in ``data`` with the scores (long-format only).
subject : str
Column in ``data`` with the subject identifier (long-format only).
nan_policy : str
If `'listwise'`, remove the entire rows that contain missing values
(= listwise deletion). If `'pairwise'` (default), only pairwise
missing values are removed when computing the covariance matrix.
For more details, please refer to the :py:meth:`pandas.DataFrame.cov`
method.
ci : float
Confidence interval (.95 = 95%)
Returns
-------
alpha : float
Cronbach's alpha
Notes
-----
This function works with both wide and long format dataframe. If you pass a
long-format dataframe, you must also pass the ``items``, ``scores`` and
``subj`` columns (in which case the data will be converted into wide
format using the :py:meth:`pandas.DataFrame.pivot` method).
Internal consistency is usually measured with Cronbach's alpha [1]_,
a statistic calculated from the pairwise correlations between items.
Internal consistency ranges between negative infinity and one.
Coefficient alpha will be negative whenever there is greater
within-subject variability than between-subject variability.
Cronbach's :math:`\alpha` is defined as
.. math::
\alpha ={k \over k-1}\left(1-{\sum_{{i=1}}^{k}\sigma_{{y_{i}}}^{2}
\over\sigma_{x}^{2}}\right)
where :math:`k` refers to the number of items, :math:`\sigma_{x}^{2}`
is the variance of the observed total scores, and
:math:`\sigma_{{y_{i}}}^{2}` the variance of component :math:`i` for
the current sample of subjects.
Another formula for Cronbach's :math:`\alpha` is
.. math::
\alpha = \frac{k \times \bar c}{\bar v + (k - 1) \times \bar c}
where :math:`\bar c` refers to the average of all covariances between
items and :math:`\bar v` to the average variance of each item.
95% confidence intervals are calculated using Feldt's method [2]_:
.. math::
c_L = 1 - (1 - \alpha) \cdot F_{(0.025, n-1, (n-1)(k-1))}
c_U = 1 - (1 - \alpha) \cdot F_{(0.975, n-1, (n-1)(k-1))}
where :math:`n` is the number of subjects and :math:`k` the number of
items.
Results have been tested against the `psych
<https://cran.r-project.org/web/packages/psych/psych.pdf>`_ R package.
References
----------
.. [1] http://www.real-statistics.com/reliability/cronbachs-alpha/
.. [2] Feldt, Leonard S., Woodruff, David J., & Salih, Fathi A. (1987).
Statistical inference for coefficient alpha. Applied Psychological
Measurement, 11(1):93-103.
Examples
--------
Binary wide-format dataframe (with missing values)
>>> import pingouin as pg
>>> data = pg.read_dataset('cronbach_wide_missing')
>>> # In R: psych:alpha(data, use="pairwise")
>>> pg.cronbach_alpha(data=data)
(0.732660835214447, array([0.435, 0.909]))
After listwise deletion of missing values (remove the entire rows)
>>> # In R: psych:alpha(data, use="complete.obs")
>>> pg.cronbach_alpha(data=data, nan_policy='listwise')
(0.8016949152542373, array([0.581, 0.933]))
After imputing the missing values with the median of each column
>>> pg.cronbach_alpha(data=data.fillna(data.median()))
(0.7380191693290734, array([0.447, 0.911]))
Likert-type long-format dataframe
>>> data = pg.read_dataset('cronbach_alpha')
>>> pg.cronbach_alpha(data=data, items='Items', scores='Scores',
... subject='Subj')
(0.5917188485995826, array([0.195, 0.84 ]))
def cronbach_alpha(
    data=None, items=None, scores=None, subject=None, nan_policy="pairwise", ci=0.95
):
    """Cronbach's alpha reliability measure.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Wide or long-format dataframe.
    items : str
        Column in ``data`` with the items names (long-format only).
    scores : str
        Column in ``data`` with the scores (long-format only).
    subject : str
        Column in ``data`` with the subject identifier (long-format only).
    nan_policy : str
        If ``'listwise'``, remove the entire rows that contain missing values
        (= listwise deletion). If ``'pairwise'`` (default), only pairwise
        missing values are removed when computing the covariance matrix.
        For more details, please refer to the :py:meth:`pandas.DataFrame.cov`
        method.
    ci : float
        Confidence interval (.95 = 95%). Must be strictly between 0 and 1.

    Returns
    -------
    alpha : float
        Cronbach's alpha.
    confidence_interval : :py:class:`numpy.ndarray`
        Lower and upper bounds of the confidence interval (Feldt's method),
        rounded to 3 decimals.

    Notes
    -----
    This function works with both wide and long format dataframe. If you pass a
    long-format dataframe, you must also pass the ``items``, ``scores`` and
    ``subj`` columns (in which case the data will be converted into wide
    format using the :py:meth:`pandas.DataFrame.pivot` method).

    Internal consistency is usually measured with Cronbach's alpha [1]_,
    a statistic calculated from the pairwise correlations between items.
    Internal consistency ranges between negative infinity and one.
    Coefficient alpha will be negative whenever there is greater
    within-subject variability than between-subject variability.

    Cronbach's :math:`\\alpha` is defined as

    .. math::
        \\alpha ={k \\over k-1}\\left(1-{\\sum_{{i=1}}^{k}\\sigma_{{y_{i}}}^{2}
        \\over\\sigma_{x}^{2}}\\right)

    where :math:`k` refers to the number of items, :math:`\\sigma_{x}^{2}`
    is the variance of the observed total scores, and
    :math:`\\sigma_{{y_{i}}}^{2}` the variance of component :math:`i` for
    the current sample of subjects.

    Another formula for Cronbach's :math:`\\alpha` is

    .. math::
        \\alpha = \\frac{k \\times \\bar c}{\\bar v + (k - 1) \\times \\bar c}

    where :math:`\\bar c` refers to the average of all covariances between
    items and :math:`\\bar v` to the average variance of each item.

    95% confidence intervals are calculated using Feldt's method [2]_:

    .. math::
        c_L = 1 - (1 - \\alpha) \\cdot F_{(0.025, n-1, (n-1)(k-1))}

        c_U = 1 - (1 - \\alpha) \\cdot F_{(0.975, n-1, (n-1)(k-1))}

    where :math:`n` is the number of subjects and :math:`k` the number of
    items.

    Results have been tested against the `psych
    <https://cran.r-project.org/web/packages/psych/psych.pdf>`_ R package.

    References
    ----------
    .. [1] http://www.real-statistics.com/reliability/cronbachs-alpha/

    .. [2] Feldt, Leonard S., Woodruff, David J., & Salih, Fathi A. (1987).
           Statistical inference for coefficient alpha. Applied Psychological
           Measurement, 11(1):93-103.

    Examples
    --------
    Binary wide-format dataframe (with missing values)

    >>> import pingouin as pg
    >>> data = pg.read_dataset('cronbach_wide_missing')
    >>> # In R: psych:alpha(data, use="pairwise")
    >>> pg.cronbach_alpha(data=data)
    (0.732660835214447, array([0.435, 0.909]))

    After listwise deletion of missing values (remove the entire rows)

    >>> # In R: psych:alpha(data, use="complete.obs")
    >>> pg.cronbach_alpha(data=data, nan_policy='listwise')
    (0.8016949152542373, array([0.581, 0.933]))

    After imputing the missing values with the median of each column

    >>> pg.cronbach_alpha(data=data.fillna(data.median()))
    (0.7380191693290734, array([0.447, 0.911]))

    Likert-type long-format dataframe

    >>> data = pg.read_dataset('cronbach_alpha')
    >>> pg.cronbach_alpha(data=data, items='Items', scores='Scores',
    ...                   subject='Subj')
    (0.5917188485995826, array([0.195, 0.84 ]))
    """
    # Safety check
    assert isinstance(data, pd.DataFrame), "data must be a dataframe."
    assert nan_policy in ["pairwise", "listwise"]
    # New: validate the confidence level early instead of producing
    # nonsensical F quantiles later on.
    assert 0 < ci < 1, "ci must be strictly between 0 and 1."
    if all(v is not None for v in [items, scores, subject]):
        # Data in long-format: we first convert to a wide format
        data = data.pivot(index=subject, values=scores, columns=items)

    # From now we assume that data is in wide format
    n, k = data.shape
    assert k >= 2, "At least two items are required."
    assert n >= 2, "At least two raters/subjects are required."
    err = "All columns must be numeric."
    assert all(data[c].dtype.kind in "bfiu" for c in data.columns), err
    if data.isna().any().any() and nan_policy == "listwise":
        # In R = psych:alpha(data, use="complete.obs")
        # NOTE(review): `n` is taken *before* listwise deletion, so the CI
        # degrees of freedom count the dropped rows. Kept as-is to preserve
        # the documented (R-validated) outputs -- confirm intended upstream.
        data = data.dropna(axis=0, how="any")

    # Compute covariance matrix and Cronbach's alpha
    C = data.cov(numeric_only=True)
    cronbach = (k / (k - 1)) * (1 - np.trace(C) / C.sum().sum())
    # which is equivalent to
    # v = np.diag(C).mean()
    # c = C.to_numpy()[np.tril_indices_from(C, k=-1)].mean()
    # cronbach = (k * c) / (v + (k - 1) * c)

    # Confidence intervals (Feldt's method)
    alpha = 1 - ci
    df1 = n - 1
    df2 = df1 * (k - 1)
    lower = 1 - (1 - cronbach) * f.isf(alpha / 2, df1, df2)
    upper = 1 - (1 - cronbach) * f.isf(1 - alpha / 2, df1, df2)
    return cronbach, np.round([lower, upper], 3)
| (data=None, items=None, scores=None, subject=None, nan_policy='pairwise', ci=0.95) |
32,003 | pingouin.contingency | dichotomous_crosstab |
Generates a 2x2 contingency table from a :py:class:`pandas.DataFrame` that
contains only dichotomous entries, which are converted to 0 or 1.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Pandas dataframe
x, y : string
Column names in ``data``.
Currently, Pingouin recognizes the following values as dichotomous
measurements:
* ``0``, ``0.0``, ``False``, ``'No'``, ``'N'``, ``'Absent'``, ``'False'``, ``'F'`` or ``'Negative'`` for negative cases;
* ``1``, ``1.0``, ``True``, ``'Yes'``, ``'Y'``, ``'Present'``, ``'True'``, ``'T'``, ``'Positive'`` or ``'P'``, for positive cases;
If strings are used, Pingouin will recognize them regardless of their
uppercase/lowercase combinations.
Returns
-------
crosstab : :py:class:`pandas.DataFrame`
The 2x2 crosstab. See :py:func:`pandas.crosstab` for more details.
Examples
--------
>>> import pandas as pd
>>> import pingouin as pg
>>> df = pd.DataFrame({'A': ['Yes', 'No', 'No'], 'B': [0., 1., 0.]})
>>> pg.dichotomous_crosstab(data=df, x='A', y='B')
B 0 1
A
0 1 1
1 1 0
def dichotomous_crosstab(data, x, y):
    """
    Generates a 2x2 contingency table from a :py:class:`pandas.DataFrame` that
    contains only dichotomous entries, which are converted to 0 or 1.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Pandas dataframe
    x, y : string
        Column names in ``data``.

        Currently, Pingouin recognizes the following values as dichotomous
        measurements:

        * ``0``, ``0.0``, ``False``, ``'No'``, ``'N'``, ``'Absent'``,\
          ``'False'``, ``'F'`` or ``'Negative'`` for negative cases;

        * ``1``, ``1.0``, ``True``, ``'Yes'``, ``'Y'``, ``'Present'``,\
          ``'True'``, ``'T'``, ``'Positive'`` or ``'P'``, for positive cases;

        If strings are used, Pingouin will recognize them regardless of their
        uppercase/lowercase combinations.

    Returns
    -------
    crosstab : :py:class:`pandas.DataFrame`
        The 2x2 crosstab. See :py:func:`pandas.crosstab` for more details.

    Examples
    --------
    >>> import pandas as pd
    >>> import pingouin as pg
    >>> df = pd.DataFrame({'A': ['Yes', 'No', 'No'], 'B': [0., 1., 0.]})
    >>> pg.dichotomous_crosstab(data=df, x='A', y='B')
    B  0  1
    A
    0  1  1
    1  1  0
    """
    # Map both columns to 0/1 and cross-tabulate the pair.
    row_series = _dichotomize_series(data, x)
    col_series = _dichotomize_series(data, y)
    crosstab = pd.crosstab(row_series, col_series)

    if crosstab.shape == (2, 2):
        return crosstab

    # One level of one variable never occurred: pad the missing row or
    # column with zeros so the table is always 2x2.
    if crosstab.shape == (2, 1):
        absent_level = int(not bool(crosstab.columns[0]))
        crosstab.loc[:, absent_level] = [0, 0]
    elif crosstab.shape == (1, 2):
        absent_level = int(not bool(crosstab.index[0]))
        crosstab.loc[absent_level, :] = [0, 0]
    else:  # shape = (1, 1) or shape = (>2, >2)
        raise ValueError(
            "Both series contain only one unique value. " "Cannot build 2x2 contingency table."
        )
    # Ensure rows/columns come out in 0, 1 order after padding.
    return crosstab.sort_index(axis=0).sort_index(axis=1)
| (data, x, y) |
32,004 | pingouin.correlation | distance_corr | Distance correlation between two arrays.
Statistical significance (p-value) is evaluated with a permutation test.
Parameters
----------
x, y : array_like
1D or 2D input arrays, shape (n_samples, n_features).
``x`` and ``y`` must have the same number of samples and must not
contain missing values.
alternative : str
Alternative of the test. Can be either "two-sided", "greater" (default) or "less".
To be consistent with the original R implementation, the default is to calculate the
one-sided "greater" p-value.
n_boot : int or None
Number of bootstrap to perform. If None, no bootstrapping is performed and the function
only returns the distance correlation (no p-value). Default is 1000 (thus giving a
precision of 0.001).
seed : int or None
Random state seed.
Returns
-------
dcor : float
Sample distance correlation (range from 0 to 1).
pval : float
P-value.
Notes
-----
From Wikipedia:
*Distance correlation is a measure of dependence between two paired
random vectors of arbitrary, not necessarily equal, dimension. The
distance correlation coefficient is zero if and only if the random
vectors are independent. Thus, distance correlation measures both
linear and nonlinear association between two random variables or
random vectors. This is in contrast to Pearson's correlation, which can
only detect linear association between two random variables.*
The distance correlation of two random variables is obtained by
dividing their distance covariance by the product of their distance
standard deviations:
.. math::
\text{dCor}(X, Y) = \frac{\text{dCov}(X, Y)}
{\sqrt{\text{dVar}(X) \cdot \text{dVar}(Y)}}
where :math:`\text{dCov}(X, Y)` is the square root of the arithmetic
average of the product of the double-centered pairwise Euclidean distance
matrices.
Note that by contrast to Pearson's correlation, the distance correlation
cannot be negative, i.e :math:`0 \leq \text{dCor} \leq 1`.
Results have been tested against the
`energy <https://cran.r-project.org/web/packages/energy/energy.pdf>`_
R package.
References
----------
* https://en.wikipedia.org/wiki/Distance_correlation
* Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
Measuring and testing dependence by correlation of distances.
The annals of statistics, 35(6), 2769-2794.
* https://gist.github.com/satra/aa3d19a12b74e9ab7941
* https://gist.github.com/wladston/c931b1495184fbb99bec
Examples
--------
1. With two 1D vectors
>>> from pingouin import distance_corr
>>> a = [1, 2, 3, 4, 5]
>>> b = [1, 2, 9, 4, 4]
>>> dcor, pval = distance_corr(a, b, seed=9)
>>> print(round(dcor, 3), pval)
0.763 0.312
2. With two 2D arrays and no p-value
>>> import numpy as np
>>> np.random.seed(123)
>>> from pingouin import distance_corr
>>> a = np.random.random((10, 10))
>>> b = np.random.random((10, 10))
>>> round(distance_corr(a, b, n_boot=None), 3)
0.88
def distance_corr(x, y, alternative="greater", n_boot=1000, seed=None):
    """Distance correlation between two arrays.

    Statistical significance (p-value) is evaluated with a permutation test.

    Parameters
    ----------
    x, y : array_like
        1D or 2D input arrays, shape (n_samples, n_features). ``x`` and
        ``y`` must have the same number of samples and must not contain
        missing values.
    alternative : str
        Alternative of the test. Can be either "two-sided", "greater"
        (default) or "less". To be consistent with the original R
        implementation, the default is to calculate the one-sided "greater"
        p-value.
    n_boot : int or None
        Number of bootstrap to perform. If None, no bootstrapping is
        performed and the function only returns the distance correlation
        (no p-value). Default is 1000 (thus giving a precision of 0.001).
    seed : int or None
        Random state seed.

    Returns
    -------
    dcor : float
        Sample distance correlation (range from 0 to 1).
    pval : float
        P-value (only returned when bootstrapping is enabled).

    Notes
    -----
    Distance correlation measures both linear and nonlinear association
    between two random variables or random vectors of arbitrary dimension,
    and is zero if and only if they are independent. By contrast to
    Pearson's correlation, it cannot be negative, i.e.
    :math:`0 \\leq \\text{dCor} \\leq 1`. It is obtained by dividing the
    distance covariance of the two variables by the product of their
    distance standard deviations. Results have been tested against the
    `energy <https://cran.r-project.org/web/packages/energy/energy.pdf>`_
    R package.

    References
    ----------
    * https://en.wikipedia.org/wiki/Distance_correlation
    * Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
      Measuring and testing dependence by correlation of distances.
      The annals of statistics, 35(6), 2769-2794.

    Examples
    --------
    1. With two 1D vectors

    >>> from pingouin import distance_corr
    >>> a = [1, 2, 3, 4, 5]
    >>> b = [1, 2, 9, 4, 4]
    >>> dcor, pval = distance_corr(a, b, seed=9)
    >>> print(round(dcor, 3), pval)
    0.763 0.312

    2. With two 2D arrays and no p-value

    >>> import numpy as np
    >>> np.random.seed(123)
    >>> from pingouin import distance_corr
    >>> a = np.random.random((10, 10))
    >>> b = np.random.random((10, 10))
    >>> round(distance_corr(a, b, n_boot=None), 3)
    0.88
    """
    assert alternative in [
        "two-sided",
        "greater",
        "less",
    ], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    x = np.asarray(x)
    y = np.asarray(y)
    # Missing values are not supported
    if np.isnan(np.min(x)) or np.isnan(np.min(y)):
        raise ValueError("Input arrays must not contain NaN values.")
    # Promote 1D vectors to 2D column arrays of shape (n_samples, 1)
    if x.ndim == 1:
        x = x[:, None]
    if y.ndim == 1:
        y = y[:, None]
    assert x.shape[0] == y.shape[0], "x and y must have same number of samples"
    n_samples = x.shape[0]
    n_squared = n_samples**2
    # Double-center the pairwise Euclidean distance matrix of x once
    # (subtract row and column means, add back the grand mean). It is reused
    # unchanged for every permutation, so hoisting it avoids redundant work.
    dist_x = squareform(pdist(x, metric="euclidean"))
    centered = dist_x - dist_x.mean(axis=0)[None, :] - dist_x.mean(axis=1)[:, None] + dist_x.mean()
    # Squared distance variance of x (Frobenius inner product of the
    # centered matrix with itself, divided by n^2)
    dcov2_xx = np.vdot(centered, centered) / n_squared
    # Distance correlation on the observed data
    dcor = _dcorr(y, n_squared, centered, dcov2_xx)
    # Without bootstrapping, only the statistic is returned (no p-value)
    if n_boot is None or n_boot <= 1:
        return dcor
    # Permutation test: shuffle the rows of y and recompute the statistic.
    # argsort of uniform noise yields one random permutation per row.
    rng = np.random.RandomState(seed)
    perms = rng.random_sample((n_boot, n_samples)).argsort(axis=1)
    bootstat = np.empty(n_boot)
    for i in range(n_boot):
        bootstat[i] = _dcorr(y[perms[i, :]], n_squared, centered, dcov2_xx)
    pval = _perm_pval(bootstat, dcor, alternative=alternative)
    return dcor, pval
| (x, y, alternative='greater', n_boot=1000, seed=None) |
32,007 | pingouin.distribution | epsilon | Epsilon adjustment factor for repeated measures.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame containing the repeated measurements.
Both wide and long-format dataframe are supported for this function.
To test for an interaction term between two repeated measures factors
with a wide-format dataframe, ``data`` must have a two-levels
:py:class:`pandas.MultiIndex` columns.
dv : string
Name of column containing the dependent variable (only required if
``data`` is in long format).
within : string
Name of column containing the within factor (only required if ``data``
is in long format).
If ``within`` is a list with two strings, this function computes
the epsilon factor for the interaction between the two within-subject
factor.
subject : string
Name of column containing the subject identifier (only required if
``data`` is in long format).
correction : string
Specify the epsilon version:
* ``'gg'``: Greenhouse-Geisser
* ``'hf'``: Huynh-Feldt
* ``'lb'``: Lower bound
Returns
-------
eps : float
Epsilon adjustement factor.
See Also
--------
sphericity : Mauchly and JNS test for sphericity.
homoscedasticity : Test equality of variance.
Notes
-----
The lower bound epsilon is:
.. math:: lb = \frac{1}{\text{dof}},
where the degrees of freedom :math:`\text{dof}` is the number of groups
:math:`k` minus 1 for one-way design and :math:`(k_1 - 1)(k_2 - 1)`
for two-way design
The Greenhouse-Geisser epsilon is given by:
.. math::
\epsilon_{GG} = \frac{k^2(\overline{\text{diag}(S)} -
\overline{S})^2}{(k-1)(\sum_{i=1}^{k}\sum_{j=1}^{k}s_{ij}^2 -
2k\sum_{j=1}^{k}\overline{s_i}^2 + k^2\overline{S}^2)}
where :math:`S` is the covariance matrix, :math:`\overline{S}` the
grandmean of S and :math:`\overline{\text{diag}(S)}` the mean of all the
elements on the diagonal of S (i.e. mean of the variances).
The Huynh-Feldt epsilon is given by:
.. math::
\epsilon_{HF} = \frac{n(k-1)\epsilon_{GG}-2}{(k-1)
(n-1-(k-1)\epsilon_{GG})}
where :math:`n` is the number of observations.
Missing values are automatically removed from data (listwise deletion).
Examples
--------
Using a wide-format dataframe
>>> import pandas as pd
>>> import pingouin as pg
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> gg = pg.epsilon(data, correction='gg')
>>> hf = pg.epsilon(data, correction='hf')
>>> lb = pg.epsilon(data, correction='lb')
>>> print("%.2f %.2f %.2f" % (lb, gg, hf))
0.50 0.56 0.62
Now using a long-format dataframe
>>> data = pg.read_dataset('rm_anova2')
>>> data.head()
Subject Time Metric Performance
0 1 Pre Product 13
1 2 Pre Product 12
2 3 Pre Product 17
3 4 Pre Product 12
4 5 Pre Product 19
Let's first calculate the epsilon of the *Time* within-subject factor
>>> pg.epsilon(data, dv='Performance', subject='Subject',
... within='Time')
1.0
Since *Time* has only two levels (Pre and Post), the sphericity assumption
is necessarily met, and therefore the epsilon adjustement factor is 1.
The *Metric* factor, however, has three levels:
>>> round(pg.epsilon(data, dv='Performance', subject='Subject',
... within=['Metric']), 3)
0.969
The epsilon value is very close to 1, meaning that there is no major
violation of sphericity.
Now, let's calculate the epsilon for the interaction between the two
repeated measures factor:
>>> round(pg.epsilon(data, dv='Performance', subject='Subject',
... within=['Time', 'Metric']), 3)
0.727
Alternatively, we could use a wide-format dataframe with two column
levels:
>>> # Pivot from long-format to wide-format
>>> piv = data.pivot(index='Subject', columns=['Time', 'Metric'], values='Performance')
>>> piv.head()
Time Pre Post
Metric Product Client Action Product Client Action
Subject
1 13 12 17 18 30 34
2 12 19 18 6 18 30
3 17 19 24 21 31 32
4 12 25 25 18 39 40
5 19 27 19 18 28 27
>>> round(pg.epsilon(piv), 3)
0.727
which gives the same epsilon value as the long-format dataframe.
def epsilon(data, dv=None, within=None, subject=None, correction="gg"):
    """Epsilon adjustment factor for repeated measures.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame containing the repeated measurements. Both wide and
        long-format dataframes are supported for this function. To test for
        an interaction term between two repeated measures factors with a
        wide-format dataframe, ``data`` must have a two-levels
        :py:class:`pandas.MultiIndex` columns.
    dv : string
        Name of column containing the dependent variable (only required if
        ``data`` is in long format).
    within : string
        Name of column containing the within factor (only required if
        ``data`` is in long format). If ``within`` is a list with two
        strings, this function computes the epsilon factor for the
        interaction between the two within-subject factors.
    subject : string
        Name of column containing the subject identifier (only required if
        ``data`` is in long format).
    correction : string
        Specify the epsilon version:

        * ``'gg'``: Greenhouse-Geisser
        * ``'hf'``: Huynh-Feldt
        * ``'lb'``: Lower bound

    Returns
    -------
    eps : float
        Epsilon adjustment factor.

    See Also
    --------
    sphericity : Mauchly and JNS test for sphericity.
    homoscedasticity : Test equality of variance.

    Notes
    -----
    The lower bound epsilon is :math:`1 / \\text{dof}`, where the degrees of
    freedom :math:`\\text{dof}` are :math:`k - 1` for a one-way design with
    :math:`k` groups and :math:`(k_1 - 1)(k_2 - 1)` for a two-way design.
    The Greenhouse-Geisser epsilon is computed from the covariance matrix of
    the repeated measurements, and the Huynh-Feldt epsilon is a less
    conservative correction of the Greenhouse-Geisser value.

    Missing values are automatically removed from data (listwise deletion).

    Examples
    --------
    >>> import pandas as pd
    >>> import pingouin as pg
    >>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
    ...                      'B': [1.1, 2.5, 4.1, 5.2, 6.4],
    ...                      'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
    >>> gg = pg.epsilon(data, correction='gg')
    >>> hf = pg.epsilon(data, correction='hf')
    >>> lb = pg.epsilon(data, correction='lb')
    >>> print("%.2f %.2f %.2f" % (lb, gg, hf))
    0.50 0.56 0.62
    """
    assert isinstance(data, pd.DataFrame), "Data must be a pandas Dataframe."
    # Long-format input is first pivoted to wide format (one column per level)
    if all(v is not None for v in (dv, within, subject)):
        data = _long_to_wide_rm(data, dv=dv, within=within, subject=subject)
    # From here on, data is wide-format and contains only the relevant columns.
    # Listwise deletion of missing values
    data = data.dropna()
    # Collapse two-way factors of shape (2, N) to a supported layout
    data = _check_multilevel_rm(data, func="epsilon")
    # Covariance matrix of the repeated measurements
    cov = data.cov(numeric_only=True)
    n, k = data.shape
    # Sphericity necessarily holds with only two repeated measures
    if k <= 2:
        return 1.0
    # Degrees of freedom
    if cov.columns.nlevels == 1:
        # One-way design
        dof = k - 1
    else:
        # Two-way design (>2, >2)
        ka, kb = cov.columns.levshape
        dof = (ka - 1) * (kb - 1)
    if correction == "lb":
        # Lower-bound epsilon
        return 1 / dof
    # Greenhouse-Geisser epsilon, sums-of-squares formulation
    # (see real-statistics.com)
    var_mean = np.diag(cov).mean()
    grand_mean = cov.mean().mean()
    ss_matrix = (cov**2).sum().sum()
    ss_rows = (cov.mean(1) ** 2).sum().sum()
    num = (k * (var_mean - grand_mean)) ** 2
    den = (k - 1) * (ss_matrix - 2 * k * ss_rows + k**2 * grand_mean**2)
    # Epsilon is capped at 1 (perfect sphericity)
    eps = np.min([num / den, 1])
    if correction == "hf":
        # Huynh-Feldt correction of the Greenhouse-Geisser epsilon
        num = n * dof * eps - 2
        den = dof * (n - 1 - dof * eps)
        eps = np.min([num / den, 1])
    return eps
| (data, dv=None, within=None, subject=None, correction='gg') |
32,009 | pingouin.nonparametric | friedman | Friedman test for repeated measurements.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Both wide and long-format dataframe are supported for this test.
dv : string
Name of column containing the dependent variable (only required if ``data`` is in
long format).
within : string
Name of column containing the within-subject factor (only required if ``data`` is in
long format). Two or more within-factor are not currently supported.
subject : string
Name of column containing the subject/rater identifier (only required if ``data`` is in
long format).
method : string
Statistical test to perform. Must be ``'chisq'`` (chi-square test) or ``'f'`` (F test).
See notes below for explanation.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'W'``: Kendall's coefficient of concordance, corrected for ties
If ``method='chisq'``
* ``'Q'``: The Friedman chi-square statistic, corrected for ties
* ``'dof'``: degrees of freedom
* ``'p-unc'``: Uncorrected p-value of the chi squared test
If ``method='f'``
* ``'F'``: The Friedman F statistic, corrected for ties
* ``'dof1'``: degrees of freedom of the numerator
* ``'dof2'``: degrees of freedom of the denominator
* ``'p-unc'``: Uncorrected p-value of the F test
Notes
-----
The Friedman test is used for non-parametric (rank-based) one-way repeated measures ANOVA.
It is equivalent to the test of significance of Kendall's
coefficient of concordance (Kendall's W). Most commonly a Q statistic,
which has asymptotical chi-squared distribution, is computed and used for
testing. However, the chi-squared test tends to be overly conservative for small numbers
of samples and/or repeated measures, in which case a F-test is more adequate [1]_.
Data can be in wide or long format. Missing values are automatically removed using a
strict listwise approach (= complete-case analysis). In other words, any subject with one or
more missing value(s) is completely removed from the dataframe prior to running the
test.
References
----------
.. [1] Marozzi, M. (2014). Testing for concordance between several
criteria. Journal of Statistical Computation and Simulation,
84(9), 1843–1850. https://doi.org/10.1080/00949655.2013.766189
.. [2] https://www.real-statistics.com/anova-repeated-measures/friedman-test/
Examples
--------
Compute the Friedman test for repeated measurements, using a wide-format dataframe
>>> import pandas as pd
>>> import pingouin as pg
>>> df = pd.DataFrame({
... 'white': {0: 10, 1: 8, 2: 7, 3: 9, 4: 7, 5: 4, 6: 5, 7: 6, 8: 5, 9: 10, 10: 4, 11: 7},
... 'red': {0: 7, 1: 5, 2: 8, 3: 6, 4: 5, 5: 7, 6: 9, 7: 6, 8: 4, 9: 6, 10: 7, 11: 3},
... 'rose': {0: 8, 1: 5, 2: 6, 3: 4, 4: 7, 5: 5, 6: 3, 7: 7, 8: 6, 9: 4, 10: 4, 11: 3}})
>>> pg.friedman(df)
Source W ddof1 Q p-unc
Friedman Within 0.083333 2 2.0 0.367879
Compare with SciPy
>>> from scipy.stats import friedmanchisquare
>>> friedmanchisquare(*df.to_numpy().T)
FriedmanchisquareResult(statistic=1.9999999999999893, pvalue=0.3678794411714444)
Using a long-format dataframe
>>> df_long = df.melt(ignore_index=False).reset_index()
>>> pg.friedman(data=df_long, dv="value", within="variable", subject="index")
Source W ddof1 Q p-unc
Friedman variable 0.083333 2 2.0 0.367879
Using the F-test method
>>> pg.friedman(df, method="f")
Source W ddof1 ddof2 F p-unc
Friedman Within 0.083333 1.833333 20.166667 1.0 0.378959
def friedman(data=None, dv=None, within=None, subject=None, method="chisq"):
    """Friedman test for repeated measurements.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Both wide and long-format dataframe are supported for this test.
    dv : string
        Name of column containing the dependent variable (only required if ``data`` is in
        long format).
    within : string
        Name of column containing the within-subject factor (only required if ``data`` is in
        long format). Two or more within-factor are not currently supported.
    subject : string
        Name of column containing the subject/rater identifier (only required if ``data`` is in
        long format).
    method : string
        Statistical test to perform. Must be ``'chisq'`` (chi-square test) or ``'f'`` (F test).
        See notes below for explanation.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'W'``: Kendall's coefficient of concordance, corrected for ties

        If ``method='chisq'``

        * ``'Q'``: The Friedman chi-square statistic, corrected for ties
        * ``'ddof1'``: degrees of freedom
        * ``'p-unc'``: Uncorrected p-value of the chi squared test

        If ``method='f'``

        * ``'F'``: The Friedman F statistic, corrected for ties
        * ``'ddof1'``: degrees of freedom of the numerator
        * ``'ddof2'``: degrees of freedom of the denominator
        * ``'p-unc'``: Uncorrected p-value of the F test

    Raises
    ------
    AssertionError
        If ``method`` is not ``'chisq'`` or ``'f'``.

    Notes
    -----
    The Friedman test is used for non-parametric (rank-based) one-way repeated measures ANOVA.
    It is equivalent to the test of significance of Kendall's
    coefficient of concordance (Kendall's W). Most commonly a Q statistic,
    which has asymptotical chi-squared distribution, is computed and used for
    testing. However, the chi-squared test tends to be overly conservative for small numbers
    of samples and/or repeated measures, in which case a F-test is more adequate [1]_.

    Data can be in wide or long format. Missing values are automatically removed using a
    strict listwise approach (= complete-case analysis). In other words, any subject with one or
    more missing value(s) is completely removed from the dataframe prior to running the
    test.

    References
    ----------
    .. [1] Marozzi, M. (2014). Testing for concordance between several
           criteria. Journal of Statistical Computation and Simulation,
           84(9), 1843–1850. https://doi.org/10.1080/00949655.2013.766189

    .. [2] https://www.real-statistics.com/anova-repeated-measures/friedman-test/

    Examples
    --------
    Compute the Friedman test for repeated measurements, using a wide-format dataframe

    >>> import pandas as pd
    >>> import pingouin as pg
    >>> df = pd.DataFrame({
    ...    'white': {0: 10, 1: 8, 2: 7, 3: 9, 4: 7, 5: 4, 6: 5, 7: 6, 8: 5, 9: 10, 10: 4, 11: 7},
    ...    'red': {0: 7, 1: 5, 2: 8, 3: 6, 4: 5, 5: 7, 6: 9, 7: 6, 8: 4, 9: 6, 10: 7, 11: 3},
    ...    'rose': {0: 8, 1: 5, 2: 6, 3: 4, 4: 7, 5: 5, 6: 3, 7: 7, 8: 6, 9: 4, 10: 4, 11: 3}})
    >>> pg.friedman(df)
              Source         W  ddof1    Q     p-unc
    Friedman  Within  0.083333      2  2.0  0.367879

    Compare with SciPy

    >>> from scipy.stats import friedmanchisquare
    >>> friedmanchisquare(*df.to_numpy().T)
    FriedmanchisquareResult(statistic=1.9999999999999893, pvalue=0.3678794411714444)

    Using a long-format dataframe

    >>> df_long = df.melt(ignore_index=False).reset_index()
    >>> pg.friedman(data=df_long, dv="value", within="variable", subject="index")
                Source         W  ddof1    Q     p-unc
    Friedman  variable  0.083333      2  2.0  0.367879

    Using the F-test method

    >>> pg.friedman(df, method="f")
              Source         W     ddof1      ddof2    F     p-unc
    Friedman  Within  0.083333  1.833333  20.166667  1.0  0.378959
    """
    # Validate `method` up front: an unrecognized value would otherwise fall
    # through both branches below and raise UnboundLocalError on `stats`.
    assert method in ["chisq", "f"], "method must be 'chisq' or 'f'."
    # Convert from wide to long-format, if needed
    if all([v is None for v in [dv, within, subject]]):
        assert isinstance(data, pd.DataFrame)
        data = data._get_numeric_data().dropna()  # Listwise deletion of missing values
        assert data.shape[0] > 2, "Data must have at least 3 non-missing rows."
        assert data.shape[1] > 1, "Data must contain at least two columns."
        data["Subj"] = np.arange(data.shape[0])
        data = data.melt(id_vars="Subj", var_name="Within", value_name="DV")
        subject, within, dv = "Subj", "Within", "DV"

    # Check dataframe
    data = _check_dataframe(dv=dv, within=within, data=data, subject=subject, effects="within")
    assert not data[within].isnull().any(), "Cannot have missing values in `within`."
    assert not data[subject].isnull().any(), "Cannot have missing values in `subject`."

    # Pivot the table to a wide-format dataframe. This has several effects:
    # 1) Force missing values to be explicit (a NaN cell is created)
    # 2) Automatic collapsing to the mean if multiple within factors are present
    # 3) If using dropna, remove rows with missing values (listwise deletion).
    # The latter is the same behavior as JASP (= strict complete-case analysis).
    data_piv = data.pivot_table(index=subject, columns=within, values=dv, observed=True)
    data_piv = data_piv.dropna()

    # Extract data in numpy array and calculate ranks
    X = data_piv.to_numpy()
    n, k = X.shape
    ranked = scipy.stats.rankdata(X, axis=1)
    ssbn = (ranked.sum(axis=0) ** 2).sum()

    # Correction for ties: sum t * (t^2 - 1) over all groups of tied values
    # within each subject's row
    ties = 0
    for i in range(n):
        replist, repnum = scipy.stats.find_repeats(X[i])
        for t in repnum:
            ties += t * (t * t - 1)

    # Compute Kendall's W corrected for ties
    W = (12 * ssbn - 3 * n**2 * k * (k + 1) ** 2) / (n**2 * k * (k - 1) * (k + 1) - n * ties)

    if method == "chisq":
        # Compute the Q statistic
        Q = n * (k - 1) * W
        # Approximate the p-value
        ddof1 = k - 1
        p_unc = scipy.stats.chi2.sf(Q, ddof1)
        # Create output dataframe
        stats = pd.DataFrame(
            {"Source": within, "W": W, "ddof1": ddof1, "Q": Q, "p-unc": p_unc}, index=["Friedman"]
        )
    else:
        # method == "f": compute the F statistic
        F = W * (n - 1) / (1 - W)
        # Approximate the p-value
        ddof1 = k - 1 - 2 / n
        ddof2 = (n - 1) * ddof1
        p_unc = scipy.stats.f.sf(F, ddof1, ddof2)
        # Create output dataframe
        stats = pd.DataFrame(
            {"Source": within, "W": W, "ddof1": ddof1, "ddof2": ddof2, "F": F, "p-unc": p_unc},
            index=["Friedman"],
        )
    return _postprocess_dataframe(stats)
| (data=None, dv=None, within=None, subject=None, method='chisq') |
32,010 | pingouin.distribution | gzscore | Geometric standard (Z) score.
Parameters
----------
x : array_like
Array of raw values.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `x`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the geometric z scores computed for the non-nan values.
Returns
-------
gzscore : array_like
Array of geometric z-scores (same shape as x).
Notes
-----
Geometric Z-scores are better measures of dispersion than arithmetic
z-scores when the sample data come from a log-normally distributed
population [1]_.
Given the raw scores :math:`x`, the geometric mean :math:`\mu_g` and
the geometric standard deviation :math:`\sigma_g`,
the standard score is given by the formula:
.. math:: z = \frac{log(x) - log(\mu_g)}{log(\sigma_g)}
References
----------
.. [1] https://en.wikipedia.org/wiki/Geometric_standard_deviation
Examples
--------
Standardize a lognormal-distributed vector:
>>> import numpy as np
>>> from pingouin import gzscore
>>> np.random.seed(123)
>>> raw = np.random.lognormal(size=100)
>>> z = gzscore(raw)
>>> print(round(z.mean(), 3), round(z.std(), 3))
-0.0 0.995
def gzscore(x, *, axis=0, ddof=1, nan_policy="propagate"):
    """Geometric standard (Z) score.

    .. deprecated::
        Use :py:func:`scipy.stats.gzscore` instead.

    Parameters
    ----------
    x : array_like
        Array of raw values.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `x`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 1.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    gzscore : array_like
        Array of geometric z-scores (same shape as x).

    Notes
    -----
    The geometric z-score of a value is the z-score of its logarithm, i.e.
    :math:`z = (\\log(x) - \\log(\\mu_g)) / \\log(\\sigma_g)` where
    :math:`\\mu_g` and :math:`\\sigma_g` are the geometric mean and
    geometric standard deviation. It is a better measure of dispersion than
    the arithmetic z-score when the sample data come from a log-normally
    distributed population [1]_.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Geometric_standard_deviation

    Examples
    --------
    Standardize a lognormal-distributed vector:

    >>> import numpy as np
    >>> from pingouin import gzscore
    >>> np.random.seed(123)
    >>> raw = np.random.lognormal(size=100)
    >>> z = gzscore(raw)
    >>> print(round(z.mean(), 3), round(z.std(), 3))
    -0.0 0.995
    """
    # Emit the deprecation notice on every call
    warnings.warn(
        "gzscore is deprecated and will be removed in pingouin 0.7.0;"
        " use scipy.stats.gzscore instead."
    )
    values = np.asanyarray(x)
    # Masked arrays need the masked-aware logarithm to preserve the mask
    if isinstance(values, np.ma.MaskedArray):
        log_values = np.ma.log(values)
    else:
        log_values = np.log(values)
    # Standardize the log-transformed data
    return scipy.stats.zscore(log_values, axis=axis, ddof=ddof, nan_policy=nan_policy)
| (x, *, axis=0, ddof=1, nan_policy='propagate') |
32,011 | pingouin.nonparametric | harrelldavis | Harrell-Davis robust estimate of the :math:`q^{th}` quantile(s) of the
data.
.. versionadded:: 0.2.9
Parameters
----------
x : array_like
Data, must be a one or two-dimensional vector.
quantile : float or array_like
Quantile or sequence of quantiles to compute, must be between 0 and 1.
Default is ``0.5``.
axis : int
Axis along which the MAD is computed. Default is the last axis (-1).
Can be either 0, 1 or -1.
Returns
-------
y : float or array_like
The estimated quantile(s). If ``quantile`` is a single quantile, will
return a float, otherwise will compute each quantile separately and
returns an array of floats.
Notes
-----
The Harrell-Davis method [1]_ estimates the :math:`q^{th}` quantile by a
linear combination of the order statistics. Results have been tested
against a Matlab implementation [2]_. Note that this method is also
used to measure the confidence intervals of the difference between
quantiles of two groups, as implemented in the shift function [3]_.
See Also
--------
plot_shift
References
----------
.. [1] Frank E. Harrell, C. E. Davis, A new distribution-free quantile
estimator, Biometrika, Volume 69, Issue 3, December 1982, Pages
635–640, https://doi.org/10.1093/biomet/69.3.635
.. [2] https://github.com/GRousselet/matlab_stats/blob/master/hd.m
.. [3] Rousselet, G. A., Pernet, C. R. and Wilcox, R. R. (2017). Beyond
differences in means: robust graphical methods to compare two groups
in neuroscience. Eur J Neurosci, 46: 1738-1748.
https://doi.org/doi:10.1111/ejn.13610
Examples
--------
Estimate the 0.5 quantile (i.e median) of 100 observation picked from a
normal distribution with zero mean and unit variance.
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.normal(0, 1, 100)
>>> round(pg.harrelldavis(x, quantile=0.5), 4)
-0.0499
Several quantiles at once
>>> pg.harrelldavis(x, quantile=[0.25, 0.5, 0.75])
array([-0.84133224, -0.04991657, 0.95897233])
On the last axis of a 2D vector (default)
>>> np.random.seed(123)
>>> x = np.random.normal(0, 1, (10, 100))
>>> pg.harrelldavis(x, quantile=[0.25, 0.5, 0.75])
array([[-0.84133224, -0.52346777, -0.81801193, -0.74611216, -0.64928321,
-0.48565262, -0.64332799, -0.8178394 , -0.70058282, -0.73088088],
[-0.04991657, 0.02932655, -0.08905073, -0.1860034 , 0.06970415,
0.15129817, 0.00430958, -0.13784786, -0.08648077, -0.14407123],
[ 0.95897233, 0.49543002, 0.57712236, 0.48620599, 0.85899005,
0.7903462 , 0.76558585, 0.62528436, 0.60421847, 0.52620286]])
On the first axis
>>> pg.harrelldavis(x, quantile=[0.5], axis=0).shape
(100,)
def harrelldavis(x, quantile=0.5, axis=-1):
    """Harrell-Davis robust estimate of the :math:`q^{th}` quantile(s) of the
    data.

    .. versionadded:: 0.2.9

    Parameters
    ----------
    x : array_like
        Data, must be a one or two-dimensional vector.
    quantile : float or array_like
        Quantile or sequence of quantiles to compute, must be between 0 and 1.
        Default is ``0.5``.
    axis : int
        Axis along which the estimate is computed. Default is the last
        axis (-1). Can be either 0, 1 or -1.

    Returns
    -------
    y : float or array_like
        The estimated quantile(s). If ``quantile`` is a single quantile, will
        return a float, otherwise will compute each quantile separately and
        returns an array of floats.

    Notes
    -----
    The Harrell-Davis method [1]_ estimates the :math:`q^{th}` quantile as a
    weighted linear combination of the order statistics, where the weights
    are increments of a Beta cumulative distribution function. Results have
    been tested against a Matlab implementation [2]_.

    See Also
    --------
    plot_shift

    References
    ----------
    .. [1] Frank E. Harrell, C. E. Davis, A new distribution-free quantile
       estimator, Biometrika, Volume 69, Issue 3, December 1982, Pages
       635-640, https://doi.org/10.1093/biomet/69.3.635

    .. [2] https://github.com/GRousselet/matlab_stats/blob/master/hd.m

    Examples
    --------
    Estimate the 0.5 quantile (i.e. median) of 100 observations drawn from a
    standard normal distribution:

    >>> import numpy as np
    >>> import pingouin as pg
    >>> np.random.seed(123)
    >>> x = np.random.normal(0, 1, 100)
    >>> round(pg.harrelldavis(x, quantile=0.5), 4)
    -0.0499

    Several quantiles at once:

    >>> pg.harrelldavis(x, quantile=[0.25, 0.5, 0.75])
    array([-0.84133224, -0.04991657,  0.95897233])
    """
    data = np.asarray(x)
    assert data.ndim <= 2, "Only 1D or 2D array are supported for this function."
    assert axis in [0, 1, -1], "Axis must be 0, 1 or -1."
    # Order statistics along the requested axis
    data = np.sort(data, axis=axis)
    n = data.shape[axis]
    positions = np.arange(n)
    # Normalize to a list so scalars and sequences share one code path
    quantiles = [quantile] if isinstance(quantile, float) else quantile
    estimates = []
    for q in quantiles:
        # Beta distribution parameters for the qth quantile
        a = (n + 1) * q
        b = (n + 1) * (1 - q)
        # Weight of each order statistic = Beta probability mass on [i/n, (i+1)/n]
        weights = scipy.stats.beta.cdf((positions + 1) / n, a, b) - scipy.stats.beta.cdf(
            positions / n, a, b
        )
        if axis == 0:
            # Broadcast the weight vector down the rows
            estimates.append((weights[..., None] * data).sum(axis))
        else:
            estimates.append((weights * data).sum(axis))
    # A single quantile yields a scalar-like result; several are stacked
    if len(estimates) == 1:
        return estimates[0]
    return np.array(estimates)
| (x, quantile=0.5, axis=-1) |
32,012 | pingouin.distribution | homoscedasticity | Test equality of variance.
Parameters
----------
data : :py:class:`pandas.DataFrame`, list or dict
Iterable. Can be either a list / dictionary of iterables
or a wide- or long-format pandas dataframe.
dv : str
Dependent variable (only when ``data`` is a long-format dataframe).
group : str
Grouping variable (only when ``data`` is a long-format dataframe).
method : str
Statistical test. `'levene'` (default) performs the Levene test
using :py:func:`scipy.stats.levene`, and `'bartlett'` performs the
Bartlett test using :py:func:`scipy.stats.bartlett`.
The former is more robust to departure from normality.
alpha : float
Significance level.
**kwargs : optional
Optional argument(s) passed to the lower-level :py:func:`scipy.stats.levene` function.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'W/T'``: Test statistic ('W' for Levene, 'T' for Bartlett)
* ``'pval'``: p-value
* ``'equal_var'``: True if ``data`` has equal variance
See Also
--------
normality : Univariate normality test.
sphericity : Mauchly's test for sphericity.
Notes
-----
The **Bartlett** :math:`T` statistic [1]_ is defined as:
.. math::
T = \frac{(N-k) \ln{s^{2}_{p}} - \sum_{i=1}^{k}(N_{i} - 1)
\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\sum_{i=1}^{k}{1/(N_{i} - 1))}
- 1/(N-k))}
where :math:`s_i^2` is the variance of the :math:`i^{th}` group,
:math:`N` is the total sample size, :math:`N_i` is the sample size of the
:math:`i^{th}` group, :math:`k` is the number of groups,
and :math:`s_p^2` is the pooled variance.
The pooled variance is a weighted average of the group variances and is
defined as:
.. math:: s^{2}_{p} = \sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k)
The p-value is then computed using a chi-square distribution:
.. math:: T \sim \chi^2(k-1)
The **Levene** :math:`W` statistic [2]_ is defined as:
.. math::
W = \frac{(N-k)} {(k-1)}
\frac{\sum_{i=1}^{k}N_{i}(\overline{Z}_{i.}-\overline{Z})^{2} }
{\sum_{i=1}^{k}\sum_{j=1}^{N_i}(Z_{ij}-\overline{Z}_{i.})^{2} }
where :math:`Z_{ij} = |Y_{ij} - \text{median}({Y}_{i.})|`,
:math:`\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and
:math:`\overline{Z}` is the grand mean of :math:`Z_{ij}`.
The p-value is then computed using a F-distribution:
.. math:: W \sim F(k-1, N-k)
.. warning:: Missing values are not supported for this function.
Make sure to remove them before using the
:py:meth:`pandas.DataFrame.dropna` or :py:func:`pingouin.remove_na`
functions.
References
----------
.. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical
tests. Proc. R. Soc. Lond. A, 160(901), 268-282.
.. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the
equality of variances. Journal of the American Statistical
Association, 69(346), 364-367.
Examples
--------
1. Levene test on a wide-format dataframe
>>> import numpy as np
>>> import pingouin as pg
>>> data = pg.read_dataset('mediation')
>>> pg.homoscedasticity(data[['X', 'Y', 'M']])
W pval equal_var
levene 1.173518 0.310707 True
2. Same data but using a long-format dataframe
>>> data_long = data[['X', 'Y', 'M']].melt()
>>> pg.homoscedasticity(data_long, dv="value", group="variable")
W pval equal_var
levene 1.173518 0.310707 True
3. Same but using a mean center
>>> pg.homoscedasticity(data_long, dv="value", group="variable", center="mean")
W pval equal_var
levene 1.572239 0.209303 True
4. Bartlett test using a list of iterables
>>> data = [[4, 8, 9, 20, 14], np.array([5, 8, 15, 45, 12])]
>>> pg.homoscedasticity(data, method="bartlett", alpha=.05)
T pval equal_var
bartlett 2.873569 0.090045 True
def homoscedasticity(data, dv=None, group=None, method="levene", alpha=0.05, **kwargs):
    """Test equality of variance.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`, list or dict
        Iterable. Can be either a list / dictionary of iterables
        or a wide- or long-format pandas dataframe.
    dv : str
        Dependent variable (only when ``data`` is a long-format dataframe).
    group : str
        Grouping variable (only when ``data`` is a long-format dataframe).
    method : str
        Statistical test. `'levene'` (default) performs the Levene test
        using :py:func:`scipy.stats.levene`, and `'bartlett'` performs the
        Bartlett test using :py:func:`scipy.stats.bartlett`.
        The former is more robust to departure from normality.
    alpha : float
        Significance level.
    **kwargs : optional
        Optional argument(s) passed to the lower-level
        :py:func:`scipy.stats.levene` function.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'W/T'``: Test statistic ('W' for Levene, 'T' for Bartlett)
        * ``'pval'``: p-value
        * ``'equal_var'``: True if ``data`` has equal variance

    See Also
    --------
    normality : Univariate normality test.
    sphericity : Mauchly's test for sphericity.

    Notes
    -----
    The **Bartlett** :math:`T` statistic [1]_ is defined as:

    .. math::

        T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1)
            \\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))}
            - 1/(N-k))}

    where :math:`s_i^2` is the variance of the :math:`i^{th}` group,
    :math:`N` is the total sample size, :math:`N_i` is the sample size of the
    :math:`i^{th}` group, :math:`k` is the number of groups,
    and :math:`s_p^2` is the pooled variance.

    The pooled variance is a weighted average of the group variances and is
    defined as:

    .. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k)

    The p-value is then computed using a chi-square distribution:

    .. math:: T \\sim \\chi^2(k-1)

    The **Levene** :math:`W` statistic [2]_ is defined as:

    .. math::

        W = \\frac{(N-k)} {(k-1)}
            \\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} }
            {\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} }

    where :math:`Z_{ij} = |Y_{ij} - \\text{median}({Y}_{i.})|`,
    :math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and
    :math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`.

    The p-value is then computed using a F-distribution:

    .. math:: W \\sim F(k-1, N-k)

    .. warning:: Missing values are not supported for this function.
        Make sure to remove them before using the
        :py:meth:`pandas.DataFrame.dropna` or :py:func:`pingouin.remove_na`
        functions.

    References
    ----------
    .. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical
           tests. Proc. R. Soc. Lond. A, 160(901), 268-282.

    .. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the
           equality of variances. Journal of the American Statistical
           Association, 69(346), 364-367.

    Examples
    --------
    1. Levene test on a wide-format dataframe

    >>> import numpy as np
    >>> import pingouin as pg
    >>> data = pg.read_dataset('mediation')
    >>> pg.homoscedasticity(data[['X', 'Y', 'M']])
                   W      pval  equal_var
    levene  1.173518  0.310707       True

    2. Same data but using a long-format dataframe

    >>> data_long = data[['X', 'Y', 'M']].melt()
    >>> pg.homoscedasticity(data_long, dv="value", group="variable")
                   W      pval  equal_var
    levene  1.173518  0.310707       True

    3. Same but using a mean center

    >>> pg.homoscedasticity(data_long, dv="value", group="variable", center="mean")
                   W      pval  equal_var
    levene  1.572239  0.209303       True

    4. Bartlett test using a list of iterables

    >>> data = [[4, 8, 9, 20, 14], np.array([5, 8, 15, 45, 12])]
    >>> pg.homoscedasticity(data, method="bartlett", alpha=.05)
                     T      pval  equal_var
    bartlett  2.873569  0.090045       True
    """
    assert isinstance(data, (pd.DataFrame, list, dict))
    assert method.lower() in ["levene", "bartlett"]
    # Resolve the scipy test function (scipy.stats.levene / scipy.stats.bartlett)
    func = getattr(scipy.stats, method)
    if isinstance(data, pd.DataFrame):
        # Data is a Pandas DataFrame
        if dv is None and group is None:
            # Wide-format: each numeric column is one sample.
            numdata = data._get_numeric_data()
            assert numdata.shape[1] > 1, "Data must have at least two columns."
            statistic, p = func(*numdata.to_numpy().T, **kwargs)
        else:
            # Long-format: one sample per level of the grouping column.
            assert group in data.columns
            assert dv in data.columns
            grp = data.groupby(group, observed=True)[dv]
            # BUGFIX: the message previously said "columns"; in long-format
            # the requirement is on the number of *groups*.
            assert grp.ngroups > 1, "Data must have at least two groups."
            statistic, p = func(*grp.apply(list), **kwargs)
    elif isinstance(data, list):
        # Check that list contains other list or np.ndarray
        assert all(isinstance(el, (list, np.ndarray)) for el in data)
        assert len(data) > 1, "Data must have at least two iterables."
        statistic, p = func(*data, **kwargs)
    else:
        # Data is a dict: each value is one sample.
        assert all(isinstance(el, (list, np.ndarray)) for el in data.values())
        assert len(data) > 1, "Data must have at least two iterables."
        statistic, p = func(*data.values(), **kwargs)
    # Equal variance is concluded when we fail to reject H0 at `alpha`.
    # bool(...) keeps a plain Python bool even if `p` is a numpy scalar.
    equal_var = bool(p > alpha)
    stat_name = "W" if method.lower() == "levene" else "T"
    stats = pd.DataFrame({stat_name: statistic, "pval": p, "equal_var": equal_var}, index=[method])
    return _postprocess_dataframe(stats)
| (data, dv=None, group=None, method='levene', alpha=0.05, **kwargs) |
32,013 | pingouin.reliability | intraclass_corr | Intraclass correlation.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Long-format dataframe. Data must be fully balanced.
targets : string
Name of column in ``data`` containing the targets.
raters : string
Name of column in ``data`` containing the raters.
ratings : string
Name of column in ``data`` containing the ratings.
nan_policy : str
Defines how to handle when input contains missing values (nan).
`'raise'` (default) throws an error, `'omit'` performs the calculations
after deleting target(s) with one or more missing values (= listwise
deletion).
.. versionadded:: 0.3.0
Returns
-------
stats : :py:class:`pandas.DataFrame`
Output dataframe:
* ``'Type'``: ICC type
* ``'Description'``: description of the ICC
* ``'ICC'``: intraclass correlation
* ``'F'``: F statistic
* ``'df1'``: numerator degree of freedom
* ``'df2'``: denominator degree of freedom
* ``'pval'``: p-value
* ``'CI95%'``: 95% confidence intervals around the ICC
Notes
-----
The intraclass correlation (ICC, [1]_) assesses the reliability of ratings
by comparing the variability of different ratings of the same subject to
the total variation across all ratings and all subjects.
Shrout and Fleiss (1979) [2]_ describe six cases of reliability of ratings
done by :math:`k` raters on :math:`n` targets. Pingouin returns all six
cases with corresponding F and p-values, as well as 95% confidence
intervals.
From the documentation of the ICC function in the `psych
<https://cran.r-project.org/web/packages/psych/psych.pdf>`_ R package:
- **ICC1**: Each target is rated by a different rater and the raters are
selected at random. This is a one-way ANOVA fixed effects model.
- **ICC2**: A random sample of :math:`k` raters rate each target. The
measure is one of absolute agreement in the ratings. ICC1 is sensitive
to differences in means between raters and is a measure of absolute
agreement.
- **ICC3**: A fixed set of :math:`k` raters rate each target. There is no
generalization to a larger population of raters. ICC2 and ICC3 remove
mean differences between raters, but are sensitive to interactions.
The difference between ICC2 and ICC3 is whether raters are seen as fixed
or random effects.
Then, for each of these cases, the reliability can either be estimated for
a single rating or for the average of :math:`k` ratings. The 1 rating case
is equivalent to the average intercorrelation, while the :math:`k` rating
case is equivalent to the Spearman Brown adjusted reliability.
**ICC1k**, **ICC2k**, **ICC3K** reflect the means of :math:`k` raters.
This function has been tested against the ICC function of the R psych
package. Note however that contrary to the R implementation, the
current implementation does not use linear mixed effect but regular ANOVA,
which means that it only works with complete-case data (no missing values).
References
----------
.. [1] http://www.real-statistics.com/reliability/intraclass-correlation/
.. [2] Shrout, P. E., & Fleiss, J. L. (1979). Intraclass correlations:
uses in assessing rater reliability. Psychological bulletin, 86(2),
420.
Examples
--------
ICCs of wine quality assessed by 4 judges.
>>> import pingouin as pg
>>> data = pg.read_dataset('icc')
>>> icc = pg.intraclass_corr(data=data, targets='Wine', raters='Judge',
... ratings='Scores').round(3)
>>> icc.set_index("Type")
Description ICC F df1 df2 pval CI95%
Type
ICC1 Single raters absolute 0.728 11.680 7 24 0.0 [0.43, 0.93]
ICC2 Single random raters 0.728 11.787 7 21 0.0 [0.43, 0.93]
ICC3 Single fixed raters 0.729 11.787 7 21 0.0 [0.43, 0.93]
ICC1k Average raters absolute 0.914 11.680 7 24 0.0 [0.75, 0.98]
ICC2k Average random raters 0.914 11.787 7 21 0.0 [0.75, 0.98]
ICC3k Average fixed raters 0.915 11.787 7 21 0.0 [0.75, 0.98]
def intraclass_corr(data=None, targets=None, raters=None, ratings=None, nan_policy="raise"):
    """Intraclass correlation.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Long-format dataframe. Data must be fully balanced.
    targets : string
        Name of column in ``data`` containing the targets.
    raters : string
        Name of column in ``data`` containing the raters.
    ratings : string
        Name of column in ``data`` containing the ratings.
    nan_policy : str
        Defines how to handle when input contains missing values (nan).
        `'raise'` (default) throws an error, `'omit'` performs the calculations
        after deleting target(s) with one or more missing values (= listwise
        deletion).

        .. versionadded:: 0.3.0

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        Output dataframe:

        * ``'Type'``: ICC type
        * ``'Description'``: description of the ICC
        * ``'ICC'``: intraclass correlation
        * ``'F'``: F statistic
        * ``'df1'``: numerator degree of freedom
        * ``'df2'``: denominator degree of freedom
        * ``'pval'``: p-value
        * ``'CI95%'``: 95% confidence intervals around the ICC

    Notes
    -----
    The intraclass correlation (ICC, [1]_) assesses the reliability of ratings
    by comparing the variability of different ratings of the same subject to
    the total variation across all ratings and all subjects.

    Shrout and Fleiss (1979) [2]_ describe six cases of reliability of ratings
    done by :math:`k` raters on :math:`n` targets. Pingouin returns all six
    cases with corresponding F and p-values, as well as 95% confidence
    intervals.

    From the documentation of the ICC function in the `psych
    <https://cran.r-project.org/web/packages/psych/psych.pdf>`_ R package:

    - **ICC1**: Each target is rated by a different rater and the raters are
      selected at random. This is a one-way ANOVA fixed effects model.
    - **ICC2**: A random sample of :math:`k` raters rate each target. The
      measure is one of absolute agreement in the ratings. ICC1 is sensitive
      to differences in means between raters and is a measure of absolute
      agreement.
    - **ICC3**: A fixed set of :math:`k` raters rate each target. There is no
      generalization to a larger population of raters. ICC2 and ICC3 remove
      mean differences between raters, but are sensitive to interactions.
      The difference between ICC2 and ICC3 is whether raters are seen as fixed
      or random effects.

    Then, for each of these cases, the reliability can either be estimated for
    a single rating or for the average of :math:`k` ratings. The 1 rating case
    is equivalent to the average intercorrelation, while the :math:`k` rating
    case is equivalent to the Spearman Brown adjusted reliability.
    **ICC1k**, **ICC2k**, **ICC3K** reflect the means of :math:`k` raters.

    This function has been tested against the ICC function of the R psych
    package. Note however that contrary to the R implementation, the
    current implementation does not use linear mixed effect but regular ANOVA,
    which means that it only works with complete-case data (no missing values).

    References
    ----------
    .. [1] http://www.real-statistics.com/reliability/intraclass-correlation/

    .. [2] Shrout, P. E., & Fleiss, J. L. (1979). Intraclass correlations:
           uses in assessing rater reliability. Psychological bulletin, 86(2),
           420.

    Examples
    --------
    ICCs of wine quality assessed by 4 judges.

    >>> import pingouin as pg
    >>> data = pg.read_dataset('icc')
    >>> icc = pg.intraclass_corr(data=data, targets='Wine', raters='Judge',
    ...                          ratings='Scores').round(3)
    >>> icc.set_index("Type")
                       Description    ICC       F  df1  df2  pval         CI95%
    Type
    ICC1    Single raters absolute  0.728  11.680    7   24   0.0  [0.43, 0.93]
    ICC2      Single random raters  0.728  11.787    7   21   0.0  [0.43, 0.93]
    ICC3       Single fixed raters  0.729  11.787    7   21   0.0  [0.43, 0.93]
    ICC1k  Average raters absolute  0.914  11.680    7   24   0.0  [0.75, 0.98]
    ICC2k    Average random raters  0.914  11.787    7   21   0.0  [0.75, 0.98]
    ICC3k     Average fixed raters  0.915  11.787    7   21   0.0  [0.75, 0.98]
    """
    from pingouin import anova

    # Safety check
    assert isinstance(data, pd.DataFrame), "data must be a dataframe."
    assert all([v is not None for v in [targets, raters, ratings]])
    assert all([v in data.columns for v in [targets, raters, ratings]])
    assert nan_policy in ["omit", "raise"]

    # Convert data to wide-format (one row per target, one column per rater).
    # Unbalanced data produces NaN cells here, so the NaN check below also
    # catches imbalance.
    data = data.pivot_table(index=targets, columns=raters, values=ratings, observed=True)

    # Listwise deletion of missing values
    nan_present = data.isna().any().any()
    if nan_present:
        if nan_policy == "omit":
            data = data.dropna(axis=0, how="any")
        else:
            raise ValueError(
                "Either missing values are present in data or "
                "data are unbalanced. Please remove them "
                "manually or use nan_policy='omit'."
            )

    # Back to long-format
    # data_wide = data.copy()  # Optional, for PCA
    data = data.reset_index().melt(id_vars=targets, value_name=ratings)

    # Check that ratings is a numeric variable
    assert data[ratings].dtype.kind in "bfiu", "Ratings must be numeric."
    # Check that data are fully balanced
    # This behavior is ensured by the long-to-wide-to-long transformation
    # Unbalanced data will result in rows with missing values.
    # assert data.groupby(raters)[ratings].count().nunique() == 1

    # Extract sizes: k raters, n targets
    k = data[raters].nunique()
    n = data[targets].nunique()

    # Two-way ANOVA
    with np.errstate(invalid="ignore"):
        # For max precision, make sure rounding is disabled
        old_options = options.copy()
        options["round"] = None
        aov = anova(data=data, dv=ratings, between=[targets, raters], ss_type=2)
        options.update(old_options)  # restore options

    # Extract mean squares from the ANOVA table.
    # NOTE(review): row order assumed to be 0 = targets, 1 = raters,
    # 2 = error term — confirm against pingouin.anova output.
    msb = aov.at[0, "MS"]  # between-targets MS
    msw = (aov.at[1, "SS"] + aov.at[2, "SS"]) / (aov.at[1, "DF"] + aov.at[2, "DF"])  # within-targets MS
    msj = aov.at[1, "MS"]  # between-raters MS
    mse = aov.at[2, "MS"]  # error MS

    # Calculate ICCs: single-rating (icc1..3) and average-of-k (icc1k..3k)
    # forms of the Shrout & Fleiss (1979) cases.
    icc1 = (msb - msw) / (msb + (k - 1) * msw)
    icc2 = (msb - mse) / (msb + (k - 1) * mse + k * (msj - mse) / n)
    icc3 = (msb - mse) / (msb + (k - 1) * mse)
    icc1k = (msb - msw) / msb
    icc2k = (msb - mse) / (msb + (msj - mse) / n)
    icc3k = (msb - mse) / msb

    # Calculate F, df, and p-values
    f1k = msb / msw
    df1 = n - 1
    df1kd = n * (k - 1)
    p1k = f.sf(f1k, df1, df1kd)

    f2k = f3k = msb / mse
    df2kd = (n - 1) * (k - 1)
    p2k = f.sf(f2k, df1, df2kd)

    # Create output dataframe (one row per ICC type)
    stats = {
        "Type": ["ICC1", "ICC2", "ICC3", "ICC1k", "ICC2k", "ICC3k"],
        "Description": [
            "Single raters absolute",
            "Single random raters",
            "Single fixed raters",
            "Average raters absolute",
            "Average random raters",
            "Average fixed raters",
        ],
        "ICC": [icc1, icc2, icc3, icc1k, icc2k, icc3k],
        "F": [f1k, f2k, f2k, f1k, f2k, f2k],
        "df1": n - 1,
        "df2": [df1kd, df2kd, df2kd, df1kd, df2kd, df2kd],
        "pval": [p1k, p2k, p2k, p1k, p2k, p2k],
    }

    stats = pd.DataFrame(stats)

    # Calculate confidence intervals
    alpha = 0.05
    # Case 1 and 3: CI bounds from the F ratios
    f1l = f1k / f.ppf(1 - alpha / 2, df1, df1kd)
    f1u = f1k * f.ppf(1 - alpha / 2, df1kd, df1)
    l1 = (f1l - 1) / (f1l + (k - 1))
    u1 = (f1u - 1) / (f1u + (k - 1))
    f3l = f3k / f.ppf(1 - alpha / 2, df1, df2kd)
    f3u = f3k * f.ppf(1 - alpha / 2, df2kd, df1)
    l3 = (f3l - 1) / (f3l + (k - 1))
    u3 = (f3u - 1) / (f3u + (k - 1))
    # Case 2: uses a Satterthwaite-style approximate df `v`
    fj = msj / mse
    vn = df2kd * (k * icc2 * fj + n * (1 + (k - 1) * icc2) - k * icc2) ** 2
    vd = df1 * k**2 * icc2**2 * fj**2 + (n * (1 + (k - 1) * icc2) - k * icc2) ** 2
    v = vn / vd
    f2u = f.ppf(1 - alpha / 2, n - 1, v)
    f2l = f.ppf(1 - alpha / 2, v, n - 1)
    l2 = n * (msb - f2u * mse) / (f2u * (k * msj + (k * n - k - n) * mse) + n * msb)
    u2 = n * (f2l * msb - mse) / (k * msj + (k * n - k - n) * mse + n * f2l * msb)

    # CI95% rows follow the Type order: ICC1, ICC2, ICC3, ICC1k, ICC2k, ICC3k
    stats["CI95%"] = [
        np.array([l1, u1]),
        np.array([l2, u2]),
        np.array([l3, u3]),
        np.array([1 - 1 / f1l, 1 - 1 / f1u]),
        np.array([l2 * k / (1 + l2 * (k - 1)), u2 * k / (1 + u2 * (k - 1))]),
        np.array([1 - 1 / f3l, 1 - 1 / f3u]),
    ]
    return _postprocess_dataframe(stats)
| (data=None, targets=None, raters=None, ratings=None, nan_policy='raise') |
32,014 | pingouin.nonparametric | kruskal | Kruskal-Wallis H-test for independent samples.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame
dv : string
Name of column containing the dependent variable.
between : string
Name of column containing the between factor.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'H'``: The Kruskal-Wallis H statistic, corrected for ties
* ``'p-unc'``: Uncorrected p-value
* ``'dof'``: degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(data=df, dv='Pain threshold', between='Hair color')
Source ddof1 H p-unc
Kruskal Hair color 3 10.58863 0.014172
def kruskal(data=None, dv=None, between=None, detailed=False):
    """Kruskal-Wallis H-test for independent samples.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame
    dv : string
        Name of column containing the dependent variable.
    between : string
        Name of column containing the between factor.
    detailed : bool
        Currently unused.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'H'``: The Kruskal-Wallis H statistic, corrected for ties
        * ``'p-unc'``: Uncorrected p-value
        * ``'dof'``: degrees of freedom

    Notes
    -----
    The Kruskal-Wallis H-test is a non-parametric alternative to one-way
    ANOVA: it tests the null hypothesis that all groups have the same
    population median. It accepts two or more independent samples, which may
    have different sizes. NaN values are automatically removed.

    Because H is assumed to follow a chi-square distribution, each group
    should not be too small; a typical rule of thumb is at least 5
    measurements per sample.

    Examples
    --------
    Compute the Kruskal-Wallis H-test for independent samples.

    >>> from pingouin import kruskal, read_dataset
    >>> df = read_dataset('anova')
    >>> kruskal(data=df, dv='Pain threshold', between='Hair color')
                 Source  ddof1        H     p-unc
    Kruskal  Hair color      3  10.58863  0.014172
    """
    # Validate the dataframe, keep only the two relevant columns, drop NaNs
    # and re-index to avoid duplicate-axis errors downstream.
    data = _check_dataframe(dv=dv, between=between, data=data, effects="between")
    data = data[[dv, between]].dropna().reset_index(drop=True)

    total_n = data[dv].size
    n_grp = data[between].nunique()

    # Rank all observations together (ties receive average ranks).
    data["rank"] = scipy.stats.rankdata(data[dv])

    # Per-group rank sums and sample sizes.
    ranked = data.groupby(between, observed=True)["rank"]
    rank_sums = ranked.sum().to_numpy()
    sizes = ranked.count().to_numpy()

    # H statistic, then the standard tie-correction factor.
    H = 12 / (total_n * (total_n + 1)) * np.sum(rank_sums**2 / sizes) - 3 * (total_n + 1)
    H /= scipy.stats.tiecorrect(data["rank"].to_numpy())

    # Degrees of freedom and p-value from the chi-square approximation.
    dof = n_grp - 1
    pval = scipy.stats.chi2.sf(H, dof)

    stats = pd.DataFrame(
        {"Source": between, "ddof1": dof, "H": H, "p-unc": pval},
        index=["Kruskal"],
    )
    return _postprocess_dataframe(stats)
| (data=None, dv=None, between=None, detailed=False) |
32,015 | pingouin.regression | linear_regression | (Multiple) Linear regression.
Parameters
----------
X : array_like
Predictor(s), of shape *(n_samples, n_features)* or *(n_samples)*.
y : array_like
Dependent variable, of shape *(n_samples)*.
add_intercept : bool
If False, assume that the data are already centered. If True, add a
constant term to the model. In this case, the first value in the
output dict is the intercept of the model.
.. note:: It is generally recommended to include a constant term
(intercept) to the model to limit the bias and force the residual
mean to equal zero. The intercept coefficient and p-values
are however rarely meaningful.
weights : array_like
An optional vector of sample weights to be used in the fitting
process, of shape *(n_samples)*. Missing or negative weights are not
allowed. If not null, a weighted least squares is calculated.
.. versionadded:: 0.3.5
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
:math:`\text{CI} = [\alpha / 2 ; 1 - \alpha / 2]`
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed). Default is False, which will raise an error if missing
values are present in either the predictor(s) or dependent
variable.
relimp : bool
If True, returns the relative importance (= contribution) of
predictors. This is irrelevant when the predictors are uncorrelated:
the total :math:`R^2` of the model is simply the sum of each univariate
regression :math:`R^2`-values. However, this does not apply when
predictors are correlated. Instead, the total :math:`R^2` of the model
is partitioned by averaging over all combinations of predictors,
as done in the `relaimpo
<https://cran.r-project.org/web/packages/relaimpo/relaimpo.pdf>`_
R package (``calc.relimp(type="lmg")``).
.. warning:: The computation time roughly doubles for each
additional predictor and therefore this can be extremely slow for
models with more than 12-15 predictors.
.. versionadded:: 0.3.0
Returns
-------
stats : :py:class:`pandas.DataFrame` or dict
Linear regression summary:
* ``'names'``: name of variable(s) in the model (e.g. x1, x2...)
* ``'coef'``: regression coefficients
* ``'se'``: standard errors
* ``'T'``: T-values
* ``'pval'``: p-values
* ``'r2'``: coefficient of determination (:math:`R^2`)
* ``'adj_r2'``: adjusted :math:`R^2`
* ``'CI[2.5%]'``: lower confidence intervals
* ``'CI[97.5%]'``: upper confidence intervals
* ``'relimp'``: relative contribution of each predictor to the final :math:`R^2` (only if ``relimp=True``).
* ``'relimp_perc'``: percent relative contribution
In addition, the output dataframe comes with hidden attributes such as
the residuals, and degrees of freedom of the model and residuals, which
can be accessed as follow, respectively:
>>> lm = pg.linear_regression() # doctest: +SKIP
>>> lm.residuals_, lm.df_model_, lm.df_resid_ # doctest: +SKIP
Note that to follow scikit-learn convention, these hidden attributes end
with an "_". When ``as_dataframe=False`` however, these attributes
are no longer hidden and can be accessed as any other keys in the
output dictionary.
>>> lm = pg.linear_regression() # doctest: +SKIP
>>> lm['residuals'], lm['df_model'], lm['df_resid'] # doctest: +SKIP
When ``as_dataframe=False`` the dictionary also contains the
processed ``X`` and ``y`` arrays (i.e, with NaNs removed if
``remove_na=True``) and the model's predicted values ``pred``.
>>> lm['X'], lm['y'], lm['pred'] # doctest: +SKIP
For a weighted least squares fit, the weighted ``Xw`` and ``yw``
arrays are included in the dictionary.
>>> lm['Xw'], lm['yw'] # doctest: +SKIP
See also
--------
logistic_regression, mediation_analysis, corr
Notes
-----
The :math:`\beta` coefficients are estimated using an ordinary least
squares (OLS) regression, as implemented in the
:py:func:`scipy.linalg.lstsq` function. The OLS method minimizes
the sum of squared residuals, and leads to a closed-form expression for
the estimated :math:`\beta`:
.. math:: \hat{\beta} = (X^TX)^{-1} X^Ty
It is generally recommended to include a constant term (intercept) to the
model to limit the bias and force the residual mean to equal zero.
Note that intercept coefficient and p-values are however rarely meaningful.
The standard error of the estimates is a measure of the accuracy of the
prediction defined as:
.. math:: \sigma = \sqrt{\text{MSE} \cdot (X^TX)^{-1}}
where :math:`\text{MSE}` is the mean squared error,
.. math::
\text{MSE} = \frac{SS_{\text{resid}}}{n - p - 1}
= \frac{\sum{(\text{true} - \text{pred})^2}}{n - p - 1}
:math:`p` is the total number of predictor variables in the model
(excluding the intercept) and :math:`n` is the sample size.
Using the :math:`\beta` coefficients and the standard errors,
the T-values can be obtained:
.. math:: T = \frac{\beta}{\sigma}
and the p-values approximated using a T-distribution with
:math:`n - p - 1` degrees of freedom.
The coefficient of determination (:math:`R^2`) is defined as:
.. math:: R^2 = 1 - (\frac{SS_{\text{resid}}}{SS_{\text{total}}})
The adjusted :math:`R^2` is defined as:
.. math:: \overline{R}^2 = 1 - (1 - R^2) \frac{n - 1}{n - p - 1}
The relative importance (``relimp``) column is a partitioning of the
total :math:`R^2` of the model into individual :math:`R^2` contribution.
This is calculated by taking the average over average contributions in
models of different sizes. For more details, please refer to
`Groemping et al. 2006 <http://dx.doi.org/10.18637/jss.v017.i01>`_
and the R package `relaimpo
<https://cran.r-project.org/web/packages/relaimpo/relaimpo.pdf>`_.
Note that Pingouin will automatically remove any duplicate columns
from :math:`X`, as well as any column with only one unique value
(constant), excluding the intercept.
Results have been compared against sklearn, R, statsmodels and JASP.
Examples
--------
1. Simple linear regression using columns of a pandas dataframe
In this first example, we'll use the tips dataset to see how well we
can predict the waiter's tip (in dollars) based on the total bill (also
in dollars).
>>> import numpy as np
>>> import pingouin as pg
>>> df = pg.read_dataset('tips')
>>> # Let's predict the tip ($) based on the total bill (also in $)
>>> lm = pg.linear_regression(df['total_bill'], df['tip'])
>>> lm.round(2)
names coef se T pval r2 adj_r2 CI[2.5%] CI[97.5%]
0 Intercept 0.92 0.16 5.76 0.0 0.46 0.45 0.61 1.23
1 total_bill 0.11 0.01 14.26 0.0 0.46 0.45 0.09 0.12
It comes as no surprise that total bill is indeed a significant predictor
of the waiter's tip (T=14.26, p<0.05). The :math:`R^2` of the model is 0.46
and the adjusted :math:`R^2` is 0.45, which means that our model roughly
explains ~45% of the total variance in the tip amount.
2. Multiple linear regression
We can also have more than one predictor and run a multiple linear
regression. Below, we add the party size as a second predictor of tip.
>>> # We'll add a second predictor: the party size
>>> lm = pg.linear_regression(df[['total_bill', 'size']], df['tip'])
>>> lm.round(2)
names coef se T pval r2 adj_r2 CI[2.5%] CI[97.5%]
0 Intercept 0.67 0.19 3.46 0.00 0.47 0.46 0.29 1.05
1 total_bill 0.09 0.01 10.17 0.00 0.47 0.46 0.07 0.11
2 size 0.19 0.09 2.26 0.02 0.47 0.46 0.02 0.36
The party size is also a significant predictor of tip (T=2.26, p=0.02).
Note that adding this new predictor however only improved the :math:`R^2`
of our model by ~1%.
This function also works with numpy arrays:
>>> X = df[['total_bill', 'size']].to_numpy()
>>> y = df['tip'].to_numpy()
>>> pg.linear_regression(X, y).round(2)
names coef se T pval r2 adj_r2 CI[2.5%] CI[97.5%]
0 Intercept 0.67 0.19 3.46 0.00 0.47 0.46 0.29 1.05
1 x1 0.09 0.01 10.17 0.00 0.47 0.46 0.07 0.11
2 x2 0.19 0.09 2.26 0.02 0.47 0.46 0.02 0.36
3. Get the residuals
>>> # For clarity, only display the first 9 values
>>> np.round(lm.residuals_, 2)[:9]
array([-1.62, -0.55, 0.31, 0.06, -0.11, 0.93, 0.13, -0.81, -0.49])
Using pandas, we can show a summary of the distribution of the residuals:
>>> import pandas as pd
>>> pd.Series(lm.residuals_).describe().round(2)
count 244.00
mean -0.00
std 1.01
min -2.93
25% -0.55
50% -0.09
75% 0.51
max 4.04
dtype: float64
5. No intercept and return only the regression coefficients
Sometimes it may be useful to remove the constant term from the regression,
or to only return the regression coefficients without calculating the
standard errors or p-values. This latter can potentially save you a lot of
time if you need to calculate hundreds of regression and only care about
the coefficients!
>>> pg.linear_regression(X, y, add_intercept=False, coef_only=True)
array([0.1007119 , 0.36209717])
6. Return a dictionary instead of a dataframe
>>> lm_dict = pg.linear_regression(X, y, as_dataframe=False)
>>> lm_dict.keys()
dict_keys(['names', 'coef', 'se', 'T', 'pval', 'r2', 'adj_r2', 'CI[2.5%]',
'CI[97.5%]', 'df_model', 'df_resid', 'residuals', 'X', 'y',
'pred'])
7. Remove missing values
>>> X[4, 1] = np.nan
>>> y[7] = np.nan
>>> pg.linear_regression(X, y, remove_na=True, coef_only=True)
array([0.65749955, 0.09262059, 0.19927529])
8. Get the relative importance of predictors
>>> lm = pg.linear_regression(X, y, remove_na=True, relimp=True)
>>> lm[['names', 'relimp', 'relimp_perc']]
names relimp relimp_perc
0 Intercept NaN NaN
1 x1 0.342503 73.045583
2 x2 0.126386 26.954417
The ``relimp`` column is a partitioning of the total :math:`R^2` of the
model into individual contribution. Therefore, it sums to the :math:`R^2`
of the full model. The ``relimp_perc`` is normalized to sum to 100%. See
`Groemping 2006 <https://www.jstatsoft.org/article/view/v017i01>`_
for more details.
>>> lm[['relimp', 'relimp_perc']].sum()
relimp 0.468889
relimp_perc 100.000000
dtype: float64
9. Weighted linear regression
>>> X = [1, 2, 3, 4, 5, 6]
>>> y = [10, 22, 11, 13, 13, 16]
>>> w = [1, 0.1, 1, 1, 0.5, 1] # Array of weights. Must be >= 0.
>>> lm = pg.linear_regression(X, y, weights=w)
>>> lm.round(2)
names coef se T pval r2 adj_r2 CI[2.5%] CI[97.5%]
0 Intercept 9.00 2.03 4.42 0.01 0.51 0.39 3.35 14.64
1 x1 1.04 0.50 2.06 0.11 0.51 0.39 -0.36 2.44
def linear_regression(
    X,
    y,
    add_intercept=True,
    weights=None,
    coef_only=False,
    alpha=0.05,
    as_dataframe=True,
    remove_na=False,
    relimp=False,
):
    """(Multiple) Linear regression.

    Parameters
    ----------
    X : array_like
        Predictor(s), of shape *(n_samples, n_features)* or *(n_samples)*.
    y : array_like
        Dependent variable, of shape *(n_samples)*.
    add_intercept : bool
        If False, assume that the data are already centered. If True, add a
        constant term to the model. In this case, the first value in the
        output dict is the intercept of the model.

        .. note:: It is generally recommended to include a constant term
            (intercept) to the model to limit the bias and force the residual
            mean to equal zero. The intercept coefficient and p-values
            are however rarely meaningful.
    weights : array_like
        An optional vector of sample weights to be used in the fitting
        process, of shape *(n_samples)*. Missing or negative weights are not
        allowed. If not null, a weighted least squares is calculated.

        .. versionadded:: 0.3.5
    coef_only : bool
        If True, return only the regression coefficients.
    alpha : float
        Alpha value used for the confidence intervals.
        :math:`\\text{CI} = [\\alpha / 2 ; 1 - \\alpha / 2]`
    as_dataframe : bool
        If True, returns a pandas DataFrame. If False, returns a dictionary.
    remove_na : bool
        If True, apply a listwise deletion of missing values (i.e. the entire
        row is removed). Default is False, which will raise an error if missing
        values are present in either the predictor(s) or dependent
        variable.
    relimp : bool
        If True, returns the relative importance (= contribution) of
        predictors. This is irrelevant when the predictors are uncorrelated:
        the total :math:`R^2` of the model is simply the sum of each univariate
        regression :math:`R^2`-values. However, this does not apply when
        predictors are correlated. Instead, the total :math:`R^2` of the model
        is partitioned by averaging over all combinations of predictors,
        as done in the `relaimpo
        <https://cran.r-project.org/web/packages/relaimpo/relaimpo.pdf>`_
        R package (``calc.relimp(type="lmg")``).

        .. warning:: The computation time roughly doubles for each
            additional predictor and therefore this can be extremely slow for
            models with more than 12-15 predictors.

        .. versionadded:: 0.3.0

    Returns
    -------
    stats : :py:class:`pandas.DataFrame` or dict
        Linear regression summary:

        * ``'names'``: name of variable(s) in the model (e.g. x1, x2...)
        * ``'coef'``: regression coefficients
        * ``'se'``: standard errors
        * ``'T'``: T-values
        * ``'pval'``: p-values
        * ``'r2'``: coefficient of determination (:math:`R^2`)
        * ``'adj_r2'``: adjusted :math:`R^2`
        * ``'CI[2.5%]'``: lower confidence intervals
        * ``'CI[97.5%]'``: upper confidence intervals
        * ``'relimp'``: relative contribution of each predictor to the final\
        :math:`R^2` (only if ``relimp=True``).
        * ``'relimp_perc'``: percent relative contribution

        In addition, the output dataframe comes with hidden attributes such as
        the residuals, and degrees of freedom of the model and residuals, which
        can be accessed as follow, respectively:

        >>> lm = pg.linear_regression() # doctest: +SKIP
        >>> lm.residuals_, lm.df_model_, lm.df_resid_ # doctest: +SKIP

        Note that to follow scikit-learn convention, these hidden atributes end
        with an "_". When ``as_dataframe=False`` however, these attributes
        are no longer hidden and can be accessed as any other keys in the
        output dictionary.

        >>> lm = pg.linear_regression() # doctest: +SKIP
        >>> lm['residuals'], lm['df_model'], lm['df_resid'] # doctest: +SKIP

        When ``as_dataframe=False`` the dictionary also contains the
        processed ``X`` and ``y`` arrays (i.e, with NaNs removed if
        ``remove_na=True``) and the model's predicted values ``pred``.

        >>> lm['X'], lm['y'], lm['pred'] # doctest: +SKIP

        For a weighted least squares fit, the weighted ``Xw`` and ``yw``
        arrays are included in the dictionary.

        >>> lm['Xw'], lm['yw'] # doctest: +SKIP

    See also
    --------
    logistic_regression, mediation_analysis, corr

    Notes
    -----
    The :math:`\\beta` coefficients are estimated using an ordinary least
    squares (OLS) regression, as implemented in the
    :py:func:`scipy.linalg.lstsq` function. The OLS method minimizes
    the sum of squared residuals, and leads to a closed-form expression for
    the estimated :math:`\\beta`:

    .. math:: \\hat{\\beta} = (X^TX)^{-1} X^Ty

    It is generally recommended to include a constant term (intercept) to the
    model to limit the bias and force the residual mean to equal zero.
    Note that intercept coefficient and p-values are however rarely meaningful.

    The standard error of the estimates is a measure of the accuracy of the
    prediction defined as:

    .. math:: \\sigma = \\sqrt{\\text{MSE} \\cdot (X^TX)^{-1}}

    where :math:`\\text{MSE}` is the mean squared error,

    .. math::
        \\text{MSE} = \\frac{SS_{\\text{resid}}}{n - p - 1}
        = \\frac{\\sum{(\\text{true} - \\text{pred})^2}}{n - p - 1}

    :math:`p` is the total number of predictor variables in the model
    (excluding the intercept) and :math:`n` is the sample size.

    Using the :math:`\\beta` coefficients and the standard errors,
    the T-values can be obtained:

    .. math:: T = \\frac{\\beta}{\\sigma}

    and the p-values approximated using a T-distribution with
    :math:`n - p - 1` degrees of freedom.

    The coefficient of determination (:math:`R^2`) is defined as:

    .. math:: R^2 = 1 - (\\frac{SS_{\\text{resid}}}{SS_{\\text{total}}})

    The adjusted :math:`R^2` is defined as:

    .. math:: \\overline{R}^2 = 1 - (1 - R^2) \\frac{n - 1}{n - p - 1}

    The relative importance (``relimp``) column is a partitioning of the
    total :math:`R^2` of the model into individual :math:`R^2` contribution.
    This is calculated by taking the average over average contributions in
    models of different sizes. For more details, please refer to
    `Groemping et al. 2006 <http://dx.doi.org/10.18637/jss.v017.i01>`_
    and the R package `relaimpo
    <https://cran.r-project.org/web/packages/relaimpo/relaimpo.pdf>`_.

    Note that Pingouin will automatically remove any duplicate columns
    from :math:`X`, as well as any column with only one unique value
    (constant), excluding the intercept.

    Results have been compared against sklearn, R, statsmodels and JASP.

    Examples
    --------
    1. Simple linear regression using columns of a pandas dataframe

    In this first example, we'll use the tips dataset to see how well we
    can predict the waiter's tip (in dollars) based on the total bill (also
    in dollars).

    >>> import numpy as np
    >>> import pingouin as pg
    >>> df = pg.read_dataset('tips')
    >>> # Let's predict the tip ($) based on the total bill (also in $)
    >>> lm = pg.linear_regression(df['total_bill'], df['tip'])
    >>> lm.round(2)
            names  coef    se      T  pval    r2  adj_r2  CI[2.5%]  CI[97.5%]
    0   Intercept  0.92  0.16   5.76   0.0  0.46    0.45      0.61       1.23
    1  total_bill  0.11  0.01  14.26   0.0  0.46    0.45      0.09       0.12

    It comes as no surprise that total bill is indeed a significant predictor
    of the waiter's tip (T=14.26, p<0.05). The :math:`R^2` of the model is 0.46
    and the adjusted :math:`R^2` is 0.45, which means that our model roughly
    explains ~45% of the total variance in the tip amount.

    2. Multiple linear regression

    We can also have more than one predictor and run a multiple linear
    regression. Below, we add the party size as a second predictor of tip.

    >>> # We'll add a second predictor: the party size
    >>> lm = pg.linear_regression(df[['total_bill', 'size']], df['tip'])
    >>> lm.round(2)
            names  coef    se      T  pval    r2  adj_r2  CI[2.5%]  CI[97.5%]
    0   Intercept  0.67  0.19   3.46  0.00  0.47    0.46      0.29       1.05
    1  total_bill  0.09  0.01  10.17  0.00  0.47    0.46      0.07       0.11
    2        size  0.19  0.09   2.26  0.02  0.47    0.46      0.02       0.36

    The party size is also a significant predictor of tip (T=2.26, p=0.02).
    Note that adding this new predictor however only improved the :math:`R^2`
    of our model by ~1%.

    This function also works with numpy arrays:

    >>> X = df[['total_bill', 'size']].to_numpy()
    >>> y = df['tip'].to_numpy()
    >>> pg.linear_regression(X, y).round(2)
           names  coef    se      T  pval    r2  adj_r2  CI[2.5%]  CI[97.5%]
    0  Intercept  0.67  0.19   3.46  0.00  0.47    0.46      0.29       1.05
    1         x1  0.09  0.01  10.17  0.00  0.47    0.46      0.07       0.11
    2         x2  0.19  0.09   2.26  0.02  0.47    0.46      0.02       0.36

    3. Get the residuals

    >>> # For clarity, only display the first 9 values
    >>> np.round(lm.residuals_, 2)[:9]
    array([-1.62, -0.55,  0.31,  0.06, -0.11,  0.93,  0.13, -0.81, -0.49])

    Using pandas, we can show a summary of the distribution of the residuals:

    >>> import pandas as pd
    >>> pd.Series(lm.residuals_).describe().round(2)
    count    244.00
    mean      -0.00
    std        1.01
    min       -2.93
    25%       -0.55
    50%       -0.09
    75%        0.51
    max        4.04
    dtype: float64

    4. No intercept and return only the regression coefficients

    Sometimes it may be useful to remove the constant term from the regression,
    or to only return the regression coefficients without calculating the
    standard errors or p-values. This latter can potentially save you a lot of
    time if you need to calculate hundreds of regression and only care about
    the coefficients!

    >>> pg.linear_regression(X, y, add_intercept=False, coef_only=True)
    array([0.1007119 , 0.36209717])

    5. Return a dictionary instead of a dataframe

    >>> lm_dict = pg.linear_regression(X, y, as_dataframe=False)
    >>> lm_dict.keys()
    dict_keys(['names', 'coef', 'se', 'T', 'pval', 'r2', 'adj_r2', 'CI[2.5%]',
               'CI[97.5%]', 'df_model', 'df_resid', 'residuals', 'X', 'y',
               'pred'])

    6. Remove missing values

    >>> X[4, 1] = np.nan
    >>> y[7] = np.nan
    >>> pg.linear_regression(X, y, remove_na=True, coef_only=True)
    array([0.65749955, 0.09262059, 0.19927529])

    7. Get the relative importance of predictors

    >>> lm = pg.linear_regression(X, y, remove_na=True, relimp=True)
    >>> lm[['names', 'relimp', 'relimp_perc']]
           names    relimp  relimp_perc
    0  Intercept       NaN          NaN
    1         x1  0.342503    73.045583
    2         x2  0.126386    26.954417

    The ``relimp`` column is a partitioning of the total :math:`R^2` of the
    model into individual contribution. Therefore, it sums to the :math:`R^2`
    of the full model. The ``relimp_perc`` is normalized to sum to 100%. See
    `Groemping 2006 <https://www.jstatsoft.org/article/view/v017i01>`_
    for more details.

    >>> lm[['relimp', 'relimp_perc']].sum()
    relimp           0.468889
    relimp_perc    100.000000
    dtype: float64

    8. Weighted linear regression

    >>> X = [1, 2, 3, 4, 5, 6]
    >>> y = [10, 22, 11, 13, 13, 16]
    >>> w = [1, 0.1, 1, 1, 0.5, 1]  # Array of weights. Must be >= 0.
    >>> lm = pg.linear_regression(X, y, weights=w)
    >>> lm.round(2)
           names  coef    se     T  pval    r2  adj_r2  CI[2.5%]  CI[97.5%]
    0  Intercept  9.00  2.03  4.42  0.01  0.51    0.39      3.35      14.64
    1         x1  1.04  0.50  2.06  0.11  0.51    0.39     -0.36       2.44
    """
    # Extract names if X is a Dataframe or Series
    if isinstance(X, pd.DataFrame):
        names = X.keys().tolist()
    elif isinstance(X, pd.Series):
        names = [X.name]
    else:
        names = []
    # Convert input to numpy array
    X = np.asarray(X)
    y = np.asarray(y)
    assert y.ndim == 1, "y must be one-dimensional."
    assert 0 < alpha < 1
    if X.ndim == 1:
        # Convert to (n_samples, n_features) shape
        X = X[..., np.newaxis]
    # Check for NaN / Inf
    if remove_na:
        X, y = rm_na(X, y[..., np.newaxis], paired=True, axis="rows")
        y = np.squeeze(y)
    y_gd = np.isfinite(y).all()
    X_gd = np.isfinite(X).all()
    assert y_gd, (
        "Target (y) contains NaN or Inf. Please remove them " "manually or use remove_na=True."
    )
    assert X_gd, (
        "Predictors (X) contain NaN or Inf. Please remove them " "manually or use remove_na=True."
    )
    # Check that X and y have same length
    assert y.shape[0] == X.shape[0], "X and y must have same number of samples"
    if not names:
        names = ["x" + str(i + 1) for i in range(X.shape[1])]
    if add_intercept:
        # Add intercept
        X = np.column_stack((np.ones(X.shape[0]), X))
        names.insert(0, "Intercept")
    # FINAL CHECKS BEFORE RUNNING LEAST SQUARES REGRESSION
    # 1. Let's remove column(s) with only zero, otherwise the regression fails
    n_nonzero = np.count_nonzero(X, axis=0)
    idx_zero = np.flatnonzero(n_nonzero == 0)  # Find columns that are only 0
    if len(idx_zero):
        X = np.delete(X, idx_zero, 1)
        names = np.delete(names, idx_zero)
    # 2. We also want to make sure that there is no more than one constant
    # column (= intercept), otherwise the regression fails
    # This is equivalent, but much faster, to pd.DataFrame(X).nunique()
    idx_unique = np.where(np.all(X == X[0, :], axis=0))[0]
    if len(idx_unique) > 1:
        # We remove all but the first "Intercept" column.
        X = np.delete(X, idx_unique[1:], 1)
        names = np.delete(names, idx_unique[1:])
    # Is there a constant in our predictor matrix? Useful for dof and R^2.
    constant = 1 if len(idx_unique) > 0 else 0
    # 3. Finally, we want to remove duplicate columns
    if X.shape[1] > 1:
        idx_duplicate = []
        for pair in itertools.combinations(range(X.shape[1]), 2):
            if np.array_equal(X[:, pair[0]], X[:, pair[1]]):
                idx_duplicate.append(pair[1])
        if len(idx_duplicate):
            X = np.delete(X, idx_duplicate, 1)
            names = np.delete(names, idx_duplicate)
    # 4. Check that we have enough samples / features
    n, p = X.shape[0], X.shape[1]
    assert n >= 3, "At least three valid samples are required in X."
    assert p >= 1, "X must have at least one valid column."
    # 5. Handle weights
    if weights is not None:
        if relimp:
            raise ValueError("relimp = True is not supported when using " "weights.")
        w = np.asarray(weights)
        assert w.ndim == 1, "weights must be a 1D array."
        assert w.size == n, "weights must be of shape n_samples."
        assert not np.isnan(w).any(), "Missing weights are not accepted."
        assert not (w < 0).any(), "Negative weights are not accepted."
        # Do not count weights == 0 in dof
        # This gives similar results as R lm() but different from statsmodels
        n = np.count_nonzero(w)
        # Rescale (whitening)
        wts = np.diag(np.sqrt(w))
        Xw = wts @ X
        yw = wts @ y
    else:
        # Set all weights to one, [1, 1, 1, ...]
        w = np.ones(n)
        Xw = X
        yw = y
    # FIT (WEIGHTED) LEAST SQUARES REGRESSION
    coef, ss_res, rank, _ = lstsq(Xw, yw, cond=None)
    ss_res = ss_res[0] if ss_res.shape == (1,) else ss_res
    if coef_only:
        return coef
    calc_ss_res = False
    if rank < Xw.shape[1]:
        # in this case, ss_res is of shape (0,), i.e., an empty array
        warnings.warn(
            "Design matrix supplied with `X` parameter is rank "
            f"deficient (rank {rank} with {Xw.shape[1]} columns). "
            "That means that one or more of the columns in `X` "
            "are a linear combination of one of more of the "
            "other columns."
        )
        calc_ss_res = True
    # Degrees of freedom
    df_model = rank - constant
    df_resid = n - rank
    # Calculate predicted values and (weighted) residuals
    pred = Xw @ coef
    resid = yw - pred
    if calc_ss_res:
        # In case we did not get ss_res from lstsq due to rank deficiency
        ss_res = (resid**2).sum()
    # Calculate total (weighted) sums of squares and R^2
    ss_tot = yw @ yw  # uncentered total SS, used when there is no intercept
    ss_wtot = np.sum(w * (y - np.average(y, weights=w)) ** 2)  # centered, weighted
    if constant:
        r2 = 1 - ss_res / ss_wtot
    else:
        r2 = 1 - ss_res / ss_tot
    adj_r2 = 1 - (1 - r2) * (n - constant) / df_resid
    # Compute mean squared error, variance and SE
    mse = ss_res / df_resid
    # pinv is used for robustness against a (near-)singular X^T X
    beta_var = mse * (np.linalg.pinv(Xw.T @ Xw).diagonal())
    beta_se = np.sqrt(beta_var)
    # Compute T and p-values
    T = coef / beta_se
    pval = 2 * t.sf(np.fabs(T), df_resid)
    # Compute confidence intervals
    crit = t.ppf(1 - alpha / 2, df_resid)
    marg_error = crit * beta_se
    ll = coef - marg_error
    ul = coef + marg_error
    # Rename CI
    ll_name = "CI[%.1f%%]" % (100 * alpha / 2)
    ul_name = "CI[%.1f%%]" % (100 * (1 - alpha / 2))
    # Create dict
    stats = {
        "names": names,
        "coef": coef,
        "se": beta_se,
        "T": T,
        "pval": pval,
        "r2": r2,
        "adj_r2": adj_r2,
        ll_name: ll,
        ul_name: ul,
    }
    # Relative importance
    if relimp:
        data = pd.concat(
            [pd.DataFrame(y, columns=["y"]), pd.DataFrame(X, columns=names)], sort=False, axis=1
        )
        if "Intercept" in names:
            # Intercept is the first column
            reli = _relimp(data.drop(columns=["Intercept"]).cov(numeric_only=True))
            reli["names"] = ["Intercept"] + reli["names"]
            reli["relimp"] = np.insert(reli["relimp"], 0, np.nan)
            reli["relimp_perc"] = np.insert(reli["relimp_perc"], 0, np.nan)
        else:
            reli = _relimp(data.cov(numeric_only=True))
        stats.update(reli)
    if as_dataframe:
        stats = _postprocess_dataframe(pd.DataFrame(stats))
        stats.df_model_ = df_model
        stats.df_resid_ = df_resid
        stats.residuals_ = 0  # Trick to avoid Pandas warning
        stats.residuals_ = resid  # Residuals is a hidden attribute
    else:
        stats["df_model"] = df_model
        stats["df_resid"] = df_resid
        stats["residuals"] = resid
        stats["X"] = X
        stats["y"] = y
        stats["pred"] = pred
        if weights is not None:
            stats["yw"] = yw
            stats["Xw"] = Xw
    return stats
| (X, y, add_intercept=True, weights=None, coef_only=False, alpha=0.05, as_dataframe=True, remove_na=False, relimp=False) |
32,016 | pingouin.datasets | list_dataset | List available example datasets.
Returns
-------
datasets : :py:class:`pandas.DataFrame`
A dataframe with the name, description and reference of all the
datasets included in Pingouin.
Examples
--------
>>> import pingouin as pg
>>> all_datasets = pg.list_dataset()
>>> all_datasets.index.tolist()
['ancova',
'anova',
'anova2',
'anova2_unbalanced',
'anova3',
'anova3_unbalanced',
'blandaltman',
'chi2_independence',
'chi2_mcnemar',
'circular',
'cochran',
'cronbach_alpha',
'cronbach_wide_missing',
'icc',
'mediation',
'mixed_anova',
'mixed_anova_unbalanced',
'multivariate',
'pairwise_corr',
'pairwise_tests',
'pairwise_tests_missing',
'partial_corr',
'penguins',
'rm_anova',
'rm_anova_wide',
'rm_anova2',
'rm_corr',
'rm_missing',
'tips']
def list_dataset():
    """Return an overview of the example datasets bundled with Pingouin.

    Returns
    -------
    datasets : :py:class:`pandas.DataFrame`
        One row per example dataset, indexed by dataset name, with its
        description and bibliographic reference.

    Examples
    --------
    >>> import pingouin as pg
    >>> all_datasets = pg.list_dataset()
    >>> 'penguins' in all_datasets.index  # doctest: +SKIP
    True
    """
    # ``dts`` is the module-level dataframe describing all bundled datasets;
    # re-index it by the dataset name so lookups by name are direct.
    overview = dts.set_index(keys="dataset")
    return overview
| () |
32,017 | pingouin.regression | logistic_regression | (Multiple) Binary logistic regression.
Parameters
----------
X : array_like
Predictor(s), of shape *(n_samples, n_features)* or *(n_samples)*.
y : array_like
Dependent variable, of shape *(n_samples)*.
``y`` must be binary, i.e. only contains 0 or 1. Multinomial logistic
regression is not supported.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
:math:`\text{CI} = [\alpha / 2 ; 1 - \alpha / 2]`
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed). Default is False, which will raise an error if missing
values are present in either the predictor(s) or dependent
variable.
**kwargs : optional
Optional arguments passed to
:py:class:`sklearn.linear_model.LogisticRegression` (see Notes).
Returns
-------
stats : :py:class:`pandas.DataFrame` or dict
Logistic regression summary:
* ``'names'``: name of variable(s) in the model (e.g. x1, x2...)
* ``'coef'``: regression coefficients (log-odds)
* ``'se'``: standard error
* ``'z'``: z-scores
* ``'pval'``: two-tailed p-values
* ``'CI[2.5%]'``: lower confidence interval
* ``'CI[97.5%]'``: upper confidence interval
See also
--------
linear_regression
Notes
-----
.. caution:: This function is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class. However,
Pingouin internally disables the L2 regularization and changes the
default solver to 'newton-cg' to obtain results that are similar to R and
statsmodels.
Logistic regression assumes that the log-odds (the logarithm of the
odds) for the value labeled "1" in the response variable is a linear
combination of the predictor variables. The log-odds are given by the
`logit <https://en.wikipedia.org/wiki/Logit>`_ function,
which map a probability :math:`p` of the response variable being "1"
from :math:`[0, 1)` to :math:`(-\infty, +\infty)`.
.. math:: \text{logit}(p) = \ln \frac{p}{1 - p} = \beta_0 + \beta X
The odds of the response variable being "1" can be obtained by
exponentiating the log-odds:
.. math:: \frac{p}{1 - p} = e^{\beta_0 + \beta X}
and the probability of the response variable being "1" is given by the
`logistic function <https://en.wikipedia.org/wiki/Logistic_function>`_:
.. math:: p = \frac{1}{1 + e^{-(\beta_0 + \beta X)}}
The first coefficient is always the constant term (intercept) of
the model. Pingouin will automatically add the intercept
to your predictor(s) matrix, therefore, :math:`X` should not include a
constant term. Pingouin will remove any constant term (e.g column with only
one unique value), or duplicate columns from :math:`X`.
The calculation of the p-values and confidence interval is adapted from a
`code by Rob Speare
<https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d>`_.
Results have been compared against statsmodels, R, and JASP.
Examples
--------
1. Simple binary logistic regression.
In this first example, we'll use the
`penguins dataset <https://github.com/allisonhorst/palmerpenguins>`_
to see how well we can predict the sex of penguins based on their
bodies mass.
>>> import numpy as np
>>> import pandas as pd
>>> import pingouin as pg
>>> df = pg.read_dataset('penguins')
>>> # Let's first convert the target variable from string to boolean:
>>> df['male'] = (df['sex'] == 'male').astype(int) # male: 1, female: 0
>>> # Since there are missing values in our outcome variable, we need to
>>> # set `remove_na=True` otherwise regression will fail.
>>> lom = pg.logistic_regression(df['body_mass_g'], df['male'],
... remove_na=True)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -5.16 0.71 -7.24 0.0 -6.56 -3.77
1 body_mass_g 0.00 0.00 7.24 0.0 0.00 0.00
Body mass is a significant predictor of sex (p<0.001). Here, it
could be useful to rescale our predictor variable from *g* to *kg*
(e.g divide by 1000) in order to get more intuitive coefficients and
confidence intervals:
>>> df['body_mass_kg'] = df['body_mass_g'] / 1000
>>> lom = pg.logistic_regression(df['body_mass_kg'], df['male'],
... remove_na=True)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -5.16 0.71 -7.24 0.0 -6.56 -3.77
1 body_mass_kg 1.23 0.17 7.24 0.0 0.89 1.56
2. Multiple binary logistic regression
We'll now add the species as a categorical predictor in our model. To do
so, we first need to dummy-code our categorical variable, dropping the
first level of our categorical variable (species = Adelie) which will be
used as the reference level:
>>> df = pd.get_dummies(df, columns=['species'], dtype=float, drop_first=True)
>>> X = df[['body_mass_kg', 'species_Chinstrap', 'species_Gentoo']]
>>> y = df['male']
>>> lom = pg.logistic_regression(X, y, remove_na=True)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -26.24 2.84 -9.24 0.00 -31.81 -20.67
1 body_mass_kg 7.10 0.77 9.23 0.00 5.59 8.61
2 species_Chinstrap -0.13 0.42 -0.31 0.75 -0.96 0.69
3 species_Gentoo -9.72 1.12 -8.65 0.00 -11.92 -7.52
3. Using NumPy array and returning only the coefficients
>>> pg.logistic_regression(X.to_numpy(), y.to_numpy(), coef_only=True,
... remove_na=True)
array([-26.23906892, 7.09826571, -0.13180626, -9.71718529])
4. Passing custom parameters to sklearn
>>> lom = pg.logistic_regression(X, y, solver='sag', max_iter=10000,
... random_state=42, remove_na=True)
>>> print(lom['coef'].to_numpy())
[-25.98248153 7.02881472 -0.13119779 -9.62247569]
**How to interpret the log-odds coefficients?**
We'll use the `Wikipedia example
<https://en.wikipedia.org/wiki/Logistic_regression#Probability_of_passing_an_exam_versus_hours_of_study>`_
of the probability of passing an exam
versus the hours of study:
*A group of 20 students spends between 0 and 6 hours studying for an
exam. How does the number of hours spent studying affect the
probability of the student passing the exam?*
>>> # First, let's create the dataframe
>>> Hours = [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50,
... 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50]
>>> Pass = [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1]
>>> df = pd.DataFrame({'HoursStudy': Hours, 'PassExam': Pass})
>>> # And then run the logistic regression
>>> lr = pg.logistic_regression(df['HoursStudy'], df['PassExam']).round(3)
>>> lr
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -4.078 1.761 -2.316 0.021 -7.529 -0.626
1 HoursStudy 1.505 0.629 2.393 0.017 0.272 2.737
The ``Intercept`` coefficient (-4.078) is the log-odds of ``PassExam=1``
when ``HoursStudy=0``. The odds ratio can be obtained by exponentiating
the log-odds:
>>> np.exp(-4.078)
0.016941314421496552
i.e. :math:`0.017:1`. Conversely the odds of failing the exam are
:math:`(1/0.017) \approx 59:1`.
The probability can then be obtained with the following equation
.. math:: p = \frac{1}{1 + e^{-(-4.078 + 0 * 1.505)}}
>>> 1 / (1 + np.exp(-(-4.078)))
0.016659087580814722
The ``HoursStudy`` coefficient (1.505) means that for each additional hour
of study, the log-odds of passing the exam increase by 1.505, and the odds
are multiplied by :math:`e^{1.505} \approx 4.50`.
For example, a student who studies 2 hours has a probability of passing
the exam of 25%:
>>> 1 / (1 + np.exp(-(-4.078 + 2 * 1.505)))
0.2557836148964987
The table below shows the probability of passing the exam for several
values of ``HoursStudy``:
+----------------+----------+----------------+------------------+
| Hours of Study | Log-odds | Odds | Probability |
+================+==========+================+==================+
| 0 | −4.08 | 0.017 ≈ 1:59 | 0.017 |
+----------------+----------+----------------+------------------+
| 1 | −2.57 | 0.076 ≈ 1:13 | 0.07 |
+----------------+----------+----------------+------------------+
| 2 | −1.07 | 0.34 ≈ 1:3 | 0.26 |
+----------------+----------+----------------+------------------+
| 3 | 0.44 | 1.55 | 0.61 |
+----------------+----------+----------------+------------------+
| 4 | 1.94 | 6.96 | 0.87 |
+----------------+----------+----------------+------------------+
| 5 | 3.45 | 31.4 | 0.97 |
+----------------+----------+----------------+------------------+
| 6 | 4.96 | 141.4 | 0.99 |
+----------------+----------+----------------+------------------+
| def logistic_regression(
X, y, coef_only=False, alpha=0.05, as_dataframe=True, remove_na=False, **kwargs
):
"""(Multiple) Binary logistic regression.
Parameters
----------
X : array_like
Predictor(s), of shape *(n_samples, n_features)* or *(n_samples)*.
y : array_like
Dependent variable, of shape *(n_samples)*.
``y`` must be binary, i.e. only contains 0 or 1. Multinomial logistic
regression is not supported.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
:math:`\\text{CI} = [\\alpha / 2 ; 1 - \\alpha / 2]`
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionnary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed). Default is False, which will raise an error if missing
values are present in either the predictor(s) or dependent
variable.
**kwargs : optional
Optional arguments passed to
:py:class:`sklearn.linear_model.LogisticRegression` (see Notes).
Returns
-------
stats : :py:class:`pandas.DataFrame` or dict
Logistic regression summary:
* ``'names'``: name of variable(s) in the model (e.g. x1, x2...)
* ``'coef'``: regression coefficients (log-odds)
* ``'se'``: standard error
* ``'z'``: z-scores
* ``'pval'``: two-tailed p-values
* ``'CI[2.5%]'``: lower confidence interval
* ``'CI[97.5%]'``: upper confidence interval
See also
--------
linear_regression
Notes
-----
.. caution:: This function is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class. However,
Pingouin internally disables the L2 regularization and changes the
default solver to 'newton-cg' to obtain results that are similar to R and
statsmodels.
Logistic regression assumes that the log-odds (the logarithm of the
odds) for the value labeled "1" in the response variable is a linear
combination of the predictor variables. The log-odds are given by the
`logit <https://en.wikipedia.org/wiki/Logit>`_ function,
which map a probability :math:`p` of the response variable being "1"
from :math:`[0, 1)` to :math:`(-\\infty, +\\infty)`.
.. math:: \\text{logit}(p) = \\ln \\frac{p}{1 - p} = \\beta_0 + \\beta X
The odds of the response variable being "1" can be obtained by
exponentiating the log-odds:
.. math:: \\frac{p}{1 - p} = e^{\\beta_0 + \\beta X}
and the probability of the response variable being "1" is given by the
`logistic function <https://en.wikipedia.org/wiki/Logistic_function>`_:
.. math:: p = \\frac{1}{1 + e^{-(\\beta_0 + \\beta X})}
The first coefficient is always the constant term (intercept) of
the model. Pingouin will automatically add the intercept
to your predictor(s) matrix, therefore, :math:`X` should not include a
constant term. Pingouin will remove any constant term (e.g column with only
one unique value), or duplicate columns from :math:`X`.
The calculation of the p-values and confidence interval is adapted from a
`code by Rob Speare
<https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d>`_.
Results have been compared against statsmodels, R, and JASP.
Examples
--------
1. Simple binary logistic regression.
In this first example, we'll use the
`penguins dataset <https://github.com/allisonhorst/palmerpenguins>`_
to see how well we can predict the sex of penguins based on their
bodies mass.
>>> import numpy as np
>>> import pandas as pd
>>> import pingouin as pg
>>> df = pg.read_dataset('penguins')
>>> # Let's first convert the target variable from string to boolean:
>>> df['male'] = (df['sex'] == 'male').astype(int) # male: 1, female: 0
>>> # Since there are missing values in our outcome variable, we need to
>>> # set `remove_na=True` otherwise regression will fail.
>>> lom = pg.logistic_regression(df['body_mass_g'], df['male'],
... remove_na=True)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -5.16 0.71 -7.24 0.0 -6.56 -3.77
1 body_mass_g 0.00 0.00 7.24 0.0 0.00 0.00
Body mass is a significant predictor of sex (p<0.001). Here, it
could be useful to rescale our predictor variable from *g* to *kg*
(e.g divide by 1000) in order to get more intuitive coefficients and
confidence intervals:
>>> df['body_mass_kg'] = df['body_mass_g'] / 1000
>>> lom = pg.logistic_regression(df['body_mass_kg'], df['male'],
... remove_na=True)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -5.16 0.71 -7.24 0.0 -6.56 -3.77
1 body_mass_kg 1.23 0.17 7.24 0.0 0.89 1.56
2. Multiple binary logistic regression
We'll now add the species as a categorical predictor in our model. To do
so, we first need to dummy-code our categorical variable, dropping the
first level of our categorical variable (species = Adelie) which will be
used as the reference level:
>>> df = pd.get_dummies(df, columns=['species'], dtype=float, drop_first=True)
>>> X = df[['body_mass_kg', 'species_Chinstrap', 'species_Gentoo']]
>>> y = df['male']
>>> lom = pg.logistic_regression(X, y, remove_na=True)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -26.24 2.84 -9.24 0.00 -31.81 -20.67
1 body_mass_kg 7.10 0.77 9.23 0.00 5.59 8.61
2 species_Chinstrap -0.13 0.42 -0.31 0.75 -0.96 0.69
3 species_Gentoo -9.72 1.12 -8.65 0.00 -11.92 -7.52
3. Using NumPy aray and returning only the coefficients
>>> pg.logistic_regression(X.to_numpy(), y.to_numpy(), coef_only=True,
... remove_na=True)
array([-26.23906892, 7.09826571, -0.13180626, -9.71718529])
4. Passing custom parameters to sklearn
>>> lom = pg.logistic_regression(X, y, solver='sag', max_iter=10000,
... random_state=42, remove_na=True)
>>> print(lom['coef'].to_numpy())
[-25.98248153 7.02881472 -0.13119779 -9.62247569]
**How to interpret the log-odds coefficients?**
We'll use the `Wikipedia example
<https://en.wikipedia.org/wiki/Logistic_regression#Probability_of_passing_an_exam_versus_hours_of_study>`_
of the probability of passing an exam
versus the hours of study:
*A group of 20 students spends between 0 and 6 hours studying for an
exam. How does the number of hours spent studying affect the
probability of the student passing the exam?*
>>> # First, let's create the dataframe
>>> Hours = [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50,
... 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50]
>>> Pass = [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1]
>>> df = pd.DataFrame({'HoursStudy': Hours, 'PassExam': Pass})
>>> # And then run the logistic regression
>>> lr = pg.logistic_regression(df['HoursStudy'], df['PassExam']).round(3)
>>> lr
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -4.078 1.761 -2.316 0.021 -7.529 -0.626
1 HoursStudy 1.505 0.629 2.393 0.017 0.272 2.737
The ``Intercept`` coefficient (-4.078) is the log-odds of ``PassExam=1``
when ``HoursStudy=0``. The odds ratio can be obtained by exponentiating
the log-odds:
>>> np.exp(-4.078)
0.016941314421496552
i.e. :math:`0.017:1`. Conversely the odds of failing the exam are
:math:`(1/0.017) \\approx 59:1`.
The probability can then be obtained with the following equation
.. math:: p = \\frac{1}{1 + e^{-(-4.078 + 0 * 1.505)}}
>>> 1 / (1 + np.exp(-(-4.078)))
0.016659087580814722
The ``HoursStudy`` coefficient (1.505) means that for each additional hour
of study, the log-odds of passing the exam increase by 1.505, and the odds
are multipled by :math:`e^{1.505} \\approx 4.50`.
For example, a student who studies 2 hours has a probability of passing
the exam of 25%:
>>> 1 / (1 + np.exp(-(-4.078 + 2 * 1.505)))
0.2557836148964987
The table below shows the probability of passing the exam for several
values of ``HoursStudy``:
+----------------+----------+----------------+------------------+
| Hours of Study | Log-odds | Odds | Probability |
+================+==========+================+==================+
| 0 | −4.08 | 0.017 ≈ 1:59 | 0.017 |
+----------------+----------+----------------+------------------+
| 1 | −2.57 | 0.076 ≈ 1:13 | 0.07 |
+----------------+----------+----------------+------------------+
| 2 | −1.07 | 0.34 ≈ 1:3 | 0.26 |
+----------------+----------+----------------+------------------+
| 3 | 0.44 | 1.55 | 0.61 |
+----------------+----------+----------------+------------------+
| 4 | 1.94 | 6.96 | 0.87 |
+----------------+----------+----------------+------------------+
| 5 | 3.45 | 31.4 | 0.97 |
+----------------+----------+----------------+------------------+
| 6 | 4.96 | 141.4 | 0.99 |
+----------------+----------+----------------+------------------+
"""
# Check that sklearn is installed
from pingouin.utils import _is_sklearn_installed
_is_sklearn_installed(raise_error=True)
from sklearn.linear_model import LogisticRegression
# Extract names if X is a Dataframe or Series
if isinstance(X, pd.DataFrame):
names = X.keys().tolist()
elif isinstance(X, pd.Series):
names = [X.name]
else:
names = []
# Convert to numpy array
X = np.asarray(X)
y = np.asarray(y)
assert y.ndim == 1, "y must be one-dimensional."
assert 0 < alpha < 1, "alpha must be between 0 and 1."
# Add axis if only one-dimensional array
if X.ndim == 1:
X = X[..., np.newaxis]
# Check for NaN / Inf
if remove_na:
X, y = rm_na(X, y[..., np.newaxis], paired=True, axis="rows")
y = np.squeeze(y)
y_gd = np.isfinite(y).all()
X_gd = np.isfinite(X).all()
assert y_gd, (
"Target (y) contains NaN or Inf. Please remove them " "manually or use remove_na=True."
)
assert X_gd, (
"Predictors (X) contain NaN or Inf. Please remove them " "manually or use remove_na=True."
)
# Check that X and y have same length
assert y.shape[0] == X.shape[0], "X and y must have same number of samples"
# Check that y is binary
if np.unique(y).size != 2:
raise ValueError("Dependent variable must be binary.")
if not names:
names = ["x" + str(i + 1) for i in range(X.shape[1])]
# We also want to make sure that there is no column
# with only one unique value, otherwise the regression fails
# This is equivalent, but much faster, to pd.DataFrame(X).nunique()
idx_unique = np.where(np.all(X == X[0, :], axis=0))[0]
if len(idx_unique):
X = np.delete(X, idx_unique, 1)
names = np.delete(names, idx_unique).tolist()
# Finally, we want to remove duplicate columns
if X.shape[1] > 1:
idx_duplicate = []
for pair in itertools.combinations(range(X.shape[1]), 2):
if np.array_equal(X[:, pair[0]], X[:, pair[1]]):
idx_duplicate.append(pair[1])
if len(idx_duplicate):
X = np.delete(X, idx_duplicate, 1)
names = np.delete(names, idx_duplicate).tolist()
# Initialize and fit
if "solver" not in kwargs:
# https://stats.stackexchange.com/a/204324/253579
# Updated in Pingouin > 0.3.6 to be consistent with R
kwargs["solver"] = "newton-cg"
if "penalty" not in kwargs:
kwargs["penalty"] = "none"
lom = LogisticRegression(**kwargs)
lom.fit(X, y)
if lom.get_params()["fit_intercept"]:
names.insert(0, "Intercept")
X_design = np.column_stack((np.ones(X.shape[0]), X))
coef = np.append(lom.intercept_, lom.coef_)
else:
coef = lom.coef_
X_design = X
if coef_only:
return coef
# Fisher Information Matrix
n, p = X_design.shape
denom = 2 * (1 + np.cosh(lom.decision_function(X)))
denom = np.tile(denom, (p, 1)).T
fim = (X_design / denom).T @ X_design
crao = np.linalg.pinv(fim)
# Standard error and Z-scores
se = np.sqrt(np.diag(crao))
z_scores = coef / se
# Two-tailed p-values
pval = 2 * norm.sf(np.fabs(z_scores))
# Wald Confidence intervals
# In R: this is equivalent to confint.default(model)
# Note that confint(model) will however return the profile CI
crit = norm.ppf(1 - alpha / 2)
ll = coef - crit * se
ul = coef + crit * se
# Rename CI
ll_name = "CI[%.1f%%]" % (100 * alpha / 2)
ul_name = "CI[%.1f%%]" % (100 * (1 - alpha / 2))
# Create dict
stats = {
"names": names,
"coef": coef,
"se": se,
"z": z_scores,
"pval": pval,
ll_name: ll,
ul_name: ul,
}
if as_dataframe:
return _postprocess_dataframe(pd.DataFrame(stats))
else:
return stats
| (X, y, coef_only=False, alpha=0.05, as_dataframe=True, remove_na=False, **kwargs) |
32,018 | pingouin.nonparametric | mad |
Median Absolute Deviation (MAD) along given axis of an array.
Parameters
----------
a : array-like
Input array.
normalize : boolean.
If True, scale by a normalization constant :math:`c \approx 0.67`
to ensure consistency with the standard deviation for normally
distributed data.
axis : int or None, optional
Axis along which the MAD is computed. Default is 0.
Can also be None to compute the MAD over the entire array.
Returns
-------
mad : float
mad = median(abs(a - median(a))) / c
See also
--------
madmedianrule, numpy.std
Notes
-----
The `median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ (MAD) computes
the median over the absolute deviations from the median. It is a measure of
dispersion similar to the standard deviation, but is more robust to
outliers.
SciPy 1.3 and higher includes a similar function:
:py:func:`scipy.stats.median_abs_deviation`.
Please note that missing values are automatically removed.
Examples
--------
>>> from pingouin import mad
>>> a = [1.2, 5.4, 3.2, 7.8, 2.5]
>>> mad(a)
2.965204437011204
>>> mad(a, normalize=False)
2.0
2D arrays with missing values (axis handling example)
>>> import numpy as np
>>> np.random.seed(123)
>>> w = np.random.normal(size=(5, 10))
>>> w[3, 2] = np.nan
>>> mad(w) # Axis = 0 (default) = iterate over the columns
array([0.60304023, 2.35057834, 0.90350696, 1.28599837, 1.16024152,
0.38653752, 1.92564066, 1.2480913 , 0.42580373, 1.69814622])
>>> mad(w, axis=1) # Axis = 1 = iterate over the rows
array([1.32639022, 1.19295036, 1.41198672, 0.78020689, 1.01531254])
>>> mad(w, axis=None) # Axis = None = over the entire array
1.1607762457644006
Compare with Scipy >= 1.3
>>> from scipy.stats import median_abs_deviation
>>> median_abs_deviation(w, scale='normal', axis=None, nan_policy='omit')
1.1607762457644006
def mad(a, normalize=True, axis=0):
    """Median Absolute Deviation (MAD) of an array along a given axis.

    The MAD is the median of the absolute deviations from the median. It is
    a robust measure of dispersion, similar to the standard deviation but
    much less sensitive to outliers. Missing values (NaN) are ignored.

    Parameters
    ----------
    a : array-like
        Input array.
    normalize : bool
        If True, divide by a normalization constant
        :math:`c = \\Phi^{-1}(3/4) \\approx 0.6745` so that the MAD is a
        consistent estimator of the standard deviation for normally
        distributed data.
    axis : int or None, optional
        Axis along which the MAD is computed. Default is 0.
        Use ``None`` to compute the MAD over the entire (flattened) array.

    Returns
    -------
    mad : float or :py:class:`numpy.ndarray`
        ``median(abs(a - median(a))) / c``

    See also
    --------
    madmedianrule, numpy.std

    Notes
    -----
    SciPy 1.3 and higher includes a similar function:
    :py:func:`scipy.stats.median_abs_deviation` (with ``scale='normal'``).

    Examples
    --------
    >>> from pingouin import mad
    >>> a = [1.2, 5.4, 3.2, 7.8, 2.5]
    >>> mad(a)
    2.965204437011204
    >>> mad(a, normalize=False)
    2.0
    """
    arr = np.asarray(a)
    if axis is None:
        # Flatten first, then reduce over the single remaining axis
        arr = np.ravel(arr)
        axis = 0
    if normalize:
        scale = scipy.stats.norm.ppf(3 / 4.0)
    else:
        scale = 1
    # Median along ``axis``, keeping the reduced dimension so the center
    # broadcasts against ``arr`` (equivalent to np.apply_over_axes)
    center = np.nanmedian(arr, axis=axis, keepdims=True)
    return np.nanmedian(np.fabs(arr - center) / scale, axis=axis)
| (a, normalize=True, axis=0) |
32,019 | pingouin.nonparametric | madmedianrule | Robust outlier detection based on the MAD-median rule.
Parameters
----------
a : array-like
Input array. Must be one-dimensional.
Returns
-------
outliers: boolean (same shape as a)
Boolean array indicating whether each sample is an outlier (True) or
not (False).
See also
--------
mad
Notes
-----
The MAD-median-rule ([1]_, [2]_) will refer to declaring :math:`X_i`
an outlier if
.. math::
\frac{\left | X_i - M \right |}{\text{MAD}_{\text{norm}}} > K,
where :math:`M` is the median of :math:`X`,
:math:`\text{MAD}_{\text{norm}}` the normalized median absolute deviation
of :math:`X`, and :math:`K` is the square
root of the .975 quantile of a :math:`X^2` distribution with one degree
of freedom, which is roughly equal to 2.24.
References
----------
.. [1] Hall, P., Welsh, A.H., 1985. Limit theorems for the median
deviation. Ann. Inst. Stat. Math. 37, 27–36.
https://doi.org/10.1007/BF02481078
.. [2] Wilcox, R. R. Introduction to Robust Estimation and Hypothesis
Testing. (Academic Press, 2011).
Examples
--------
>>> import pingouin as pg
>>> a = [-1.09, 1., 0.28, -1.51, -0.58, 6.61, -2.43, -0.43]
>>> pg.madmedianrule(a)
array([False, False, False, False, False, True, False, False])
def madmedianrule(a):
    """Detect outliers using the robust MAD-median rule.

    A sample :math:`X_i` is declared an outlier when

    .. math:: \\frac{\\left | X_i - M \\right |}{\\text{MAD}_{\\text{norm}}} > K,

    where :math:`M` is the median of :math:`X`,
    :math:`\\text{MAD}_{\\text{norm}}` the normalized median absolute
    deviation of :math:`X`, and :math:`K` the square root of the .975
    quantile of a chi-squared distribution with one degree of freedom
    (roughly 2.24).

    Parameters
    ----------
    a : array-like
        Input array. Must be one-dimensional.

    Returns
    -------
    outliers : boolean (same shape as a)
        Boolean array, True where the corresponding sample is an outlier.

    See also
    --------
    mad

    References
    ----------
    .. [1] Hall, P., Welsh, A.H., 1985. Limit theorems for the median
       deviation. Ann. Inst. Stat. Math. 37, 27–36.
       https://doi.org/10.1007/BF02481078
    .. [2] Wilcox, R. R. Introduction to Robust Estimation and Hypothesis
       Testing. (Academic Press, 2011).

    Examples
    --------
    >>> import pingouin as pg
    >>> a = [-1.09, 1., 0.28, -1.51, -0.58, 6.61, -2.43, -0.43]
    >>> pg.madmedianrule(a)
    array([False, False, False, False, False,  True, False, False])
    """
    arr = np.asarray(a)
    assert arr.ndim == 1, "Only 1D array / list are supported for this function."
    # Critical value: sqrt of the .975 quantile of a chi-squared(1) distribution
    cutoff = np.sqrt(scipy.stats.chi2.ppf(0.975, 1))
    deviation = np.fabs(arr - np.median(arr))
    return deviation / mad(arr) > cutoff
| (a) |
32,020 | pingouin.regression | mediation_analysis | Mediation analysis using a bias-corrected non-parametric bootstrap method.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Dataframe.
x : str
Column name in data containing the predictor variable.
The predictor variable must be continuous.
m : str or list of str
Column name(s) in data containing the mediator variable(s).
The mediator(s) can be continuous or binary (e.g. 0 or 1).
This function supports multiple parallel mediators.
y : str
Column name in data containing the outcome variable.
The outcome variable must be continuous.
covar : None, str, or list
Covariate(s). If not None, the specified covariate(s) will be included
in all regressions.
alpha : float
Significance threshold. Used to determine the confidence interval,
:math:`\text{CI} = [\alpha / 2 ; 1 - \alpha / 2]`.
n_boot : int
Number of bootstrap iterations for confidence intervals and p-values
estimation. The greater, the slower.
seed : int or None
Random state seed.
logreg_kwargs : dict or None
Dictionary with optional arguments passed to :py:func:`pingouin.logistic_regression`
return_dist : bool
If True, the function also returns the indirect bootstrapped beta
samples (size = n_boot). Can be plotted for instance using
:py:func:`seaborn.distplot()` or :py:func:`seaborn.kdeplot()`
functions.
Returns
-------
stats : :py:class:`pandas.DataFrame`
Mediation summary:
* ``'path'``: regression model
* ``'coef'``: regression estimates
* ``'se'``: standard error
* ``'CI[2.5%]'``: lower confidence interval
* ``'CI[97.5%]'``: upper confidence interval
* ``'pval'``: two-sided p-values
* ``'sig'``: statistical significance
See also
--------
linear_regression, logistic_regression
Notes
-----
Mediation analysis [1]_ is a *"statistical procedure to test
whether the effect of an independent variable X on a dependent variable
Y (i.e., X → Y) is at least partly explained by a chain of effects of the
independent variable on an intervening mediator variable M and of the
intervening variable on the dependent variable (i.e., X → M → Y)"* [2]_.
The **indirect effect** (also referred to as average causal mediation
effect or ACME) of X on Y through mediator M quantifies the estimated
difference in Y resulting from a one-unit change in X through a sequence of
causal steps in which X affects M, which in turn affects Y.
It is considered significant if the specified confidence interval does not
include 0. The path 'X --> Y' is the sum of both the indirect and direct
effect. It is sometimes referred to as total effect.
A linear regression is used if the mediator variable is continuous and a
logistic regression if the mediator variable is dichotomous (binary).
Multiple parallel mediators are also supported.
This function will only work well if the outcome variable is continuous.
It does not support binary or ordinal outcome variable. For more
advanced mediation models, please refer to the
`lavaan <http://lavaan.ugent.be/tutorial/mediation.html>`_ or `mediation
<https://cran.r-project.org/web/packages/mediation/mediation.pdf>`_ R
packages, or the `PROCESS macro
<https://www.processmacro.org/index.html>`_ for SPSS.
The two-sided p-value of the indirect effect is computed using the
bootstrap distribution, as in the mediation R package. However, the p-value
should be interpreted with caution since it is not constructed
conditioned on a true null hypothesis [3]_ and varies depending on the
number of bootstrap samples and the random seed.
Note that rows with missing values are automatically removed.
Results have been tested against the R mediation package and this tutorial
https://data.library.virginia.edu/introduction-to-mediation-analysis/
References
----------
.. [1] Baron, R. M. & Kenny, D. A. The moderator–mediator variable
distinction in social psychological research: Conceptual, strategic,
and statistical considerations. J. Pers. Soc. Psychol. 51, 1173–1182
(1986).
.. [2] Fiedler, K., Schott, M. & Meiser, T. What mediation analysis can
(not) do. J. Exp. Soc. Psychol. 47, 1231–1236 (2011).
.. [3] Hayes, A. F. & Rockwood, N. J. Regression-based statistical
mediation and moderation analysis in clinical research:
Observations, recommendations, and implementation. Behav. Res.
Ther. 98, 39–57 (2017).
Code originally adapted from https://github.com/rmill040/pymediation.
Examples
--------
1. Simple mediation analysis
>>> from pingouin import mediation_analysis, read_dataset
>>> df = read_dataset('mediation')
>>> mediation_analysis(data=df, x='X', m='M', y='Y', alpha=0.05,
... seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.561015 0.094480 4.391362e-08 0.373522 0.748509 Yes
1 Y ~ M 0.654173 0.085831 1.612674e-11 0.483844 0.824501 Yes
2 Total 0.396126 0.111160 5.671128e-04 0.175533 0.616719 Yes
3 Direct 0.039604 0.109648 7.187429e-01 -0.178018 0.257226 No
4 Indirect 0.356522 0.083313 0.000000e+00 0.219818 0.537654 Yes
2. Return the indirect bootstrapped beta coefficients
>>> stats, dist = mediation_analysis(data=df, x='X', m='M', y='Y',
... return_dist=True)
>>> print(dist.shape)
(500,)
3. Mediation analysis with a binary mediator variable
>>> mediation_analysis(data=df, x='X', m='Mbin', y='Y', seed=42).round(3)
path coef se pval CI[2.5%] CI[97.5%] sig
0 Mbin ~ X -0.021 0.116 0.857 -0.248 0.206 No
1 Y ~ Mbin -0.135 0.412 0.743 -0.952 0.682 No
2 Total 0.396 0.111 0.001 0.176 0.617 Yes
3 Direct 0.396 0.112 0.001 0.174 0.617 Yes
4 Indirect 0.002 0.050 0.960 -0.072 0.146 No
4. Mediation analysis with covariates
>>> mediation_analysis(data=df, x='X', m='M', y='Y',
... covar=['Mbin', 'Ybin'], seed=42).round(3)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.559 0.097 0.000 0.367 0.752 Yes
1 Y ~ M 0.666 0.086 0.000 0.495 0.837 Yes
2 Total 0.420 0.113 0.000 0.196 0.645 Yes
3 Direct 0.064 0.110 0.561 -0.155 0.284 No
4 Indirect 0.356 0.086 0.000 0.209 0.553 Yes
5. Mediation analysis with multiple parallel mediators
>>> mediation_analysis(data=df, x='X', m=['M', 'Mbin'], y='Y',
... seed=42).round(3)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.561 0.094 0.000 0.374 0.749 Yes
1 Mbin ~ X -0.005 0.029 0.859 -0.063 0.052 No
2 Y ~ M 0.654 0.086 0.000 0.482 0.825 Yes
3 Y ~ Mbin -0.064 0.328 0.846 -0.715 0.587 No
4 Total 0.396 0.111 0.001 0.176 0.617 Yes
5 Direct 0.040 0.110 0.721 -0.179 0.258 No
6 Indirect M 0.356 0.085 0.000 0.215 0.538 Yes
7 Indirect Mbin 0.000 0.010 0.952 -0.017 0.025 No
@pf.register_dataframe_method
def mediation_analysis(
    data=None,
    x=None,
    m=None,
    y=None,
    covar=None,
    alpha=0.05,
    n_boot=500,
    seed=None,
    return_dist=False,
    logreg_kwargs=None,
):
    """Mediation analysis using a bias-corrected non-parametric bootstrap method.
    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Dataframe.
    x : str
        Column name in data containing the predictor variable.
        The predictor variable must be continuous.
    m : str or list of str
        Column name(s) in data containing the mediator variable(s).
        The mediator(s) can be continuous or binary (e.g. 0 or 1).
        This function supports multiple parallel mediators.
    y : str
        Column name in data containing the outcome variable.
        The outcome variable must be continuous.
    covar : None, str, or list
        Covariate(s). If not None, the specified covariate(s) will be included
        in all regressions.
    alpha : float
        Significance threshold. Used to determine the confidence interval,
        :math:`\\text{CI} = [\\alpha / 2 ; 1 - \\alpha / 2]`.
    n_boot : int
        Number of bootstrap iterations for confidence intervals and p-values
        estimation. The greater, the slower.
    seed : int or None
        Random state seed.
    logreg_kwargs : dict or None
        Dictionary with optional arguments passed to :py:func:`pingouin.logistic_regression`
    return_dist : bool
        If True, the function also returns the indirect bootstrapped beta
        samples (size = n_boot). Can be plotted for instance using
        :py:func:`seaborn.distplot()` or :py:func:`seaborn.kdeplot()`
        functions.
    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        Mediation summary:
        * ``'path'``: regression model
        * ``'coef'``: regression estimates
        * ``'se'``: standard error
        * ``'CI[2.5%]'``: lower confidence interval
        * ``'CI[97.5%]'``: upper confidence interval
        * ``'pval'``: two-sided p-values
        * ``'sig'``: statistical significance
    See also
    --------
    linear_regression, logistic_regression
    Notes
    -----
    Mediation analysis [1]_ is a *"statistical procedure to test
    whether the effect of an independent variable X on a dependent variable
    Y (i.e., X → Y) is at least partly explained by a chain of effects of the
    independent variable on an intervening mediator variable M and of the
    intervening variable on the dependent variable (i.e., X → M → Y)"* [2]_.
    The **indirect effect** (also referred to as average causal mediation
    effect or ACME) of X on Y through mediator M quantifies the estimated
    difference in Y resulting from a one-unit change in X through a sequence of
    causal steps in which X affects M, which in turn affects Y.
    It is considered significant if the specified confidence interval does not
    include 0. The path 'X --> Y' is the sum of both the indirect and direct
    effect. It is sometimes referred to as total effect.
    A linear regression is used if the mediator variable is continuous and a
    logistic regression if the mediator variable is dichotomous (binary).
    Multiple parallel mediators are also supported.
    This function will only work well if the outcome variable is continuous.
    It does not support binary or ordinal outcome variable. For more
    advanced mediation models, please refer to the
    `lavaan <http://lavaan.ugent.be/tutorial/mediation.html>`_ or `mediation
    <https://cran.r-project.org/web/packages/mediation/mediation.pdf>`_ R
    packages, or the `PROCESS macro
    <https://www.processmacro.org/index.html>`_ for SPSS.
    The two-sided p-value of the indirect effect is computed using the
    bootstrap distribution, as in the mediation R package. However, the p-value
    should be interpreted with caution since it is not constructed
    conditioned on a true null hypothesis [3]_ and varies depending on the
    number of bootstrap samples and the random seed.
    Note that rows with missing values are automatically removed.
    Results have been tested against the R mediation package and this tutorial
    https://data.library.virginia.edu/introduction-to-mediation-analysis/
    References
    ----------
    .. [1] Baron, R. M. & Kenny, D. A. The moderator–mediator variable
       distinction in social psychological research: Conceptual, strategic,
       and statistical considerations. J. Pers. Soc. Psychol. 51, 1173–1182
       (1986).
    .. [2] Fiedler, K., Schott, M. & Meiser, T. What mediation analysis can
       (not) do. J. Exp. Soc. Psychol. 47, 1231–1236 (2011).
    .. [3] Hayes, A. F. & Rockwood, N. J. Regression-based statistical
       mediation and moderation analysis in clinical research:
       Observations, recommendations, and implementation. Behav. Res.
       Ther. 98, 39–57 (2017).
    Code originally adapted from https://github.com/rmill040/pymediation.
    Examples
    --------
    1. Simple mediation analysis
    >>> from pingouin import mediation_analysis, read_dataset
    >>> df = read_dataset('mediation')
    >>> mediation_analysis(data=df, x='X', m='M', y='Y', alpha=0.05,
    ...                    seed=42)
           path      coef        se          pval  CI[2.5%]  CI[97.5%]  sig
    0     M ~ X  0.561015  0.094480  4.391362e-08  0.373522   0.748509  Yes
    1     Y ~ M  0.654173  0.085831  1.612674e-11  0.483844   0.824501  Yes
    2     Total  0.396126  0.111160  5.671128e-04  0.175533   0.616719  Yes
    3    Direct  0.039604  0.109648  7.187429e-01 -0.178018   0.257226   No
    4  Indirect  0.356522  0.083313  0.000000e+00  0.219818   0.537654  Yes
    2. Return the indirect bootstrapped beta coefficients
    >>> stats, dist = mediation_analysis(data=df, x='X', m='M', y='Y',
    ...                                  return_dist=True)
    >>> print(dist.shape)
    (500,)
    3. Mediation analysis with a binary mediator variable
    >>> mediation_analysis(data=df, x='X', m='Mbin', y='Y', seed=42).round(3)
           path   coef     se   pval  CI[2.5%]  CI[97.5%]  sig
    0  Mbin ~ X -0.021  0.116  0.857    -0.248      0.206   No
    1  Y ~ Mbin -0.135  0.412  0.743    -0.952      0.682   No
    2     Total  0.396  0.111  0.001     0.176      0.617  Yes
    3    Direct  0.396  0.112  0.001     0.174      0.617  Yes
    4  Indirect  0.002  0.050  0.960    -0.072      0.146   No
    4. Mediation analysis with covariates
    >>> mediation_analysis(data=df, x='X', m='M', y='Y',
    ...                    covar=['Mbin', 'Ybin'], seed=42).round(3)
           path   coef     se   pval  CI[2.5%]  CI[97.5%]  sig
    0     M ~ X  0.559  0.097  0.000     0.367      0.752  Yes
    1     Y ~ M  0.666  0.086  0.000     0.495      0.837  Yes
    2     Total  0.420  0.113  0.000     0.196      0.645  Yes
    3    Direct  0.064  0.110  0.561    -0.155      0.284   No
    4  Indirect  0.356  0.086  0.000     0.209      0.553  Yes
    5. Mediation analysis with multiple parallel mediators
    >>> mediation_analysis(data=df, x='X', m=['M', 'Mbin'], y='Y',
    ...                    seed=42).round(3)
                path   coef     se   pval  CI[2.5%]  CI[97.5%]  sig
    0          M ~ X  0.561  0.094  0.000     0.374      0.749  Yes
    1       Mbin ~ X -0.005  0.029  0.859    -0.063      0.052   No
    2          Y ~ M  0.654  0.086  0.000     0.482      0.825  Yes
    3       Y ~ Mbin -0.064  0.328  0.846    -0.715      0.587   No
    4          Total  0.396  0.111  0.001     0.176      0.617  Yes
    5         Direct  0.040  0.110  0.721    -0.179      0.258   No
    6     Indirect M  0.356  0.085  0.000     0.215      0.538  Yes
    7  Indirect Mbin  0.000  0.010  0.952    -0.017      0.025   No
    """
    # Sanity check
    # NOTE: error message previously said "y" here, which was a copy-paste bug.
    assert isinstance(x, (str, int)), "x must be a string or int."
    assert isinstance(y, (str, int)), "y must be a string or int."
    assert isinstance(m, (list, str, int)), "Mediator(s) must be a list, string or int."
    assert isinstance(covar, (type(None), str, list, int))
    if isinstance(m, (str, int)):
        m = [m]
    n_mediator = len(m)
    assert isinstance(data, pd.DataFrame), "Data must be a DataFrame."
    # Check for duplicates
    assert n_mediator == len(set(m)), "Cannot have duplicates mediators."
    if isinstance(covar, (str, int)):
        covar = [covar]
    if isinstance(covar, list):
        assert len(covar) == len(set(covar)), "Cannot have duplicates covar."
        assert set(m).isdisjoint(covar), "Mediator cannot be in covar."
    # Check that columns are in dataframe
    columns = _fl([x, m, y, covar])
    keys = data.columns
    assert all([c in keys for c in columns]), "Column(s) are not in DataFrame."
    # Check that columns are numeric
    err_msg = "Columns must be numeric or boolean."
    assert all([data[c].dtype.kind in "bfiu" for c in columns]), err_msg
    # Drop rows with NAN Values
    data = data[columns].dropna()
    n = data.shape[0]
    assert n > 5, "DataFrame must have at least 5 samples (rows)."
    # Check if mediator is binary: logistic regression is used for M ~ X
    # when ALL mediators are dichotomous, linear regression otherwise.
    mtype = "logistic" if all(data[m].nunique() == 2) else "linear"
    # Check if a dict with kwargs for logistic_regression has been passed
    logreg_kwargs = {} if logreg_kwargs is None else logreg_kwargs
    # Name of CI
    ll_name = "CI[%.1f%%]" % (100 * alpha / 2)
    ul_name = "CI[%.1f%%]" % (100 * (1 - alpha / 2))
    # Compute regressions
    cols = ["names", "coef", "se", "pval", ll_name, ul_name]
    # For speed, we pass np.array instead of pandas DataFrame
    X_val = data[_fl([x, covar])].to_numpy()  # X + covar as predictors
    XM_val = data[_fl([x, m, covar])].to_numpy()  # X + M + covar as predictors
    M_val = data[m].to_numpy()  # M as target (no covariates)
    y_val = data[y].to_numpy()  # y as target (no covariates)
    # For max precision, make sure rounding is disabled
    old_options = options.copy()
    options["round"] = None
    # M(j) ~ X + covar
    sxm = {}
    for idx, j in enumerate(m):
        if mtype == "linear":
            sxm[j] = linear_regression(X_val, M_val[:, idx], alpha=alpha).loc[[1], cols]
        else:
            sxm[j] = logistic_regression(X_val, M_val[:, idx], alpha=alpha, **logreg_kwargs).loc[
                [1], cols
            ]
        sxm[j].at[1, "names"] = "%s ~ X" % j
    sxm = pd.concat(sxm, ignore_index=True)
    # Y ~ M + covar
    smy = linear_regression(data[_fl([m, covar])], y_val, alpha=alpha).loc[1:n_mediator, cols]
    # Average Total Effects (Y ~ X + covar)
    sxy = linear_regression(X_val, y_val, alpha=alpha).loc[[1], cols]
    # Average Direct Effects (Y ~ X + M + covar)
    direct = linear_regression(XM_val, y_val, alpha=alpha).loc[[1], cols]
    # Rename paths
    smy["names"] = smy["names"].apply(lambda x: "Y ~ %s" % x)
    direct.at[1, "names"] = "Direct"
    sxy.at[1, "names"] = "Total"
    # Concatenate and create sig column
    stats = pd.concat((sxm, smy, sxy, direct), ignore_index=True)
    stats["sig"] = np.where(stats["pval"] < alpha, "Yes", "No")
    # Bootstrap confidence intervals
    rng = np.random.RandomState(seed)
    idx = rng.choice(np.arange(n), replace=True, size=(n_boot, n))
    ab_estimates = np.zeros(shape=(n_boot, n_mediator))
    for i in range(n_boot):
        ab_estimates[i, :] = _point_estimate(
            X_val, XM_val, M_val, y_val, idx[i, :], n_mediator, mtype, **logreg_kwargs
        )
    ab = _point_estimate(
        X_val, XM_val, M_val, y_val, np.arange(n), n_mediator, mtype, **logreg_kwargs
    )
    indirect = {
        "names": m,
        "coef": ab,
        "se": ab_estimates.std(ddof=1, axis=0),
        "pval": [],
        ll_name: [],
        ul_name: [],
        "sig": [],
    }
    for j in range(n_mediator):
        ci_j = _bias_corrected_ci(ab_estimates[:, j], indirect["coef"][j], alpha=alpha)
        indirect[ll_name].append(min(ci_j))
        indirect[ul_name].append(max(ci_j))
        # Bootstrapped p-value of indirect effect
        # Note that this is less accurate than a permutation test because the
        # bootstrap distribution is not conditioned on a true null hypothesis.
        # For more details see Hayes and Rockwood 2017
        indirect["pval"].append(_pval_from_bootci(ab_estimates[:, j], indirect["coef"][j]))
        indirect["sig"].append("Yes" if indirect["pval"][j] < alpha else "No")
    # Create output dataframe
    indirect = pd.DataFrame.from_dict(indirect)
    if n_mediator == 1:
        indirect["names"] = "Indirect"
    else:
        indirect["names"] = indirect["names"].apply(lambda x: "Indirect %s" % x)
    stats = pd.concat([stats, indirect], axis=0, ignore_index=True, sort=False)
    stats = stats.rename(columns={"names": "path"})
    # Restore options
    options.update(old_options)
    if return_dist:
        return _postprocess_dataframe(stats), np.squeeze(ab_estimates)
    else:
        return _postprocess_dataframe(stats)
| (data=None, x=None, m=None, y=None, covar=None, alpha=0.05, n_boot=500, seed=None, return_dist=False, logreg_kwargs=None) |
32,021 | pingouin.parametric | mixed_anova | Mixed-design (split-plot) ANOVA.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
dv : string
Name of column containing the dependent variable.
within : string
Name of column containing the within-subject factor
(repeated measurements).
subject : string
Name of column containing the between-subject identifier.
between : string
Name of column containing the between factor.
correction : string or boolean
If True, return Greenhouse-Geisser corrected p-value.
If `'auto'` (default), compute Mauchly's test of sphericity to
determine whether the p-values needs to be corrected.
effsize : str
Effect size. Must be one of 'np2' (partial eta-squared), 'n2'
(eta-squared) or 'ng2'(generalized eta-squared).
Returns
-------
aov : :py:class:`pandas.DataFrame`
ANOVA summary:
* ``'Source'``: Names of the factor considered
* ``'ddof1'``: Degrees of freedom (numerator)
* ``'ddof2'``: Degrees of freedom (denominator)
* ``'F'``: F-values
* ``'p-unc'``: Uncorrected p-values
* ``'np2'``: Partial eta-squared effect sizes
* ``'eps'``: Greenhouse-Geisser epsilon factor (= index of sphericity)
* ``'p-GG-corr'``: Greenhouse-Geisser corrected p-values
* ``'W-spher'``: Sphericity test statistic
* ``'p-spher'``: p-value of the sphericity test
* ``'sphericity'``: sphericity of the data (boolean)
See Also
--------
anova, rm_anova, pairwise_tests
Notes
-----
Data are expected to be in long-format (even the repeated measures).
If your data is in wide-format, you can use the :py:func:`pandas.melt()`
function to convert from wide to long format.
Missing values are automatically removed using a strict listwise approach (= complete-case
analysis). In other words, any subject with one or more missing value(s) is completely removed
from the dataframe prior to running the test. This could drastically decrease the power of the
ANOVA if many missing values are present. In that case, we strongly recommend using linear
mixed effect modelling, which can handle missing values in repeated measures.
.. warning :: If the between-subject groups are unbalanced (= unequal sample sizes),
a type II ANOVA will be computed. Note however that SPSS, JAMOVI and JASP by default
return a type III ANOVA, which may lead to slightly different results.
Examples
--------
For more examples, please refer to the `Jupyter notebooks
<https://github.com/raphaelvallat/pingouin/blob/master/notebooks/01_ANOVA.ipynb>`_
Compute a two-way mixed model ANOVA.
>>> from pingouin import mixed_anova, read_dataset
>>> df = read_dataset('mixed_anova')
>>> aov = mixed_anova(dv='Scores', between='Group',
... within='Time', subject='Subject', data=df)
>>> aov.round(3)
Source SS DF1 DF2 MS F p-unc np2 eps
0 Group 5.460 1 58 5.460 5.052 0.028 0.080 NaN
1 Time 7.628 2 116 3.814 4.027 0.020 0.065 0.999
2 Interaction 5.167 2 116 2.584 2.728 0.070 0.045 NaN
Same but reporting a generalized eta-squared effect size. Notice how we
can also apply this function directly as a method of the dataframe, in
which case we do not need to specify ``data=df`` anymore.
>>> df.mixed_anova(dv='Scores', between='Group', within='Time',
... subject='Subject', effsize="ng2").round(3)
Source SS DF1 DF2 MS F p-unc ng2 eps
0 Group 5.460 1 58 5.460 5.052 0.028 0.031 NaN
1 Time 7.628 2 116 3.814 4.027 0.020 0.042 0.999
2 Interaction 5.167 2 116 2.584 2.728 0.070 0.029 NaN
| @pf.register_dataframe_method
def mixed_anova(
    data=None, dv=None, within=None, subject=None, between=None, correction="auto", effsize="np2"
):
    """Mixed-design (split-plot) ANOVA.
    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Note that this function can also directly be used as a
        Pandas method, in which case this argument is no longer needed.
    dv : string
        Name of column containing the dependent variable.
    within : string
        Name of column containing the within-subject factor
        (repeated measurements).
    subject : string
        Name of column containing the between-subject identifier.
    between : string
        Name of column containing the between factor.
    correction : string or boolean
        If True, return Greenhouse-Geisser corrected p-value.
        If `'auto'` (default), compute Mauchly's test of sphericity to
        determine whether the p-values needs to be corrected.
    effsize : str
        Effect size. Must be one of 'np2' (partial eta-squared), 'n2'
        (eta-squared) or 'ng2' (generalized eta-squared).
    Returns
    -------
    aov : :py:class:`pandas.DataFrame`
        ANOVA summary:
        * ``'Source'``: Names of the factor considered
        * ``'ddof1'``: Degrees of freedom (numerator)
        * ``'ddof2'``: Degrees of freedom (denominator)
        * ``'F'``: F-values
        * ``'p-unc'``: Uncorrected p-values
        * ``'np2'``: Partial eta-squared effect sizes
        * ``'eps'``: Greenhouse-Geisser epsilon factor (= index of sphericity)
        * ``'p-GG-corr'``: Greenhouse-Geisser corrected p-values
        * ``'W-spher'``: Sphericity test statistic
        * ``'p-spher'``: p-value of the sphericity test
        * ``'sphericity'``: sphericity of the data (boolean)
    See Also
    --------
    anova, rm_anova, pairwise_tests
    Notes
    -----
    Data are expected to be in long-format (even the repeated measures).
    If your data is in wide-format, you can use the :py:func:`pandas.melt()`
    function to convert from wide to long format.
    Missing values are automatically removed using a strict listwise approach (= complete-case
    analysis). In other words, any subject with one or more missing value(s) is completely removed
    from the dataframe prior to running the test. This could drastically decrease the power of the
    ANOVA if many missing values are present. In that case, we strongly recommend using linear
    mixed effect modelling, which can handle missing values in repeated measures.
    .. warning :: If the between-subject groups are unbalanced (= unequal sample sizes),
        a type II ANOVA will be computed. Note however that SPSS, JAMOVI and JASP by default
        return a type III ANOVA, which may lead to slightly different results.
    Examples
    --------
    For more examples, please refer to the `Jupyter notebooks
    <https://github.com/raphaelvallat/pingouin/blob/master/notebooks/01_ANOVA.ipynb>`_
    Compute a two-way mixed model ANOVA.
    >>> from pingouin import mixed_anova, read_dataset
    >>> df = read_dataset('mixed_anova')
    >>> aov = mixed_anova(dv='Scores', between='Group',
    ...                   within='Time', subject='Subject', data=df)
    >>> aov.round(3)
            Source     SS  DF1  DF2     MS      F  p-unc    np2    eps
    0        Group  5.460    1   58  5.460  5.052  0.028  0.080    NaN
    1         Time  7.628    2  116  3.814  4.027  0.020  0.065  0.999
    2  Interaction  5.167    2  116  2.584  2.728  0.070  0.045    NaN
    Same but reporting a generalized eta-squared effect size. Notice how we
    can also apply this function directly as a method of the dataframe, in
    which case we do not need to specify ``data=df`` anymore.
    >>> df.mixed_anova(dv='Scores', between='Group', within='Time',
    ...                subject='Subject', effsize="ng2").round(3)
            Source     SS  DF1  DF2     MS      F  p-unc    ng2    eps
    0        Group  5.460    1   58  5.460  5.052  0.028  0.031    NaN
    1         Time  7.628    2  116  3.814  4.027  0.020  0.042  0.999
    2  Interaction  5.167    2  116  2.584  2.728  0.070  0.029    NaN
    """
    assert effsize in ["n2", "np2", "ng2"], "effsize must be n2, np2 or ng2."
    # Check that only a single within and between factor are provided
    one_is_list = isinstance(within, list) or isinstance(between, list)
    both_are_str = isinstance(within, (str, int)) and isinstance(between, (str, int))
    if one_is_list or not both_are_str:
        raise ValueError(
            "within and between factors must both be strings referring to a column in the data. "
            "Specifying multiple within and between factors is currently not supported. "
            "For more information, see: https://github.com/raphaelvallat/pingouin/issues/136"
        )
    # Check data
    data = _check_dataframe(
        dv=dv, within=within, between=between, data=data, subject=subject, effects="interaction"
    )
    # Pivot and melt the table. This has several effects:
    # 1) Force missing values to be explicit (a NaN cell is created)
    # 2) Automatic collapsing to the mean if multiple within factors are present
    # 3) If using dropna, remove rows with missing values (listwise deletion).
    # The latter is the same behavior as JASP (= strict complete-case analysis).
    data_piv = data.pivot_table(index=[subject, between], columns=within, values=dv, observed=True)
    data_piv = data_piv.dropna()
    data = data_piv.melt(ignore_index=False, value_name=dv).reset_index()
    # Check that subject IDs do not overlap between groups: the subject ID
    # should have a unique range / set of values for each between-subject
    # group e.g. group1= 1 --> 20 and group2 = 21 --> 40.
    if not (data.groupby([subject, within], observed=True)[between].nunique() == 1).all():
        raise ValueError(
            "Subject IDs cannot overlap between groups: each "
            "group in `%s` must have a unique set of "
            "subject IDs, e.g. group1 = [1, 2, 3, ..., 10] "
            "and group2 = [11, 12, 13, ..., 20]" % between
        )
    # SUMS OF SQUARES
    # Total SS around the grand mean; partitioned below into between,
    # within, interaction and the two residual terms.
    grandmean = data[dv].mean(numeric_only=True)
    ss_total = ((data[dv] - grandmean) ** 2).sum()
    # Extract main effects of within and between factors
    aov_with = rm_anova(
        dv=dv, within=within, subject=subject, data=data, correction=correction, detailed=True
    )
    aov_betw = anova(dv=dv, between=between, data=data, detailed=True)
    ss_betw = aov_betw.at[0, "SS"]
    ss_with = aov_with.at[0, "SS"]
    # Extract residuals and interactions
    grp = data.groupby([between, within], observed=True, group_keys=False)[dv]
    # ssresall = residuals within + residuals between
    ss_resall = grp.apply(lambda x: (x - x.mean()) ** 2).sum()
    # Interaction SS is obtained by subtraction (= what is left of the total)
    ss_inter = ss_total - (ss_resall + ss_with + ss_betw)
    ss_reswith = aov_with.at[1, "SS"] - ss_inter
    ss_resbetw = ss_total - (ss_with + ss_betw + ss_reswith + ss_inter)
    # DEGREES OF FREEDOM
    # n_obs = number of subjects kept after listwise deletion (largest cell
    # count across the within levels).
    n_obs = data.groupby(within, observed=True)[dv].count().max()
    df_with = aov_with.at[0, "DF"]
    df_betw = aov_betw.at[0, "DF"]
    # .count().count() = number of between-subject groups
    df_resbetw = n_obs - data.groupby(between, observed=True)[dv].count().count()
    df_reswith = df_with * df_resbetw
    df_inter = aov_with.at[0, "DF"] * aov_betw.at[0, "DF"]
    # MEAN SQUARES
    ms_betw = aov_betw.at[0, "MS"]
    ms_with = aov_with.at[0, "MS"]
    ms_resbetw = ss_resbetw / df_resbetw
    ms_reswith = ss_reswith / df_reswith
    ms_inter = ss_inter / df_inter
    # F VALUES
    # The between effect is tested against the between-subject residuals;
    # the within and interaction effects against the within residuals.
    f_betw = ms_betw / ms_resbetw
    f_with = ms_with / ms_reswith
    f_inter = ms_inter / ms_reswith
    # P-values
    p_betw = f(df_betw, df_resbetw).sf(f_betw)
    p_with = f(df_with, df_reswith).sf(f_with)
    p_inter = f(df_inter, df_reswith).sf(f_inter)
    # Effects sizes (see Bakeman 2005)
    if effsize == "n2":
        # Standard eta-squared
        ef_betw = ss_betw / ss_total
        ef_with = ss_with / ss_total
        ef_inter = ss_inter / ss_total
    elif effsize == "ng2":
        # Generalized eta-square
        ef_betw = ss_betw / (ss_betw + ss_resall)
        ef_with = ss_with / (ss_with + ss_resall)
        ef_inter = ss_inter / (ss_inter + ss_resall)
    else:
        # Partial eta-squared (default)
        # ef_betw = f_betw * df_betw / (f_betw * df_betw + df_resbetw)
        # ef_with = f_with * df_with / (f_with * df_with + df_reswith)
        ef_betw = ss_betw / (ss_betw + ss_resbetw)
        ef_with = ss_with / (ss_with + ss_reswith)
        ef_inter = ss_inter / (ss_inter + ss_reswith)
    # Stats table: keep row 0 (main effect) of each one-way table, drop the
    # residual rows, then append the interaction row built below.
    aov = pd.concat([aov_betw.drop(1), aov_with.drop(1)], axis=0, sort=False, ignore_index=True)
    # Update values
    aov.rename(columns={"DF": "DF1"}, inplace=True)
    aov.at[0, "F"], aov.at[1, "F"] = f_betw, f_with
    aov.at[0, "p-unc"], aov.at[1, "p-unc"] = p_betw, p_with
    aov.at[0, effsize], aov.at[1, effsize] = ef_betw, ef_with
    aov_inter = pd.DataFrame(
        {
            "Source": "Interaction",
            "SS": ss_inter,
            "DF1": df_inter,
            "MS": ms_inter,
            "F": f_inter,
            "p-unc": p_inter,
            effsize: ef_inter,
        },
        index=[2],
    )
    aov = pd.concat([aov, aov_inter], axis=0, sort=False, ignore_index=True)
    aov["DF2"] = [df_resbetw, df_reswith, df_reswith]
    aov["eps"] = [np.nan, aov_with.at[0, "eps"], np.nan]
    col_order = [
        "Source",
        "SS",
        "DF1",
        "DF2",
        "MS",
        "F",
        "p-unc",
        "p-GG-corr",
        effsize,
        "eps",
        "sphericity",
        "W-spher",
        "p-spher",
    ]
    # Drop columns that are entirely NaN (e.g. sphericity columns when no
    # Greenhouse-Geisser correction was computed by rm_anova).
    aov = aov.reindex(columns=col_order)
    aov.dropna(how="all", axis=1, inplace=True)
    return _postprocess_dataframe(aov)
| (data=None, dv=None, within=None, subject=None, between=None, correction='auto', effsize='np2') |
32,022 | pingouin.multicomp | multicomp | P-values correction for multiple comparisons.
Parameters
----------
pvals : array_like
Uncorrected p-values.
alpha : float
Significance level.
method : string
Method used for testing and adjustment of p-values. Can be either the
full name or initial letters. Available methods are:
* ``'bonf'``: one-step Bonferroni correction
* ``'sidak'``: one-step Sidak correction
* ``'holm'``: step-down method using Bonferroni adjustments
* ``'fdr_bh'``: Benjamini/Hochberg FDR correction
* ``'fdr_by'``: Benjamini/Yekutieli FDR correction
* ``'none'``: pass-through option (no correction applied)
Returns
-------
reject : array, boolean
True for hypothesis that can be rejected for given alpha.
pvals_corrected : array
P-values corrected for multiple testing.
Notes
-----
This function is similar to the `p.adjust
<https://stat.ethz.ch/R-manual/R-devel/library/stats/html/p.adjust.html>`_
R function.
The correction methods include the Bonferroni correction (``'bonf'``)
in which the p-values are multiplied by the number of comparisons.
Less conservative methods are also included such as Sidak (1967)
(``'sidak'``), Holm (1979) (``'holm'``), Benjamini & Hochberg (1995)
(``'fdr_bh'``), and Benjamini & Yekutieli (2001) (``'fdr_by'``),
respectively.
The first three methods are designed to give strong control of the
family-wise error rate. Note that the Holm's method is usually preferred.
The ``'fdr_bh'`` and ``'fdr_by'`` methods control the false discovery rate,
i.e. the expected proportion of false discoveries amongst the rejected
hypotheses. The false discovery rate is a less stringent condition than
the family-wise error rate, so these methods are more powerful than the
others.
The **Bonferroni** [1]_ adjusted p-values are defined as:
.. math::
\widetilde {p}_{{(i)}}= n \cdot p_{{(i)}}
where :math:`n` is the number of *finite* p-values (i.e. excluding NaN).
The **Sidak** [2]_ adjusted p-values are defined as:
.. math::
\widetilde {p}_{{(i)}}= 1 - (1 - p_{{(i)}})^{n}
The **Holm** [3]_ adjusted p-values are the running maximum of the sorted
p-values divided by the corresponding increasing alpha level:
.. math::
\widetilde {p}_{{(i)}}=\max _{{j\leq i}}\left\{(n-j+1)p_{{(j)}}
\right\}_{{1}}
The **Benjamini–Hochberg** procedure (BH step-up procedure, [4]_)
controls the false discovery rate (FDR) at level :math:`\alpha`.
It works as follows:
1. For a given :math:`\alpha`, find the largest :math:`k` such that
:math:`P_{(k)}\leq \frac {k}{n}\alpha.`
2. Reject the null hypothesis for all
:math:`H_{(i)}` for :math:`i = 1, \ldots, k`.
The BH procedure is valid when the :math:`n` tests are independent, and
also in various scenarios of dependence, but is not universally valid.
The **Benjamini–Yekutieli** procedure (BY, [5]_) controls the FDR under
arbitrary dependence assumptions. This refinement modifies the threshold
and finds the largest :math:`k` such that:
.. math::
P_{(k)} \leq \frac{k}{n \cdot c(n)} \alpha
References
----------
.. [1] Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
.. [2] Šidák, Z. K. (1967). "Rectangular Confidence Regions for the Means
of Multivariate Normal Distributions". Journal of the American
Statistical Association. 62 (318): 626–633.
.. [3] Holm, S. (1979). A simple sequentially rejective multiple test
procedure. Scandinavian Journal of Statistics, 6, 65–70.
.. [4] Benjamini, Y., and Hochberg, Y. (1995). Controlling the false
discovery rate: a practical and powerful approach to multiple testing.
Journal of the Royal Statistical Society Series B, 57, 289–300.
.. [5] Benjamini, Y., and Yekutieli, D. (2001). The control of the false
discovery rate in multiple testing under dependency. Annals of
Statistics, 29, 1165–1188.
Examples
--------
FDR correction of an array of p-values
>>> import pingouin as pg
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = pg.multicomp(pvals, method='fdr_bh')
>>> print(reject, pvals_corr)
[False True False False True] [0.5 0.0075 0.4 0.09 0.0015]
Holm correction with missing values
>>> import numpy as np
>>> pvals[2] = np.nan
>>> reject, pvals_corr = pg.multicomp(pvals, method='holm')
>>> print(reject, pvals_corr)
[False True False False True] [0.5 0.009 nan 0.108 0.0012]
def multicomp(pvals, alpha=0.05, method="holm"):
    """P-values correction for multiple comparisons.

    Adjust an array of p-values for multiple testing, similar to the R
    ``p.adjust`` function. NaN entries are preserved as NaN in the output.

    Parameters
    ----------
    pvals : array_like
        Uncorrected p-values.
    alpha : float
        Significance level (must lie strictly between 0 and 1).
    method : string
        Correction method, given as the full name or its initial letter(s):

        * ``'bonf'``: one-step Bonferroni correction
        * ``'sidak'``: one-step Sidak correction
        * ``'holm'``: step-down method using Bonferroni adjustments
        * ``'fdr_bh'``: Benjamini/Hochberg FDR correction
        * ``'fdr_by'``: Benjamini/Yekutieli FDR correction
        * ``'none'``: pass-through option (no correction applied)

        The first three control the family-wise error rate; the two FDR
        methods control the false discovery rate and are more powerful.

    Returns
    -------
    reject : array, boolean
        True for each hypothesis that can be rejected at the given alpha.
    pvals_corrected : array
        P-values corrected for multiple testing.

    Examples
    --------
    >>> import pingouin as pg
    >>> pvals = [.50, .003, .32, .054, .0003]
    >>> reject, pvals_corr = pg.multicomp(pvals, method='fdr_bh')
    >>> print(reject, pvals_corr)
    [False  True False False  True] [0.5    0.0075 0.4    0.09   0.0015]
    """
    # Input validation
    assert isinstance(pvals, (list, np.ndarray, Series)), "pvals must be list or array"
    assert isinstance(alpha, float), "alpha must be a float."
    assert isinstance(method, str), "method must be a string."
    assert 0 < alpha < 1, "alpha must be between 0 and 1."
    pvals = np.asarray(pvals)

    normalized = method.lower()
    if normalized == "none":
        # Pass-through: compare the raw p-values against alpha. The errstate
        # guard silences the warning raised by NaN comparisons.
        with np.errstate(invalid="ignore"):
            return np.less(pvals, alpha), pvals
    # Dispatch on the accepted aliases for each correction routine.
    if normalized in ("b", "bonf", "bonferroni"):
        return bonf(pvals, alpha=alpha)
    if normalized in ("h", "holm"):
        return holm(pvals, alpha=alpha)
    if normalized in ("s", "sidak"):
        return sidak(pvals, alpha=alpha)
    if normalized in ("fdr", "fdr_bh", "bh"):
        return fdr(pvals, alpha=alpha, method="fdr_bh")
    if normalized in ("fdr_by", "by"):
        return fdr(pvals, alpha=alpha, method="fdr_by")
    raise ValueError("Multiple comparison method not recognized")
| (pvals, alpha=0.05, method='holm') |
32,024 | pingouin.multivariate | multivariate_normality | Henze-Zirkler multivariate normality test.
Parameters
----------
X : np.array
Data matrix of shape (n_samples, n_features).
alpha : float
Significance level.
Returns
-------
hz : float
The Henze-Zirkler test statistic.
pval : float
P-value.
normal : boolean
True if X comes from a multivariate normal distribution.
See Also
--------
normality : Test the univariate normality of one or more variables.
homoscedasticity : Test equality of variance.
sphericity : Mauchly's test for sphericity.
Notes
-----
The Henze-Zirkler test [1]_ has a good overall power against alternatives
to normality and works for any dimension and sample size.
Adapted to Python from a Matlab code [2]_ by Antonio Trujillo-Ortiz and
tested against the
`MVN <https://cran.r-project.org/web/packages/MVN/MVN.pdf>`_ R package.
Rows with missing values are automatically removed.
References
----------
.. [1] Henze, N., & Zirkler, B. (1990). A class of invariant consistent
tests for multivariate normality. Communications in Statistics-Theory
and Methods, 19(10), 3595-3617.
.. [2] Trujillo-Ortiz, A., R. Hernandez-Walls, K. Barba-Rojo and L.
Cupul-Magana. (2007). HZmvntest: Henze-Zirkler's Multivariate
Normality Test. A MATLAB file.
Examples
--------
>>> import pingouin as pg
>>> data = pg.read_dataset('multivariate')
>>> X = data[['Fever', 'Pressure', 'Aches']]
>>> pg.multivariate_normality(X, alpha=.05)
HZResults(hz=0.540086101851555, pval=0.7173686509622386, normal=True)
| def multivariate_normality(X, alpha=0.05):
    """Henze-Zirkler multivariate normality test.
    Parameters
    ----------
    X : np.array
        Data matrix of shape (n_samples, n_features).
    alpha : float
        Significance level.
    Returns
    -------
    hz : float
        The Henze-Zirkler test statistic.
    pval : float
        P-value.
    normal : boolean
        True if X comes from a multivariate normal distribution.
    See Also
    --------
    normality : Test the univariate normality of one or more variables.
    homoscedasticity : Test equality of variance.
    sphericity : Mauchly's test for sphericity.
    Notes
    -----
    The Henze-Zirkler test [1]_ has a good overall power against alternatives
    to normality and works for any dimension and sample size.
    Adapted to Python from a Matlab code [2]_ by Antonio Trujillo-Ortiz and
    tested against the
    `MVN <https://cran.r-project.org/web/packages/MVN/MVN.pdf>`_ R package.
    Rows with missing values are automatically removed.
    References
    ----------
    .. [1] Henze, N., & Zirkler, B. (1990). A class of invariant consistent
       tests for multivariate normality. Communications in Statistics-Theory
       and Methods, 19(10), 3595-3617.
    .. [2] Trujillo-Ortiz, A., R. Hernandez-Walls, K. Barba-Rojo and L.
       Cupul-Magana. (2007). HZmvntest: Henze-Zirkler's Multivariate
       Normality Test. A MATLAB file.
    Examples
    --------
    >>> import pingouin as pg
    >>> data = pg.read_dataset('multivariate')
    >>> X = data[['Fever', 'Pressure', 'Aches']]
    >>> pg.multivariate_normality(X, alpha=.05)
    HZResults(hz=0.540086101851555, pval=0.7173686509622386, normal=True)
    """
    from scipy.stats import lognorm
    # Check input and remove missing values (listwise deletion of rows)
    X = np.asarray(X)
    assert X.ndim == 2, "X must be of shape (n_samples, n_features)."
    X = X[~np.isnan(X).any(axis=1)]
    n, p = X.shape
    assert n >= 3, "X must have at least 3 rows."
    assert p >= 2, "X must have at least two columns."
    # Covariance matrix (biased, i.e. normalized by n as in the reference)
    S = np.cov(X, rowvar=False, bias=True)
    S_inv = np.linalg.pinv(S, hermitian=True).astype(X.dtype)  # Preserving original dtype
    difT = X - X.mean(0)
    # Squared-Mahalanobis distances
    # Dj: distance of each observation to the sample mean.
    Dj = np.diag(np.linalg.multi_dot([difT, S_inv, difT.T]))
    # Djk: pairwise squared Mahalanobis distances between all observations,
    # built from the Gram matrix Y via the identity |xi - xj|^2 = Yii + Yjj - 2*Yij.
    Y = np.linalg.multi_dot([X, S_inv, X.T])
    Djk = -2 * Y.T + np.repeat(np.diag(Y.T), n).reshape(n, -1) + np.tile(np.diag(Y.T), (n, 1))
    # Smoothing parameter (optimal bandwidth from Henze & Zirkler 1990)
    b = 1 / (np.sqrt(2)) * ((2 * p + 1) / 4) ** (1 / (p + 4)) * (n ** (1 / (p + 4)))
    # Is matrix full-rank (columns are linearly independent)?
    if np.linalg.matrix_rank(S) == p:
        hz = n * (
            1 / (n**2) * np.sum(np.sum(np.exp(-(b**2) / 2 * Djk)))
            - 2
            * ((1 + (b**2)) ** (-p / 2))
            * (1 / n)
            * (np.sum(np.exp(-((b**2) / (2 * (1 + (b**2)))) * Dj)))
            + ((1 + (2 * (b**2))) ** (-p / 2))
        )
    else:
        # Singular covariance: degenerate-case value used by the MATLAB
        # reference implementation [2]. NOTE(review): confirm n * 4 against [2].
        hz = n * 4
    wb = (1 + b**2) * (1 + 3 * b**2)
    a = 1 + 2 * b**2
    # Mean and variance of the statistic under the null hypothesis
    mu = 1 - a ** (-p / 2) * (1 + p * b**2 / a + (p * (p + 2) * (b**4)) / (2 * a**2))
    si2 = (
        2 * (1 + 4 * b**2) ** (-p / 2)
        + 2
        * a ** (-p)
        * (1 + (2 * p * b**4) / a**2 + (3 * p * (p + 2) * b**8) / (4 * a**4))
        - 4
        * wb ** (-p / 2)
        * (1 + (3 * p * b**4) / (2 * wb) + (p * (p + 2) * b**8) / (2 * wb**2))
    )
    # Lognormal mean and variance (the null distribution of hz is
    # approximated by a lognormal with these parameters)
    pmu = np.log(np.sqrt(mu**4 / (si2 + mu**2)))
    psi = np.sqrt(np.log1p(si2 / mu**2))
    # P-value = survival function of the lognormal approximation
    pval = lognorm.sf(hz, psi, scale=np.exp(pmu))
    normal = True if pval > alpha else False
    HZResults = namedtuple("HZResults", ["hz", "pval", "normal"])
    return HZResults(hz=hz, pval=pval, normal=normal)
| (X, alpha=0.05) |
32,025 | pingouin.multivariate | multivariate_ttest | Hotelling T-squared test (= multivariate T-test)
Parameters
----------
X : np.array
First data matrix of shape (n_samples, n_features).
Y : np.array or None
Second data matrix of shape (n_samples, n_features). If ``Y`` is a 1D
array of shape (n_features), a one-sample test is performed where the
null hypothesis is defined in ``Y``. If ``Y`` is None, a one-sample
is performed against np.zeros(n_features).
paired : boolean
Specify whether the two observations are related (i.e. repeated
measures) or independent. If ``paired`` is True, ``X`` and ``Y`` must
have exactly the same shape.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'T2'``: T-squared value
* ``'F'``: F-value
* ``'df1'``: first degree of freedom
* ``'df2'``: second degree of freedom
* ``'p-val'``: p-value
See Also
--------
multivariate_normality : Multivariate normality test.
ttest : Univariate T-test.
Notes
-----
The Hotelling 's T-squared test [1]_ is the multivariate counterpart of
the T-test.
Rows with missing values are automatically removed using the
:py:func:`remove_na` function.
Tested against the `Hotelling
<https://cran.r-project.org/web/packages/Hotelling/Hotelling.pdf>`_ R
package.
References
----------
.. [1] Hotelling, H. The Generalization of Student's Ratio. Ann. Math.
Statist. 2 (1931), no. 3, 360--378.
See also http://www.real-statistics.com/multivariate-statistics/
Examples
--------
Two-sample independent Hotelling T-squared test
>>> import pingouin as pg
>>> data = pg.read_dataset('multivariate')
>>> dvs = ['Fever', 'Pressure', 'Aches']
>>> X = data[data['Condition'] == 'Drug'][dvs]
>>> Y = data[data['Condition'] == 'Placebo'][dvs]
>>> pg.multivariate_ttest(X, Y)
T2 F df1 df2 pval
hotelling 4.228679 1.326644 3 32 0.282898
Two-sample paired Hotelling T-squared test
>>> pg.multivariate_ttest(X, Y, paired=True)
T2 F df1 df2 pval
hotelling 4.468456 1.314252 3 15 0.306542
One-sample Hotelling T-squared test with a specified null hypothesis
>>> null_hypothesis_means = [37.5, 70, 5]
>>> pg.multivariate_ttest(X, Y=null_hypothesis_means)
T2 F df1 df2 pval
hotelling 253.230991 74.479703 3 15 3.081281e-09
| def multivariate_ttest(X, Y=None, paired=False):
    """Hotelling T-squared test (= multivariate T-test)
    Parameters
    ----------
    X : np.array
        First data matrix of shape (n_samples, n_features).
    Y : np.array or None
        Second data matrix of shape (n_samples, n_features). If ``Y`` is a 1D
        array of shape (n_features), a one-sample test is performed where the
        null hypothesis is defined in ``Y``. If ``Y`` is None, a one-sample
        is performed against np.zeros(n_features).
    paired : boolean
        Specify whether the two observations are related (i.e. repeated
        measures) or independent. If ``paired`` is True, ``X`` and ``Y`` must
        have exactly the same shape.
    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        * ``'T2'``: T-squared value
        * ``'F'``: F-value
        * ``'df1'``: first degree of freedom
        * ``'df2'``: second degree of freedom
        * ``'p-val'``: p-value
    See Also
    --------
    multivariate_normality : Multivariate normality test.
    ttest : Univariate T-test.
    Notes
    -----
    The Hotelling 's T-squared test [1]_ is the multivariate counterpart of
    the T-test.
    Rows with missing values are automatically removed using the
    :py:func:`remove_na` function.
    Tested against the `Hotelling
    <https://cran.r-project.org/web/packages/Hotelling/Hotelling.pdf>`_ R
    package.
    References
    ----------
    .. [1] Hotelling, H. The Generalization of Student's Ratio. Ann. Math.
       Statist. 2 (1931), no. 3, 360--378.
    See also http://www.real-statistics.com/multivariate-statistics/
    Examples
    --------
    Two-sample independent Hotelling T-squared test
    >>> import pingouin as pg
    >>> data = pg.read_dataset('multivariate')
    >>> dvs = ['Fever', 'Pressure', 'Aches']
    >>> X = data[data['Condition'] == 'Drug'][dvs]
    >>> Y = data[data['Condition'] == 'Placebo'][dvs]
    >>> pg.multivariate_ttest(X, Y)
                     T2         F  df1  df2      pval
    hotelling  4.228679  1.326644    3   32  0.282898
    Two-sample paired Hotelling T-squared test
    >>> pg.multivariate_ttest(X, Y, paired=True)
                     T2         F  df1  df2      pval
    hotelling  4.468456  1.314252    3   15  0.306542
    One-sample Hotelling T-squared test with a specified null hypothesis
    >>> null_hypothesis_means = [37.5, 70, 5]
    >>> pg.multivariate_ttest(X, Y=null_hypothesis_means)
                       T2          F  df1  df2          pval
    hotelling  253.230991  74.479703    3   15  3.081281e-09
    """
    from scipy.stats import f
    x = np.asarray(X)
    assert x.ndim == 2, "x must be of shape (n_samples, n_features)"
    if Y is None:
        # One-sample test against a null vector of zeros
        y = np.zeros(x.shape[1])
        # Remove rows with missing values in x
        x = x[~np.isnan(x).any(axis=1)]
    else:
        nx, kx = x.shape
        y = np.asarray(Y)
        assert y.ndim in [1, 2], "Y must be 1D or 2D."
        if y.ndim == 1:
            # One sample with specified null (Y holds the hypothesized means)
            assert y.size == kx
        else:
            # Two-sample
            err = "X and Y must have the same number of features (= columns)."
            assert y.shape[1] == kx, err
            if paired:
                err = "X and Y must have the same number of rows if paired."
                assert y.shape[0] == nx, err
        # Remove rows with missing values in both x and y
        x, y = remove_na(x, y, paired=paired, axis="rows")
    # Shape of arrays (after listwise NaN removal)
    nx, k = x.shape
    ny = y.shape[0]
    assert nx >= 5, "At least five samples are required."
    if y.ndim == 1 or paired is True:
        # One-sample and paired cases both reduce to a one-sample T2
        n = nx
        if y.ndim == 1:
            # One sample test
            cov = np.cov(x, rowvar=False)
            diff = x.mean(0) - y
        else:
            # Paired two sample: test the difference scores against zero
            cov = np.cov(x - y, rowvar=False)
            diff = x.mean(0) - y.mean(0)
        inv_cov = np.linalg.pinv(cov, hermitian=True)
        t2 = (diff @ inv_cov) @ diff * n
    else:
        # Independent two-sample: pooled covariance estimate.
        # NOTE(review): n is defined as nx + ny - 1 here (not nx + ny - 2),
        # matching the Hotelling R package this was tested against — confirm.
        n = nx + ny - 1
        x_cov = np.cov(x, rowvar=False)
        y_cov = np.cov(y, rowvar=False)
        pooled_cov = ((nx - 1) * x_cov + (ny - 1) * y_cov) / (n - 1)
        inv_cov = np.linalg.pinv((1 / nx + 1 / ny) * pooled_cov, hermitian=True)
        diff = x.mean(0) - y.mean(0)
        t2 = (diff @ inv_cov) @ diff
    # F-value, degrees of freedom and p-value: T2 is converted to an
    # F(k, n - k) statistic via F = T2 * (n - k) / (k * (n - 1))
    fval = t2 * (n - k) / (k * (n - 1))
    df1 = k
    df2 = n - k
    pval = f.sf(fval, df1, df2)
    # Create output dictionnary
    stats = {"T2": t2, "F": fval, "df1": df1, "df2": df2, "pval": pval}
    stats = pd.DataFrame(stats, index=["hotelling"])
    return _postprocess_dataframe(stats)
| (X, Y=None, paired=False) |
32,026 | pingouin.nonparametric | mwu | Mann-Whitney U Test (= Wilcoxon rank-sum test). It is the non-parametric
version of the independent T-test.
Parameters
----------
x, y : array_like
First and second set of observations. ``x`` and ``y`` must be
independent.
alternative : string
Defines the alternative hypothesis, or tail of the test. Must be one of
"two-sided" (default), "greater" or "less". See :py:func:`scipy.stats.mannwhitneyu` for
more details.
**kwargs : dict
Additional keywords arguments that are passed to :py:func:`scipy.stats.mannwhitneyu`.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'U-val'``: U-value
* ``'alternative'``: tail of the test
* ``'p-val'``: p-value
* ``'RBC'`` : rank-biserial correlation
* ``'CLES'`` : common language effect size
See also
--------
scipy.stats.mannwhitneyu, wilcoxon, ttest
Notes
-----
The Mann–Whitney U test [1]_ (also called Wilcoxon rank-sum test) is a
non-parametric test of the null hypothesis that it is equally likely that
a randomly selected value from one sample will be less than or greater
than a randomly selected value from a second sample. The test assumes
that the two samples are independent. This test corrects for ties and by
default uses a continuity correction (see :py:func:`scipy.stats.mannwhitneyu` for details).
The rank biserial correlation [2]_ is the difference between
the proportion of favorable evidence minus the proportion of unfavorable
evidence.
The common language effect size is the proportion of pairs where ``x`` is
higher than ``y``. It was first introduced by McGraw and Wong (1992) [3]_.
Pingouin uses a brute-force version of the formula given by Vargha and
Delaney 2000 [4]_:
.. math:: \text{CL} = P(X > Y) + .5 \times P(X = Y)
The advantages of this method are twofold. First, the brute-force
approach pairs each observation of ``x`` to its ``y`` counterpart, and
therefore does not require normally distributed data. Second, the formula
takes ties into account and therefore works with ordinal data.
When tail is ``'less'``, the CLES is then set to :math:`1 - \text{CL}`,
which gives the proportion of pairs where ``x`` is *lower* than ``y``.
References
----------
.. [1] Mann, H. B., & Whitney, D. R. (1947). On a test of whether one of
two random variables is stochastically larger than the other.
The annals of mathematical statistics, 50-60.
.. [2] Kerby, D. S. (2014). The simple difference formula: An approach to
teaching nonparametric correlation. Comprehensive Psychology,
3, 11-IT.
.. [3] McGraw, K. O., & Wong, S. P. (1992). A common language effect size
statistic. Psychological bulletin, 111(2), 361.
.. [4] Vargha, A., & Delaney, H. D. (2000). A Critique and Improvement of
the “CL” Common Language Effect Size Statistics of McGraw and Wong.
Journal of Educational and Behavioral Statistics: A Quarterly
Publication Sponsored by the American Educational Research
Association and the American Statistical Association, 25(2),
101–132. https://doi.org/10.2307/1165329
Examples
--------
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.uniform(low=0, high=1, size=20)
>>> y = np.random.uniform(low=0.2, high=1.2, size=20)
>>> pg.mwu(x, y, alternative='two-sided')
U-val alternative p-val RBC CLES
MWU 97.0 two-sided 0.00556 0.515 0.2425
Compare with SciPy
>>> import scipy
>>> scipy.stats.mannwhitneyu(x, y, use_continuity=True, alternative='two-sided')
MannwhitneyuResult(statistic=97.0, pvalue=0.0055604599321374135)
One-sided test
>>> pg.mwu(x, y, alternative='greater')
U-val alternative p-val RBC CLES
MWU 97.0 greater 0.997442 0.515 0.2425
>>> pg.mwu(x, y, alternative='less')
U-val alternative p-val RBC CLES
MWU 97.0 less 0.00278 0.515 0.7575
Passing keyword arguments to :py:func:`scipy.stats.mannwhitneyu`:
>>> pg.mwu(x, y, alternative='two-sided', method='exact')
U-val alternative p-val RBC CLES
MWU 97.0 two-sided 0.004681 0.515 0.2425
def mwu(x, y, alternative="two-sided", **kwargs):
    """Mann-Whitney U Test (= Wilcoxon rank-sum test).

    Non-parametric counterpart of the independent-samples T-test.

    Parameters
    ----------
    x, y : array_like
        First and second set of observations. ``x`` and ``y`` must be
        independent.
    alternative : string
        Tail of the test. One of "two-sided" (default), "greater" or "less".
        See :py:func:`scipy.stats.mannwhitneyu` for details.
    **kwargs : dict
        Additional keyword arguments forwarded to
        :py:func:`scipy.stats.mannwhitneyu` (e.g. ``method='exact'``).

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        * ``'U-val'``: U-value
        * ``'alternative'``: tail of the test
        * ``'p-val'``: p-value
        * ``'RBC'`` : rank-biserial correlation
        * ``'CLES'`` : common language effect size

    See also
    --------
    scipy.stats.mannwhitneyu, wilcoxon, ttest

    Notes
    -----
    Missing values are removed listwise before testing. The common language
    effect size (CLES) is computed with the brute-force formula of Vargha and
    Delaney (2000), :math:`\\text{CL} = P(X > Y) + .5 \\times P(X = Y)`, which
    handles ties and ordinal data. When ``alternative='less'`` the CLES is
    reported as :math:`1 - \\text{CL}`, i.e. the proportion of pairs where
    ``x`` is *lower* than ``y``. The rank-biserial correlation (Kerby, 2014;
    Wendt, 1972) is the difference between the proportion of favorable and
    unfavorable evidence.
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Drop missing values (samples are independent, so no pairing).
    x, y = remove_na(x, y, paired=False)

    # Validate the requested tail before running the test.
    assert alternative in [
        "two-sided",
        "greater",
        "less",
    ], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    if "tail" in kwargs:
        raise ValueError(
            "Since Pingouin 0.4.0, the 'tail' argument has been renamed to 'alternative'."
        )

    u_stat, p_value = scipy.stats.mannwhitneyu(x, y, alternative=alternative, **kwargs)

    # Common Language Effect Size: brute-force pairwise comparison with ties
    # counted as 0.5 (Vargha & Delaney, 2000). 'two-sided' and 'greater'
    # share the same value; 'less' reports the complement.
    pairwise = x[:, None] - y
    cles = ((pairwise > 0) + 0.5 * (pairwise == 0)).mean()
    if alternative == "less":
        cles = 1 - cles

    # Rank-biserial correlation (Wendt 1972); pairwise.size == x.size * y.size.
    rbc = 1 - (2 * u_stat) / pairwise.size

    out = pd.DataFrame(
        {"U-val": u_stat, "alternative": alternative, "p-val": p_value, "RBC": rbc, "CLES": cles},
        index=["MWU"],
    )
    return _postprocess_dataframe(out)
| (x, y, alternative='two-sided', **kwargs) |
32,028 | pingouin.distribution | normality | Univariate normality test.
Parameters
----------
data : :py:class:`pandas.DataFrame`, series, list or 1D np.array
Iterable. Can be either a single list, 1D numpy array,
or a wide- or long-format pandas dataframe.
dv : str
Dependent variable (only when ``data`` is a long-format dataframe).
group : str
Grouping variable (only when ``data`` is a long-format dataframe).
method : str
Normality test. `'shapiro'` (default) performs the Shapiro-Wilk test
using :py:func:`scipy.stats.shapiro`, `'normaltest'` performs the
omnibus test of normality using :py:func:`scipy.stats.normaltest`, `'jarque_bera'` performs
the Jarque-Bera test using :py:func:`scipy.stats.jarque_bera`.
The Omnibus and Jarque-Bera tests are more suitable than the Shapiro test for
large samples.
alpha : float
Significance level.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'W'``: Test statistic.
* ``'pval'``: p-value.
* ``'normal'``: True if ``data`` is normally distributed.
See Also
--------
homoscedasticity : Test equality of variance.
sphericity : Mauchly's test for sphericity.
Notes
-----
The Shapiro-Wilk test calculates a :math:`W` statistic that tests whether a
random sample :math:`x_1, x_2, ..., x_n` comes from a normal distribution.
The :math:`W` statistic is calculated as follows:
.. math::
W = \frac{(\sum_{i=1}^n a_i x_{i})^2}
{\sum_{i=1}^n (x_i - \overline{x})^2}
where the :math:`x_i` are the ordered sample values (in ascending
order) and the :math:`a_i` are constants generated from the means,
variances and covariances of the order statistics of a sample of size
:math:`n` from a standard normal distribution. Specifically:
.. math:: (a_1, ..., a_n) = \frac{m^TV^{-1}}{(m^TV^{-1}V^{-1}m)^{1/2}}
with :math:`m = (m_1, ..., m_n)^T` and :math:`(m_1, ..., m_n)` are the
expected values of the order statistics of independent and identically
distributed random variables sampled from the standard normal distribution,
and :math:`V` is the covariance matrix of those order statistics.
The null-hypothesis of this test is that the population is normally
distributed. Thus, if the p-value is less than the
chosen alpha level (typically set at 0.05), then the null hypothesis is
rejected and there is evidence that the data tested are not normally
distributed.
The result of the Shapiro-Wilk test should be interpreted with caution in
the case of large sample sizes. Indeed, quoting from
`Wikipedia <https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test>`_:
*"Like most statistical significance tests, if the sample size is
sufficiently large this test may detect even trivial departures from
the null hypothesis (i.e., although there may be some statistically
significant effect, it may be too small to be of any practical
significance); thus, additional investigation of the effect size is
typically advisable, e.g., a Q–Q plot in this case."*
Note that missing values are automatically removed (casewise deletion).
References
----------
* Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
* https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
Examples
--------
1. Shapiro-Wilk test on a 1D array.
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.normal(size=100)
>>> pg.normality(x)
W pval normal
0 0.98414 0.274886 True
2. Omnibus test on a wide-format dataframe with missing values
>>> data = pg.read_dataset('mediation')
>>> data.loc[1, 'X'] = np.nan
>>> pg.normality(data, method='normaltest').round(3)
W pval normal
X 1.792 0.408 True
M 0.492 0.782 True
Y 0.349 0.840 True
Mbin 839.716 0.000 False
Ybin 814.468 0.000 False
W1 24.816 0.000 False
W2 43.400 0.000 False
3. Pandas Series
>>> pg.normality(data['X'], method='normaltest')
W pval normal
X 1.791839 0.408232 True
4. Long-format dataframe
>>> data = pg.read_dataset('rm_anova2')
>>> pg.normality(data, dv='Performance', group='Time')
W pval normal
Time
Pre 0.967718 0.478773 True
Post 0.940728 0.095157 True
5. Same but using the Jarque-Bera test
>>> pg.normality(data, dv='Performance', group='Time', method="jarque_bera")
W pval normal
Time
Pre 0.304021 0.858979 True
Post 1.265656 0.531088 True
def normality(data, dv=None, group=None, method="shapiro", alpha=0.05):
    """Univariate normality test.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`, series, list or 1D np.array
        Iterable. Can be a single list, a 1D numpy array, or a wide- or
        long-format pandas dataframe.
    dv : str
        Dependent variable (only when ``data`` is a long-format dataframe).
    group : str
        Grouping variable (only when ``data`` is a long-format dataframe).
    method : str
        Normality test: `'shapiro'` (default, :py:func:`scipy.stats.shapiro`),
        `'normaltest'` (omnibus test, :py:func:`scipy.stats.normaltest`) or
        `'jarque_bera'` (:py:func:`scipy.stats.jarque_bera`). The omnibus and
        Jarque-Bera tests are better suited to large samples than Shapiro-Wilk.
    alpha : float
        Significance level.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        * ``'W'``: Test statistic.
        * ``'pval'``: p-value.
        * ``'normal'``: True if ``data`` is normally distributed.

    See Also
    --------
    homoscedasticity : Test equality of variance.
    sphericity : Mauchly's test for sphericity.

    Notes
    -----
    The null hypothesis is that the population is normally distributed: a
    p-value below ``alpha`` rejects normality. Interpret the Shapiro-Wilk
    result with caution for large samples, where even trivial departures from
    normality become significant. Missing values are removed casewise; groups
    with fewer than 4 valid samples return NaN with a warning.
    """
    assert isinstance(data, (pd.DataFrame, pd.Series, list, np.ndarray))
    assert method in ["shapiro", "normaltest", "jarque_bera"]
    if isinstance(data, pd.Series):
        data = data.to_frame()
    test_func = getattr(scipy.stats, method)

    # Case 1: plain 1D array-like input.
    if isinstance(data, (list, np.ndarray)):
        arr = np.asarray(data)
        assert arr.ndim == 1, "Data must be 1D."
        assert arr.size > 3, "Data must have more than 3 samples."
        arr = remove_na(arr)
        res = pd.DataFrame(test_func(arr)).T
        res.columns = ["W", "pval"]
        res["normal"] = res["pval"].to_numpy() > alpha
        return _postprocess_dataframe(res)

    # Case 2: wide-format dataframe -- test every numeric column separately,
    # dropping missing values per column.
    if dv is None and group is None:
        numeric = data._get_numeric_data()
        res = numeric.apply(lambda col: test_func(col.dropna()), result_type="expand", axis=0).T
        res.columns = ["W", "pval"]
        res["normal"] = res["pval"].to_numpy() > alpha
        return _postprocess_dataframe(res)

    # Case 3: long-format dataframe -- one test per level of ``group``.
    assert group in data.columns
    assert dv in data.columns
    grouped = data.groupby(group, observed=True, sort=False)
    labels = list(grouped.groups)
    pieces = []
    for label, subset in grouped:
        if subset[dv].count() <= 3:
            # Too few valid samples for a meaningful test.
            warnings.warn(f"Group {label} has less than 4 valid samples. Returning NaN.")
            pieces.append(
                pd.DataFrame({"W": np.nan, "pval": np.nan, "normal": False}, index=[label])
            )
        else:
            # Recurse through the 1D-array branch for this group.
            pieces.append(normality(subset[dv].to_numpy(), method=method, alpha=alpha))
    res = pd.concat(pieces, axis=0, ignore_index=True) if pieces else pd.DataFrame([])
    res.index = labels
    res.index.name = group
    return _postprocess_dataframe(res)
| (data, dv=None, group=None, method='shapiro', alpha=0.05) |
32,030 | pingouin.pairwise | pairwise_corr | Pairwise (partial) correlations between columns of a pandas dataframe.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
columns : list or str
Column names in data:
* ``["a", "b", "c"]``: combination between columns a, b, and c.
* ``["a"]``: product between a and all the other numeric columns.
* ``[["a"], ["b", "c"]]``: product between ["a"] and ["b", "c"].
* ``[["a", "d"], ["b", "c"]]``: product between ["a", "d"] and
["b", "c"].
* ``[["a", "d"], None]``: product between ["a", "d"] and all other
numeric columns in dataframe.
If column is None, the function will return the pairwise correlation
between the combination of all the numeric columns in data.
See the examples section for more details on this.
covar : None, string or list
Covariate(s) for partial correlation. Must be one or more columns
in data. Use a list if there are more than one covariate. If
``covar`` is not None, a partial correlation will be computed using
:py:func:`pingouin.partial_corr` function.
.. important:: Only ``method='pearson'`` and ``method='spearman'``
are currently supported in partial correlation.
alternative : string
Defines the alternative hypothesis, or tail of the correlation. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
p-value. "greater" tests against the alternative hypothesis that the correlation is
positive (greater than zero), "less" tests against the hypothesis that the correlation is
negative.
method : string
Correlation type:
* ``'pearson'``: Pearson :math:`r` product-moment correlation
* ``'spearman'``: Spearman :math:`\rho` rank-order correlation
* ``'kendall'``: Kendall's :math:`\tau_B` correlation
(for ordinal data)
* ``'bicor'``: Biweight midcorrelation (robust)
* ``'percbend'``: Percentage bend correlation (robust)
* ``'shepherd'``: Shepherd's pi correlation (robust)
* ``'skipped'``: Skipped correlation (robust)
padjust : string
Method used for testing and adjustment of pvalues.
* ``'none'``: no correction
* ``'bonf'``: one-step Bonferroni correction
* ``'sidak'``: one-step Sidak correction
* ``'holm'``: step-down method using Bonferroni adjustments
* ``'fdr_bh'``: Benjamini/Hochberg FDR correction
* ``'fdr_by'``: Benjamini/Yekutieli FDR correction
nan_policy : string
Can be ``'listwise'`` for listwise deletion of missing values
(= complete-case analysis) or ``'pairwise'`` (default) for the more
liberal pairwise deletion (= available-case analysis).
.. versionadded:: 0.2.9
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'X'``: Name(s) of first columns.
* ``'Y'``: Name(s) of second columns.
* ``'method'``: Correlation type.
* ``'covar'``: List of specified covariate(s), only when covariates are passed.
* ``'alternative'``: Tail of the test.
* ``'n'``: Sample size (after removal of missing values).
* ``'r'``: Correlation coefficients.
* ``'CI95'``: 95% parametric confidence intervals.
* ``'p-unc'``: Uncorrected p-values.
* ``'p-corr'``: Corrected p-values.
* ``'p-adjust'``: P-values correction method.
* ``'BF10'``: Bayes Factor of the alternative hypothesis (only for Pearson correlation)
* ``'power'``: achieved power of the test (= 1 - type II error).
Notes
-----
Please refer to the :py:func:`pingouin.corr()` function for a description
of the different methods. Missing values are automatically removed from the
data using a pairwise deletion.
This function is more flexible and gives a much more detailed
output than the :py:func:`pandas.DataFrame.corr()` method (i.e. p-values,
confidence interval, Bayes Factor...). This comes however at
an increased computational cost. While this should not be discernible for
a dataframe with less than 10,000 rows and/or less than 20 columns, this
function can be slow for very large datasets.
A faster alternative to get the r-values and p-values in a matrix format is
to use the :py:func:`pingouin.rcorr` function, which works directly as a
:py:class:`pandas.DataFrame` method (see example below).
This function also works with two-dimensional multi-index columns. In this
case, columns must be list(s) of tuple(s). Please refer to this `example
Jupyter notebook
<https://github.com/raphaelvallat/pingouin/blob/master/notebooks/04_Correlations.ipynb>`_
for more details.
If and only if ``covar`` is specified, this function will compute the
pairwise partial correlation between the variables. If you are only
interested in computing the partial correlation matrix (i.e. the raw
pairwise partial correlation coefficient matrix, without the p-values,
sample sizes, etc), a better alternative is to use the
:py:func:`pingouin.pcorr` function (see example 7).
Examples
--------
1. One-sided spearman correlation corrected for multiple comparisons
>>> import pandas as pd
>>> import pingouin as pg
>>> pd.set_option('display.expand_frame_repr', False)
>>> pd.set_option('display.max_columns', 20)
>>> data = pg.read_dataset('pairwise_corr').iloc[:, 1:]
>>> pg.pairwise_corr(data, method='spearman', alternative='greater', padjust='bonf').round(3)
X Y method alternative n r CI95% p-unc p-corr p-adjust power
0 Neuroticism Extraversion spearman greater 500 -0.325 [-0.39, 1.0] 1.000 1.000 bonf 0.000
1 Neuroticism Openness spearman greater 500 -0.028 [-0.1, 1.0] 0.735 1.000 bonf 0.012
2 Neuroticism Agreeableness spearman greater 500 -0.151 [-0.22, 1.0] 1.000 1.000 bonf 0.000
3 Neuroticism Conscientiousness spearman greater 500 -0.356 [-0.42, 1.0] 1.000 1.000 bonf 0.000
4 Extraversion Openness spearman greater 500 0.243 [0.17, 1.0] 0.000 0.000 bonf 1.000
5 Extraversion Agreeableness spearman greater 500 0.062 [-0.01, 1.0] 0.083 0.832 bonf 0.398
6 Extraversion Conscientiousness spearman greater 500 0.056 [-0.02, 1.0] 0.106 1.000 bonf 0.345
7 Openness Agreeableness spearman greater 500 0.170 [0.1, 1.0] 0.000 0.001 bonf 0.985
8 Openness Conscientiousness spearman greater 500 -0.007 [-0.08, 1.0] 0.560 1.000 bonf 0.036
9 Agreeableness Conscientiousness spearman greater 500 0.161 [0.09, 1.0] 0.000 0.002 bonf 0.976
2. Robust two-sided biweight midcorrelation with uncorrected p-values
>>> pcor = pg.pairwise_corr(data, columns=['Openness', 'Extraversion',
... 'Neuroticism'], method='bicor')
>>> pcor.round(3)
X Y method alternative n r CI95% p-unc power
0 Openness Extraversion bicor two-sided 500 0.247 [0.16, 0.33] 0.000 1.000
1 Openness Neuroticism bicor two-sided 500 -0.028 [-0.12, 0.06] 0.535 0.095
2 Extraversion Neuroticism bicor two-sided 500 -0.343 [-0.42, -0.26] 0.000 1.000
3. One-versus-all pairwise correlations
>>> pg.pairwise_corr(data, columns=['Neuroticism']).round(3)
X Y method alternative n r CI95% p-unc BF10 power
0 Neuroticism Extraversion pearson two-sided 500 -0.350 [-0.42, -0.27] 0.000 6.765e+12 1.000
1 Neuroticism Openness pearson two-sided 500 -0.010 [-0.1, 0.08] 0.817 0.058 0.056
2 Neuroticism Agreeableness pearson two-sided 500 -0.134 [-0.22, -0.05] 0.003 5.122 0.854
3 Neuroticism Conscientiousness pearson two-sided 500 -0.368 [-0.44, -0.29] 0.000 2.644e+14 1.000
4. Pairwise correlations between two lists of columns (cartesian product)
>>> columns = [['Neuroticism', 'Extraversion'], ['Openness']]
>>> pg.pairwise_corr(data, columns).round(3)
X Y method alternative n r CI95% p-unc BF10 power
0 Neuroticism Openness pearson two-sided 500 -0.010 [-0.1, 0.08] 0.817 0.058 0.056
1 Extraversion Openness pearson two-sided 500 0.267 [0.18, 0.35] 0.000 5.277e+06 1.000
5. As a Pandas method
>>> pcor = data.pairwise_corr(covar='Neuroticism', method='spearman')
6. Pairwise partial correlation
>>> pg.pairwise_corr(data, covar=['Neuroticism', 'Openness'])
X Y method covar alternative n r CI95% p-unc
0 Extraversion Agreeableness pearson ['Neuroticism', 'Openness'] two-sided 500 -0.038737 [-0.13, 0.05] 0.388361
1 Extraversion Conscientiousness pearson ['Neuroticism', 'Openness'] two-sided 500 -0.071427 [-0.16, 0.02] 0.111389
2 Agreeableness Conscientiousness pearson ['Neuroticism', 'Openness'] two-sided 500 0.123108 [0.04, 0.21] 0.005944
7. Pairwise partial correlation matrix using :py:func:`pingouin.pcorr`
>>> data[['Neuroticism', 'Openness', 'Extraversion']].pcorr().round(3)
Neuroticism Openness Extraversion
Neuroticism 1.000 0.092 -0.360
Openness 0.092 1.000 0.281
Extraversion -0.360 0.281 1.000
8. Correlation matrix with p-values using :py:func:`pingouin.rcorr`
>>> data[['Neuroticism', 'Openness', 'Extraversion']].rcorr()
Neuroticism Openness Extraversion
Neuroticism - ***
Openness -0.01 - ***
Extraversion -0.35 0.267 -
| @pf.register_dataframe_method
def pairwise_corr(
data,
columns=None,
covar=None,
alternative="two-sided",
method="pearson",
padjust="none",
nan_policy="pairwise",
):
"""Pairwise (partial) correlations between columns of a pandas dataframe.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
columns : list or str
Column names in data:
* ``["a", "b", "c"]``: combination between columns a, b, and c.
* ``["a"]``: product between a and all the other numeric columns.
* ``[["a"], ["b", "c"]]``: product between ["a"] and ["b", "c"].
* ``[["a", "d"], ["b", "c"]]``: product between ["a", "d"] and
["b", "c"].
* ``[["a", "d"], None]``: product between ["a", "d"] and all other
numeric columns in dataframe.
If column is None, the function will return the pairwise correlation
between the combination of all the numeric columns in data.
See the examples section for more details on this.
covar : None, string or list
Covariate(s) for partial correlation. Must be one or more columns
in data. Use a list if there are more than one covariate. If
``covar`` is not None, a partial correlation will be computed using
:py:func:`pingouin.partial_corr` function.
.. important:: Only ``method='pearson'`` and ``method='spearman'``
are currently supported in partial correlation.
alternative : string
Defines the alternative hypothesis, or tail of the correlation. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
p-value. "greater" tests against the alternative hypothesis that the correlation is
positive (greater than zero), "less" tests against the hypothesis that the correlation is
negative.
method : string
Correlation type:
* ``'pearson'``: Pearson :math:`r` product-moment correlation
* ``'spearman'``: Spearman :math:`\\rho` rank-order correlation
* ``'kendall'``: Kendall's :math:`\\tau_B` correlation
(for ordinal data)
* ``'bicor'``: Biweight midcorrelation (robust)
* ``'percbend'``: Percentage bend correlation (robust)
* ``'shepherd'``: Shepherd's pi correlation (robust)
* ``'skipped'``: Skipped correlation (robust)
padjust : string
Method used for testing and adjustment of pvalues.
* ``'none'``: no correction
* ``'bonf'``: one-step Bonferroni correction
* ``'sidak'``: one-step Sidak correction
* ``'holm'``: step-down method using Bonferroni adjustments
* ``'fdr_bh'``: Benjamini/Hochberg FDR correction
* ``'fdr_by'``: Benjamini/Yekutieli FDR correction
nan_policy : string
Can be ``'listwise'`` for listwise deletion of missing values
(= complete-case analysis) or ``'pairwise'`` (default) for the more
liberal pairwise deletion (= available-case analysis).
.. versionadded:: 0.2.9
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'X'``: Name(s) of first columns.
* ``'Y'``: Name(s) of second columns.
* ``'method'``: Correlation type.
* ``'covar'``: List of specified covariate(s), only when covariates are passed.
* ``'alternative'``: Tail of the test.
* ``'n'``: Sample size (after removal of missing values).
* ``'r'``: Correlation coefficients.
* ``'CI95'``: 95% parametric confidence intervals.
* ``'p-unc'``: Uncorrected p-values.
* ``'p-corr'``: Corrected p-values.
* ``'p-adjust'``: P-values correction method.
* ``'BF10'``: Bayes Factor of the alternative hypothesis (only for Pearson correlation)
* ``'power'``: achieved power of the test (= 1 - type II error).
Notes
-----
Please refer to the :py:func:`pingouin.corr()` function for a description
of the different methods. Missing values are automatically removed from the
data using a pairwise deletion.
This function is more flexible and gives a much more detailed
output than the :py:func:`pandas.DataFrame.corr()` method (i.e. p-values,
confidence interval, Bayes Factor...). This comes however at
an increased computational cost. While this should not be discernible for
a dataframe with less than 10,000 rows and/or less than 20 columns, this
function can be slow for very large datasets.
A faster alternative to get the r-values and p-values in a matrix format is
to use the :py:func:`pingouin.rcorr` function, which works directly as a
:py:class:`pandas.DataFrame` method (see example below).
This function also works with two-dimensional multi-index columns. In this
case, columns must be list(s) of tuple(s). Please refer to this `example
Jupyter notebook
<https://github.com/raphaelvallat/pingouin/blob/master/notebooks/04_Correlations.ipynb>`_
for more details.
If and only if ``covar`` is specified, this function will compute the
pairwise partial correlation between the variables. If you are only
interested in computing the partial correlation matrix (i.e. the raw
pairwise partial correlation coefficient matrix, without the p-values,
sample sizes, etc), a better alternative is to use the
:py:func:`pingouin.pcorr` function (see example 7).
Examples
--------
1. One-sided spearman correlation corrected for multiple comparisons
>>> import pandas as pd
>>> import pingouin as pg
>>> pd.set_option('display.expand_frame_repr', False)
>>> pd.set_option('display.max_columns', 20)
>>> data = pg.read_dataset('pairwise_corr').iloc[:, 1:]
>>> pg.pairwise_corr(data, method='spearman', alternative='greater', padjust='bonf').round(3)
X Y method alternative n r CI95% p-unc p-corr p-adjust power
0 Neuroticism Extraversion spearman greater 500 -0.325 [-0.39, 1.0] 1.000 1.000 bonf 0.000
1 Neuroticism Openness spearman greater 500 -0.028 [-0.1, 1.0] 0.735 1.000 bonf 0.012
2 Neuroticism Agreeableness spearman greater 500 -0.151 [-0.22, 1.0] 1.000 1.000 bonf 0.000
3 Neuroticism Conscientiousness spearman greater 500 -0.356 [-0.42, 1.0] 1.000 1.000 bonf 0.000
4 Extraversion Openness spearman greater 500 0.243 [0.17, 1.0] 0.000 0.000 bonf 1.000
5 Extraversion Agreeableness spearman greater 500 0.062 [-0.01, 1.0] 0.083 0.832 bonf 0.398
6 Extraversion Conscientiousness spearman greater 500 0.056 [-0.02, 1.0] 0.106 1.000 bonf 0.345
7 Openness Agreeableness spearman greater 500 0.170 [0.1, 1.0] 0.000 0.001 bonf 0.985
8 Openness Conscientiousness spearman greater 500 -0.007 [-0.08, 1.0] 0.560 1.000 bonf 0.036
9 Agreeableness Conscientiousness spearman greater 500 0.161 [0.09, 1.0] 0.000 0.002 bonf 0.976
2. Robust two-sided biweight midcorrelation with uncorrected p-values
>>> pcor = pg.pairwise_corr(data, columns=['Openness', 'Extraversion',
... 'Neuroticism'], method='bicor')
>>> pcor.round(3)
X Y method alternative n r CI95% p-unc power
0 Openness Extraversion bicor two-sided 500 0.247 [0.16, 0.33] 0.000 1.000
1 Openness Neuroticism bicor two-sided 500 -0.028 [-0.12, 0.06] 0.535 0.095
2 Extraversion Neuroticism bicor two-sided 500 -0.343 [-0.42, -0.26] 0.000 1.000
3. One-versus-all pairwise correlations
>>> pg.pairwise_corr(data, columns=['Neuroticism']).round(3)
X Y method alternative n r CI95% p-unc BF10 power
0 Neuroticism Extraversion pearson two-sided 500 -0.350 [-0.42, -0.27] 0.000 6.765e+12 1.000
1 Neuroticism Openness pearson two-sided 500 -0.010 [-0.1, 0.08] 0.817 0.058 0.056
2 Neuroticism Agreeableness pearson two-sided 500 -0.134 [-0.22, -0.05] 0.003 5.122 0.854
3 Neuroticism Conscientiousness pearson two-sided 500 -0.368 [-0.44, -0.29] 0.000 2.644e+14 1.000
4. Pairwise correlations between two lists of columns (cartesian product)
>>> columns = [['Neuroticism', 'Extraversion'], ['Openness']]
>>> pg.pairwise_corr(data, columns).round(3)
X Y method alternative n r CI95% p-unc BF10 power
0 Neuroticism Openness pearson two-sided 500 -0.010 [-0.1, 0.08] 0.817 0.058 0.056
1 Extraversion Openness pearson two-sided 500 0.267 [0.18, 0.35] 0.000 5.277e+06 1.000
5. As a Pandas method
>>> pcor = data.pairwise_corr(covar='Neuroticism', method='spearman')
6. Pairwise partial correlation
>>> pg.pairwise_corr(data, covar=['Neuroticism', 'Openness'])
X Y method covar alternative n r CI95% p-unc
0 Extraversion Agreeableness pearson ['Neuroticism', 'Openness'] two-sided 500 -0.038737 [-0.13, 0.05] 0.388361
1 Extraversion Conscientiousness pearson ['Neuroticism', 'Openness'] two-sided 500 -0.071427 [-0.16, 0.02] 0.111389
2 Agreeableness Conscientiousness pearson ['Neuroticism', 'Openness'] two-sided 500 0.123108 [0.04, 0.21] 0.005944
7. Pairwise partial correlation matrix using :py:func:`pingouin.pcorr`
>>> data[['Neuroticism', 'Openness', 'Extraversion']].pcorr().round(3)
Neuroticism Openness Extraversion
Neuroticism 1.000 0.092 -0.360
Openness 0.092 1.000 0.281
Extraversion -0.360 0.281 1.000
8. Correlation matrix with p-values using :py:func:`pingouin.rcorr`
>>> data[['Neuroticism', 'Openness', 'Extraversion']].rcorr()
Neuroticism Openness Extraversion
Neuroticism - ***
Openness -0.01 - ***
Extraversion -0.35 0.267 -
"""
from pingouin.correlation import corr, partial_corr
# Check arguments
assert alternative in [
"two-sided",
"greater",
"less",
], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
assert nan_policy in ["listwise", "pairwise"]
# Keep only numeric columns
data = data._get_numeric_data()
# Remove columns with constant value and/or NaN
data = data.loc[:, data.nunique(dropna=True) >= 2]
# Extract columns names
keys = data.columns.tolist()
# First ensure that columns is a list
if isinstance(columns, (str, tuple)):
columns = [columns]
def traverse(o, tree_types=(list, tuple)):
"""Helper function to flatten nested lists.
From https://stackoverflow.com/a/6340578
"""
if isinstance(o, tree_types):
for value in o:
yield from traverse(value, tree_types)
else:
yield o
# Check if columns index has multiple levels
if isinstance(data.columns, pd.MultiIndex):
multi_index = True
if columns is not None:
# Simple List with one element: [('L0', 'L1')]
# Simple list with >= 2 elements: [('L0', 'L1'), ('L0', 'L2')]
# Nested lists: [[('L0', 'L1')], ...] or [..., [('L0', 'L1')]]
col_flatten = list(traverse(columns, tree_types=list))
assert all(isinstance(c, (tuple, type(None))) for c in col_flatten)
else:
multi_index = False
# Then define combinations / products between columns
if columns is None:
# Case A: column is not defined --> corr between all numeric columns
combs = list(combinations(keys, 2))
else:
# Case B: column is specified
if isinstance(columns[0], (list, np.ndarray)):
group1 = [e for e in columns[0] if e in keys]
# Assert that column is two-dimensional
if len(columns) == 1:
columns.append(None)
if isinstance(columns[1], (list, np.ndarray)) and len(columns[1]):
# B1: [['a', 'b'], ['c', 'd']]
group2 = [e for e in columns[1] if e in keys]
else:
# B2: [['a', 'b']], [['a', 'b'], None] or [['a', 'b'], 'all']
group2 = [e for e in keys if e not in group1]
combs = list(product(group1, group2))
else:
# Column is a simple list
if len(columns) == 1:
# Case B3: one-versus-all, e.g. ['a'] or 'a'
# Check that this column exist
if columns[0] not in keys:
msg = '"%s" is not in data or is not numeric.' % columns[0]
raise ValueError(msg)
others = [e for e in keys if e != columns[0]]
combs = list(product(columns, others))
else:
# Combinations between all specified columns ['a', 'b', 'c']
# Make sure that we keep numeric columns
columns = [c for c in columns if c in keys]
if len(columns) == 1:
# If only one-column is left, equivalent to ['a']
others = [e for e in keys if e != columns[0]]
combs = list(product(columns, others))
else:
# combinations between ['a', 'b', 'c']
combs = list(combinations(columns, 2))
combs = np.array(combs)
if len(combs) == 0:
raise ValueError(
"No column combination found. Please make sure that "
"the specified columns exist in the dataframe, are "
"numeric, and contains at least two unique values."
)
# Initialize empty dataframe
if multi_index:
X = list(zip(combs[:, 0, 0], combs[:, 0, 1]))
Y = list(zip(combs[:, 1, 0], combs[:, 1, 1]))
else:
X = combs[:, 0]
Y = combs[:, 1]
stats = pd.DataFrame(
{"X": X, "Y": Y, "method": method, "alternative": alternative},
index=range(len(combs)),
columns=[
"X",
"Y",
"method",
"alternative",
"n",
"outliers",
"r",
"CI95%",
"p-val",
"BF10",
"power",
],
)
# Now we check if covariates are present
if covar is not None:
assert isinstance(covar, (str, list, pd.Index)), "covar must be list or string."
if isinstance(covar, str):
covar = [covar]
elif isinstance(covar, pd.Index):
covar = covar.tolist()
# Check that columns exist and are numeric
assert all(
[c in keys for c in covar]
), "Covariate(s) are either not in data or not numeric."
# And we make sure that X or Y does not contain covar
stats = stats[~stats[["X", "Y"]].isin(covar).any(axis=1)]
stats = stats.reset_index(drop=True)
if stats.shape[0] == 0:
raise ValueError(
"No column combination found. Please make sure "
"that the specified columns and covar exist in "
"the dataframe, are numeric, and contains at "
"least two unique values."
)
# Listwise deletion of missing values
if nan_policy == "listwise":
all_cols = np.unique(stats[["X", "Y"]].to_numpy()).tolist()
if covar is not None:
all_cols.extend(covar)
data = data[all_cols].dropna()
# For max precision, make sure rounding is disabled
old_options = options.copy()
options["round"] = None
# Compute pairwise correlations and fill dataframe
for i in range(stats.shape[0]):
col1, col2 = stats.at[i, "X"], stats.at[i, "Y"]
if covar is None:
cor_st = corr(
data[col1].to_numpy(), data[col2].to_numpy(), alternative=alternative, method=method
)
else:
cor_st = partial_corr(
data=data, x=col1, y=col2, covar=covar, alternative=alternative, method=method
)
cor_st_keys = cor_st.columns.tolist()
for c in cor_st_keys:
stats.at[i, c] = cor_st.at[method, c]
options.update(old_options) # restore options
# Force conversion to numeric
stats = stats.astype({"r": float, "n": int, "p-val": float, "outliers": float, "power": float})
# Multiple comparisons
stats = stats.rename(columns={"p-val": "p-unc"})
padjust = None if stats["p-unc"].size <= 1 else padjust
if padjust is not None:
if padjust.lower() != "none":
reject, stats["p-corr"] = multicomp(stats["p-unc"].to_numpy(), method=padjust)
stats["p-adjust"] = padjust
else:
stats["p-corr"] = None
stats["p-adjust"] = None
# Standardize correlation coefficients (Fisher z-transformation)
# stats['z'] = np.arctanh(stats['r'].to_numpy())
col_order = [
"X",
"Y",
"method",
"alternative",
"n",
"outliers",
"r",
"CI95%",
"p-unc",
"p-corr",
"p-adjust",
"BF10",
"power",
]
# Reorder columns and remove empty ones
stats = stats.reindex(columns=col_order).dropna(how="all", axis=1)
# Add covariates names if present
if covar is not None:
stats.insert(loc=3, column="covar", value=str(covar))
return _postprocess_dataframe(stats)
| (data, columns=None, covar=None, alternative='two-sided', method='pearson', padjust='none', nan_policy='pairwise') |
32,031 | pingouin.pairwise | pairwise_gameshowell | Pairwise Games-Howell post-hoc test.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame
dv : string
Name of column containing the dependent variable.
between: string
Name of column containing the between factor.
effsize : string or None
Effect size type. Available methods are:
* ``'none'``: no effect size
* ``'cohen'``: Unbiased Cohen d
* ``'hedges'``: Hedges g
* ``'r'``: Pearson correlation coefficient
* ``'eta-square'``: Eta-square
* ``'odds-ratio'``: Odds ratio
* ``'AUC'``: Area Under the Curve
* ``'CLES'``: Common Language Effect Size
Returns
-------
stats : :py:class:`pandas.DataFrame`
Stats summary:
* ``'A'``: Name of first measurement
* ``'B'``: Name of second measurement
* ``'mean(A)'``: Mean of first measurement
* ``'mean(B)'``: Mean of second measurement
* ``'diff'``: Mean difference (= mean(A) - mean(B))
* ``'se'``: Standard error
* ``'T'``: T-values
* ``'df'``: adjusted degrees of freedom
* ``'pval'``: Games-Howell corrected p-values
* ``'hedges'``: Hedges effect size (or any effect size defined in
``effsize``)
See also
--------
pairwise_tests, pairwise_tukey
Notes
-----
Games-Howell [1]_ is very similar to the Tukey HSD post-hoc test but is much more robust to
heterogeneity of variances. While the Tukey-HSD post-hoc is optimal after a classic one-way
ANOVA, the Games-Howell is optimal after a Welch ANOVA. Please note that Games-Howell
is not valid for repeated measures ANOVA. Only one-way ANOVA designs are supported.
Compared to the Tukey-HSD test, the Games-Howell test uses different pooled variances for
each pair of variables instead of the same pooled variance.
The T-values are defined as:
.. math::
t = \frac{\overline{x}_i - \overline{x}_j}
{\sqrt{(\frac{s_i^2}{n_i} + \frac{s_j^2}{n_j})}}
and the corrected degrees of freedom are:
.. math::
v = \frac{(\frac{s_i^2}{n_i} + \frac{s_j^2}{n_j})^2}
{\frac{(\frac{s_i^2}{n_i})^2}{n_i-1} +
\frac{(\frac{s_j^2}{n_j})^2}{n_j-1}}
where :math:`\overline{x}_i`, :math:`s_i^2`, and :math:`n_i` are the mean, variance and sample
size of the first group and :math:`\overline{x}_j`, :math:`s_j^2`, and :math:`n_j` the mean,
variance and sample size of the second group.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\sqrt2|t_i|, r, v_i)`.
References
----------
.. [1] Games, Paul A., and John F. Howell. "Pairwise multiple comparison
procedures with unequal n's and/or variances: a Monte Carlo study."
Journal of Educational Statistics 1.2 (1976): 113-125.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Games-Howell post-hocs on the Penguins dataset.
>>> import pingouin as pg
>>> df = pg.read_dataset('penguins')
>>> pg.pairwise_gameshowell(data=df, dv='body_mass_g',
... between='species').round(3)
A B mean(A) mean(B) diff se T df pval hedges
0 Adelie Chinstrap 3700.662 3733.088 -32.426 59.706 -0.543 152.455 0.85 -0.074
1 Adelie Gentoo 3700.662 5076.016 -1375.354 58.811 -23.386 249.643 0.00 -2.860
2 Chinstrap Gentoo 3733.088 5076.016 -1342.928 65.103 -20.628 170.404 0.00 -2.875
def pairwise_gameshowell(data=None, dv=None, between=None, effsize="hedges"):
    """Pairwise Games-Howell post-hoc test.

    A Tukey-like pairwise comparison that does not assume equal group
    variances: each pair of groups uses its own pooled variance together
    with Welch-corrected degrees of freedom. It is the recommended
    post-hoc after a Welch ANOVA. Only one-way between-subject designs
    are supported (not valid for repeated-measures ANOVA).

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Long-format dataframe.
    dv : string
        Name of the column containing the dependent variable.
    between : string
        Name of the column containing the between-subject factor.
    effsize : string or None
        Effect size type, e.g. ``'none'``, ``'cohen'``, ``'hedges'``,
        ``'r'``, ``'eta-square'``, ``'odds-ratio'``, ``'AUC'`` or
        ``'CLES'``.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        One row per pair of groups with columns ``'A'``, ``'B'``,
        ``'mean(A)'``, ``'mean(B)'``, ``'diff'`` (= mean(A) - mean(B)),
        ``'se'``, ``'T'``, ``'df'`` (Welch-adjusted degrees of freedom),
        ``'pval'`` (Games-Howell corrected, approximated with the
        studentized range distribution) and the requested effect size.

    References
    ----------
    Games, P. A., & Howell, J. F. (1976). Pairwise multiple comparison
    procedures with unequal n's and/or variances: a Monte Carlo study.
    Journal of Educational Statistics, 1(2), 113-125.
    """
    # Validate the between-subject design and the input dataframe.
    data = _check_dataframe(dv=dv, between=between, effects="between", data=data)
    # Reset index to avoid duplicate-axis errors downstream.
    data = data.reset_index(drop=True)

    # Per-group descriptive statistics (groupby sorts the keys by default).
    n_groups = data[between].nunique()
    grouped = data.groupby(between, observed=True)[dv]
    # NOTE: do not replace the line below with np.unique(data[between]):
    # for Categorical columns pandas may apply a custom, non-alphabetical
    # ordering. See https://github.com/raphaelvallat/pingouin/issues/111
    group_labels = np.array(list(grouped.groups.keys()))
    counts = grouped.count().to_numpy()
    means = grouped.mean(numeric_only=True).to_numpy()
    # numeric_only keyword added to GroupBy.var in pandas 1.5
    variances = grouped.var(numeric_only=True).to_numpy()

    # All unique pairs of group indices.
    pairs = np.array(list(combinations(range(n_groups), 2)))
    ia, ib = pairs[:, 0], pairs[:, 1]

    # Welch-style statistics: each pair gets its own pooled variance.
    var_n_a = variances[ia] / counts[ia]
    var_n_b = variances[ib] / counts[ib]
    mean_diff = means[ia] - means[ib]
    std_err = np.sqrt(var_n_a + var_n_b)
    tvals = mean_diff / std_err
    # Welch-Satterthwaite corrected degrees of freedom.
    dof = (var_n_a + var_n_b) ** 2 / (
        var_n_a**2 / (counts[ia] - 1) + var_n_b**2 / (counts[ib] - 1)
    )

    # Games-Howell corrected p-values, approximated with the studentized
    # range distribution Q(sqrt(2)*|t|, r, v) and clipped into [0, 1].
    pvals = np.clip(studentized_range.sf(np.sqrt(2) * np.abs(tvals), n_groups, dof), 0, 1)

    # Exact effect sizes computed on the raw per-group observations
    # (rather than approximated from the t-values).
    effsizes = [
        compute_effsize(
            grouped.get_group(group_labels[a]),
            grouped.get_group(group_labels[b]),
            paired=False,
            eftype=effsize,
        )
        for a, b in zip(ia, ib)
    ]

    # Assemble the summary table.
    stats = pd.DataFrame(
        {
            "A": group_labels[ia],
            "B": group_labels[ib],
            "mean(A)": means[ia],
            "mean(B)": means[ib],
            "diff": mean_diff,
            "se": std_err,
            "T": tvals,
            "df": dof,
            "pval": pvals,
            effsize: effsizes,
        }
    )
    return _postprocess_dataframe(stats)
| (data=None, dv=None, between=None, effsize='hedges') |
32,032 | pingouin.pairwise | pairwise_tests | Pairwise tests.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
dv : string
Name of column containing the dependent variable.
between : string or list with 2 elements
Name of column(s) containing the between-subject factor(s).
within : string or list with 2 elements
Name of column(s) containing the within-subject factor(s), i.e. the
repeated measurements.
subject : string
Name of column containing the subject identifier. This is mandatory
when ``within`` is specified.
parametric : boolean
If True (default), use the parametric :py:func:`ttest` function.
If False, use :py:func:`pingouin.wilcoxon` or :py:func:`pingouin.mwu`
for paired or unpaired samples, respectively.
marginal : boolean
If True (default), the between-subject pairwise T-test(s) will be calculated
after averaging across all levels of the within-subject factor in mixed
design. This is recommended to avoid violating the assumption of
independence and conflating the degrees of freedom by the
number of repeated measurements.
.. versionadded:: 0.3.2
alpha : float
Significance level
alternative : string
Defines the alternative hypothesis, or tail of the test. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return one-sided
p-values. "greater" tests against the alternative hypothesis that the mean of ``x``
is greater than the mean of ``y``.
padjust : string
Method used for testing and adjustment of pvalues.
* ``'none'``: no correction
* ``'bonf'``: one-step Bonferroni correction
* ``'sidak'``: one-step Sidak correction
* ``'holm'``: step-down method using Bonferroni adjustments
* ``'fdr_bh'``: Benjamini/Hochberg FDR correction
* ``'fdr_by'``: Benjamini/Yekutieli FDR correction
effsize : string or None
Effect size type. Available methods are:
* ``'none'``: no effect size
* ``'cohen'``: Unbiased Cohen d
* ``'hedges'``: Hedges g
* ``'r'``: Pearson correlation coefficient
* ``'eta-square'``: Eta-square
* ``'odds-ratio'``: Odds ratio
* ``'AUC'``: Area Under the Curve
* ``'CLES'``: Common Language Effect Size
correction : string or boolean
For independent two sample T-tests, specify whether or not to correct for
unequal variances using Welch separate variances T-test. If `'auto'`,
it will automatically uses Welch T-test when the sample sizes are
unequal, as recommended by Zimmerman 2004.
.. versionadded:: 0.3.2
nan_policy : string
Can be `'listwise'` for listwise deletion of missing values in repeated
measures design (= complete-case analysis) or `'pairwise'` for the
more liberal pairwise deletion (= available-case analysis). The former (default) is more
appropriate for post-hoc analysis following an ANOVA, however it can drastically reduce
the power of the test: any subject with one or more missing value(s) will be
completely removed from the analysis.
.. versionadded:: 0.2.9
return_desc : boolean
If True, append group means and std to the output dataframe
interaction : boolean
If there are multiple factors and ``interaction`` is True (default),
Pingouin will also calculate T-tests for the interaction term (see Notes).
.. versionadded:: 0.2.9
within_first : boolean
Determines the order of the interaction in mixed design. Pingouin will
return within * between when this parameter is set to True (default),
and between * within otherwise.
.. versionadded:: 0.3.6
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'Contrast'``: Contrast (= independent variable or interaction)
* ``'A'``: Name of first measurement
* ``'B'``: Name of second measurement
* ``'Paired'``: indicates whether the two measurements are paired or
independent
* ``'Parametric'``: indicates if (non)-parametric tests were used
* ``'T'``: T statistic (only if parametric=True)
* ``'U-val'``: Mann-Whitney U stat (if parametric=False and unpaired
data)
* ``'W-val'``: Wilcoxon W stat (if parametric=False and paired data)
* ``'dof'``: degrees of freedom (only if parametric=True)
* ``'alternative'``: tail of the test
* ``'p-unc'``: Uncorrected p-values
* ``'p-corr'``: Corrected p-values
* ``'p-adjust'``: p-values correction method
* ``'BF10'``: Bayes Factor
* ``'hedges'``: effect size (or any effect size defined in
``effsize``)
See also
--------
ttest, mwu, wilcoxon, compute_effsize, multicomp
Notes
-----
Data are expected to be in long-format. If your data is in wide-format,
you can use the :py:func:`pandas.melt` function to convert from wide to
long format.
If ``between`` or ``within`` is a list (e.g. ['col1', 'col2']),
the function returns 1) the pairwise T-tests between each values of the
first column, 2) the pairwise T-tests between each values of the second
column and 3) the interaction between col1 and col2. The interaction is
dependent of the order of the list, so ['col1', 'col2'] will not yield the
same results as ['col2', 'col1']. Furthermore, the interaction will only be
calculated if ``interaction=True``.
If ``between`` is a list with two elements, the output
model is between1 + between2 + between1 * between2.
Similarly, if ``within`` is a list with two elements, the output model is
within1 + within2 + within1 * within2.
If both ``between`` and ``within`` are specified, the output model is
within + between + within * between (= mixed design), unless
``within_first=False`` in which case the model becomes between + within +
between * within.
Missing values in repeated measurements are automatically removed using a
listwise (default) or pairwise deletion strategy. The former is more conservative, as any
subject with one or more missing value(s) will be completely removed from the dataframe prior
to calculating the T-tests. The ``nan_policy`` parameter can therefore have a huge impact
on the results.
Examples
--------
For more examples, please refer to the `Jupyter notebooks
<https://github.com/raphaelvallat/pingouin/blob/master/notebooks/01_ANOVA.ipynb>`_
1. One between-subject factor
>>> import pandas as pd
>>> import pingouin as pg
>>> pd.set_option('display.expand_frame_repr', False)
>>> pd.set_option('display.max_columns', 20)
>>> df = pg.read_dataset('mixed_anova.csv')
>>> pg.pairwise_tests(dv='Scores', between='Group', data=df).round(3)
Contrast A B Paired Parametric T dof alternative p-unc BF10 hedges
0 Group Control Meditation False True -2.29 178.0 two-sided 0.023 1.813 -0.34
2. One within-subject factor
>>> post_hocs = pg.pairwise_tests(dv='Scores', within='Time', subject='Subject', data=df)
>>> post_hocs.round(3)
Contrast A B Paired Parametric T dof alternative p-unc BF10 hedges
0 Time August January True True -1.740 59.0 two-sided 0.087 0.582 -0.328
1 Time August June True True -2.743 59.0 two-sided 0.008 4.232 -0.483
2 Time January June True True -1.024 59.0 two-sided 0.310 0.232 -0.170
3. Non-parametric pairwise paired test (wilcoxon)
>>> pg.pairwise_tests(dv='Scores', within='Time', subject='Subject',
... data=df, parametric=False).round(3)
Contrast A B Paired Parametric W-val alternative p-unc hedges
0 Time August January True False 716.0 two-sided 0.144 -0.328
1 Time August June True False 564.0 two-sided 0.010 -0.483
2 Time January June True False 887.0 two-sided 0.840 -0.170
4. Mixed design (within and between) with bonferroni-corrected p-values
>>> posthocs = pg.pairwise_tests(dv='Scores', within='Time', subject='Subject',
... between='Group', padjust='bonf', data=df)
>>> posthocs.round(3)
Contrast Time A B Paired Parametric T dof alternative p-unc p-corr p-adjust BF10 hedges
0 Time - August January True True -1.740 59.0 two-sided 0.087 0.261 bonf 0.582 -0.328
1 Time - August June True True -2.743 59.0 two-sided 0.008 0.024 bonf 4.232 -0.483
2 Time - January June True True -1.024 59.0 two-sided 0.310 0.931 bonf 0.232 -0.170
3 Group - Control Meditation False True -2.248 58.0 two-sided 0.028 NaN NaN 2.096 -0.573
4 Time * Group August Control Meditation False True 0.316 58.0 two-sided 0.753 1.000 bonf 0.274 0.081
5 Time * Group January Control Meditation False True -1.434 58.0 two-sided 0.157 0.471 bonf 0.619 -0.365
6 Time * Group June Control Meditation False True -2.744 58.0 two-sided 0.008 0.024 bonf 5.593 -0.699
5. Two between-subject factors. The order of the ``between`` factors matters!
>>> pg.pairwise_tests(dv='Scores', between=['Group', 'Time'], data=df).round(3)
Contrast Group A B Paired Parametric T dof alternative p-unc BF10 hedges
0 Group - Control Meditation False True -2.290 178.0 two-sided 0.023 1.813 -0.340
1 Time - August January False True -1.806 118.0 two-sided 0.074 0.839 -0.328
2 Time - August June False True -2.660 118.0 two-sided 0.009 4.499 -0.483
3 Time - January June False True -0.934 118.0 two-sided 0.352 0.288 -0.170
4 Group * Time Control August January False True -0.383 58.0 two-sided 0.703 0.279 -0.098
5 Group * Time Control August June False True -0.292 58.0 two-sided 0.771 0.272 -0.074
6 Group * Time Control January June False True 0.045 58.0 two-sided 0.964 0.263 0.011
7 Group * Time Meditation August January False True -2.188 58.0 two-sided 0.033 1.884 -0.558
8 Group * Time Meditation August June False True -4.040 58.0 two-sided 0.000 148.302 -1.030
9 Group * Time Meditation January June False True -1.442 58.0 two-sided 0.155 0.625 -0.367
6. Same but without the interaction, and using a directional test
>>> df.pairwise_tests(dv='Scores', between=['Group', 'Time'], alternative="less",
... interaction=False).round(3)
Contrast A B Paired Parametric T dof alternative p-unc BF10 hedges
0 Group Control Meditation False True -2.290 178.0 less 0.012 3.626 -0.340
1 Time August January False True -1.806 118.0 less 0.037 1.679 -0.328
2 Time August June False True -2.660 118.0 less 0.004 8.998 -0.483
3 Time January June False True -0.934 118.0 less 0.176 0.577 -0.170
@pf.register_dataframe_method
def pairwise_tests(
    data=None,
    dv=None,
    between=None,
    within=None,
    subject=None,
    parametric=True,
    marginal=True,
    alpha=0.05,
    alternative="two-sided",
    padjust="none",
    effsize="hedges",
    correction="auto",
    nan_policy="listwise",
    return_desc=False,
    interaction=True,
    within_first=True,
):
    """Pairwise tests.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Note that this function can also directly be used as a
        Pandas method, in which case this argument is no longer needed.
    dv : string
        Name of column containing the dependent variable.
    between : string or list with 2 elements
        Name of column(s) containing the between-subject factor(s).
    within : string or list with 2 elements
        Name of column(s) containing the within-subject factor(s), i.e. the
        repeated measurements.
    subject : string
        Name of column containing the subject identifier. This is mandatory
        when ``within`` is specified.
    parametric : boolean
        If True (default), use the parametric :py:func:`ttest` function.
        If False, use :py:func:`pingouin.wilcoxon` or :py:func:`pingouin.mwu`
        for paired or unpaired samples, respectively.
    marginal : boolean
        If True (default), the between-subject pairwise T-test(s) will be calculated
        after averaging across all levels of the within-subject factor in mixed
        design. This is recommended to avoid violating the assumption of
        independence and conflating the degrees of freedom by the
        number of repeated measurements.

        .. versionadded:: 0.3.2
    alpha : float
        Significance level
    alternative : string
        Defines the alternative hypothesis, or tail of the test. Must be one of
        "two-sided" (default), "greater" or "less". Both "greater" and "less" return one-sided
        p-values. "greater" tests against the alternative hypothesis that the mean of ``x``
        is greater than the mean of ``y``.
    padjust : string
        Method used for testing and adjustment of pvalues.

        * ``'none'``: no correction
        * ``'bonf'``: one-step Bonferroni correction
        * ``'sidak'``: one-step Sidak correction
        * ``'holm'``: step-down method using Bonferroni adjustments
        * ``'fdr_bh'``: Benjamini/Hochberg FDR correction
        * ``'fdr_by'``: Benjamini/Yekutieli FDR correction
    effsize : string or None
        Effect size type. Available methods are:

        * ``'none'``: no effect size
        * ``'cohen'``: Unbiased Cohen d
        * ``'hedges'``: Hedges g
        * ``'r'``: Pearson correlation coefficient
        * ``'eta-square'``: Eta-square
        * ``'odds-ratio'``: Odds ratio
        * ``'AUC'``: Area Under the Curve
        * ``'CLES'``: Common Language Effect Size
    correction : string or boolean
        For independent two sample T-tests, specify whether or not to correct for
        unequal variances using Welch separate variances T-test. If `'auto'`,
        it will automatically uses Welch T-test when the sample sizes are
        unequal, as recommended by Zimmerman 2004.

        .. versionadded:: 0.3.2
    nan_policy : string
        Can be `'listwise'` for listwise deletion of missing values in repeated
        measures design (= complete-case analysis) or `'pairwise'` for the
        more liberal pairwise deletion (= available-case analysis). The former (default) is more
        appropriate for post-hoc analysis following an ANOVA, however it can drastically reduce
        the power of the test: any subject with one or more missing value(s) will be
        completely removed from the analysis.

        .. versionadded:: 0.2.9
    return_desc : boolean
        If True, append group means and std to the output dataframe
    interaction : boolean
        If there are multiple factors and ``interaction`` is True (default),
        Pingouin will also calculate T-tests for the interaction term (see Notes).

        .. versionadded:: 0.2.9
    within_first : boolean
        Determines the order of the interaction in mixed design. Pingouin will
        return within * between when this parameter is set to True (default),
        and between * within otherwise.

        .. versionadded:: 0.3.6

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'Contrast'``: Contrast (= independent variable or interaction)
        * ``'A'``: Name of first measurement
        * ``'B'``: Name of second measurement
        * ``'Paired'``: indicates whether the two measurements are paired or
          independent
        * ``'Parametric'``: indicates if (non)-parametric tests were used
        * ``'T'``: T statistic (only if parametric=True)
        * ``'U-val'``: Mann-Whitney U stat (if parametric=False and unpaired
          data)
        * ``'W-val'``: Wilcoxon W stat (if parametric=False and paired data)
        * ``'dof'``: degrees of freedom (only if parametric=True)
        * ``'alternative'``: tail of the test
        * ``'p-unc'``: Uncorrected p-values
        * ``'p-corr'``: Corrected p-values
        * ``'p-adjust'``: p-values correction method
        * ``'BF10'``: Bayes Factor
        * ``'hedges'``: effect size (or any effect size defined in
          ``effsize``)

    See also
    --------
    ttest, mwu, wilcoxon, compute_effsize, multicomp

    Notes
    -----
    Data are expected to be in long-format. If your data is in wide-format,
    you can use the :py:func:`pandas.melt` function to convert from wide to
    long format.

    If ``between`` or ``within`` is a list (e.g. ['col1', 'col2']),
    the function returns 1) the pairwise T-tests between each values of the
    first column, 2) the pairwise T-tests between each values of the second
    column and 3) the interaction between col1 and col2. The interaction is
    dependent of the order of the list, so ['col1', 'col2'] will not yield the
    same results as ['col2', 'col1']. Furthermore, the interaction will only be
    calculated if ``interaction=True``.

    If ``between`` is a list with two elements, the output
    model is between1 + between2 + between1 * between2.

    Similarly, if ``within`` is a list with two elements, the output model is
    within1 + within2 + within1 * within2.

    If both ``between`` and ``within`` are specified, the output model is
    within + between + within * between (= mixed design), unless
    ``within_first=False`` in which case the model becomes between + within +
    between * within.

    Missing values in repeated measurements are automatically removed using a
    listwise (default) or pairwise deletion strategy. The former is more conservative, as any
    subject with one or more missing value(s) will be completely removed from the dataframe prior
    to calculating the T-tests. The ``nan_policy`` parameter can therefore have a huge impact
    on the results.

    Examples
    --------
    For more examples, please refer to the `Jupyter notebooks
    <https://github.com/raphaelvallat/pingouin/blob/master/notebooks/01_ANOVA.ipynb>`_

    1. One between-subject factor

    >>> import pandas as pd
    >>> import pingouin as pg
    >>> pd.set_option('display.expand_frame_repr', False)
    >>> pd.set_option('display.max_columns', 20)
    >>> df = pg.read_dataset('mixed_anova.csv')
    >>> pg.pairwise_tests(dv='Scores', between='Group', data=df).round(3)
      Contrast        A           B  Paired  Parametric     T    dof alternative  p-unc   BF10  hedges
    0    Group  Control  Meditation   False        True -2.29  178.0   two-sided  0.023  1.813   -0.34

    2. One within-subject factor

    >>> post_hocs = pg.pairwise_tests(dv='Scores', within='Time', subject='Subject', data=df)
    >>> post_hocs.round(3)
      Contrast        A        B  Paired  Parametric      T   dof alternative  p-unc   BF10  hedges
    0     Time   August  January    True        True -1.740  59.0   two-sided  0.087  0.582  -0.328
    1     Time   August     June    True        True -2.743  59.0   two-sided  0.008  4.232  -0.483
    2     Time  January     June    True        True -1.024  59.0   two-sided  0.310  0.232  -0.170

    3. Non-parametric pairwise paired test (wilcoxon)

    >>> pg.pairwise_tests(dv='Scores', within='Time', subject='Subject',
    ...                   data=df, parametric=False).round(3)
      Contrast        A        B  Paired  Parametric  W-val alternative  p-unc  hedges
    0     Time   August  January    True       False  716.0   two-sided  0.144  -0.328
    1     Time   August     June    True       False  564.0   two-sided  0.010  -0.483
    2     Time  January     June    True       False  887.0   two-sided  0.840  -0.170

    4. Mixed design (within and between) with bonferroni-corrected p-values

    >>> posthocs = pg.pairwise_tests(dv='Scores', within='Time', subject='Subject',
    ...                              between='Group', padjust='bonf', data=df)
    >>> posthocs.round(3)
           Contrast     Time        A           B Paired  Parametric      T   dof alternative  p-unc  p-corr p-adjust   BF10  hedges
    0          Time        -   August     January   True        True -1.740  59.0   two-sided  0.087   0.261     bonf  0.582  -0.328
    1          Time        -   August        June   True        True -2.743  59.0   two-sided  0.008   0.024     bonf  4.232  -0.483
    2          Time        -  January        June   True        True -1.024  59.0   two-sided  0.310   0.931     bonf  0.232  -0.170
    3         Group        -  Control  Meditation  False        True -2.248  58.0   two-sided  0.028     NaN      NaN  2.096  -0.573
    4  Time * Group   August  Control  Meditation  False        True  0.316  58.0   two-sided  0.753   1.000     bonf  0.274   0.081
    5  Time * Group  January  Control  Meditation  False        True -1.434  58.0   two-sided  0.157   0.471     bonf  0.619  -0.365
    6  Time * Group     June  Control  Meditation  False        True -2.744  58.0   two-sided  0.008   0.024     bonf  5.593  -0.699

    5. Two between-subject factors. The order of the ``between`` factors matters!

    >>> pg.pairwise_tests(dv='Scores', between=['Group', 'Time'], data=df).round(3)
           Contrast       Group        A           B Paired  Parametric      T    dof alternative  p-unc     BF10  hedges
    0         Group           -  Control  Meditation  False        True -2.290  178.0   two-sided  0.023    1.813  -0.340
    1          Time           -   August     January  False        True -1.806  118.0   two-sided  0.074    0.839  -0.328
    2          Time           -   August        June  False        True -2.660  118.0   two-sided  0.009    4.499  -0.483
    3          Time           -  January        June  False        True -0.934  118.0   two-sided  0.352    0.288  -0.170
    4  Group * Time     Control   August     January  False        True -0.383   58.0   two-sided  0.703    0.279  -0.098
    5  Group * Time     Control   August        June  False        True -0.292   58.0   two-sided  0.771    0.272  -0.074
    6  Group * Time     Control  January        June  False        True  0.045   58.0   two-sided  0.964    0.263   0.011
    7  Group * Time  Meditation   August     January  False        True -2.188   58.0   two-sided  0.033    1.884  -0.558
    8  Group * Time  Meditation   August        June  False        True -4.040   58.0   two-sided  0.000  148.302  -1.030
    9  Group * Time  Meditation  January        June  False        True -1.442   58.0   two-sided  0.155    0.625  -0.367

    6. Same but without the interaction, and using a directional test

    >>> df.pairwise_tests(dv='Scores', between=['Group', 'Time'], alternative="less",
    ...                   interaction=False).round(3)
      Contrast        A           B  Paired  Parametric      T    dof alternative  p-unc   BF10  hedges
    0    Group  Control  Meditation   False        True -2.290  178.0        less  0.012  3.626  -0.340
    1     Time   August     January   False        True -1.806  118.0        less  0.037  1.679  -0.328
    2     Time   August        June   False        True -2.660  118.0        less  0.004  8.998  -0.483
    3     Time  January        June   False        True -0.934  118.0        less  0.176  0.577  -0.170
    """
    from .parametric import ttest
    from .nonparametric import wilcoxon, mwu

    # Safety checks
    data = _check_dataframe(
        dv=dv, between=between, within=within, subject=subject, effects="all", data=data
    )
    assert alternative in [
        "two-sided",
        "greater",
        "less",
    ], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    assert isinstance(alpha, float), "alpha must be float."
    assert nan_policy in ["listwise", "pairwise"]

    # Check if we have multiple between or within factors
    multiple_between = False
    multiple_within = False
    contrast = None

    if isinstance(between, list):
        if len(between) > 1:
            multiple_between = True
            contrast = "multiple_between"
            assert all([b in data.keys() for b in between])
        else:
            between = between[0]

    if isinstance(within, list):
        if len(within) > 1:
            multiple_within = True
            contrast = "multiple_within"
            assert all([w in data.keys() for w in within])
        else:
            within = within[0]

    if all([multiple_within, multiple_between]):
        raise ValueError(
            "Multiple between and within factors are currently not supported. "
            "Please select only one."
        )

    # Check the other cases. Between and within column names can be str or int (not float).
    if isinstance(between, (str, int)) and within is None:
        contrast = "simple_between"
        assert between in data.keys()
    if isinstance(within, (str, int)) and between is None:
        contrast = "simple_within"
        assert within in data.keys()
    if isinstance(between, (str, int)) and isinstance(within, (str, int)):
        contrast = "within_between"
        assert all([between in data.keys(), within in data.keys()])

    # Create col_order
    col_order = [
        "Contrast",
        "Time",
        "A",
        "B",
        "mean(A)",
        "std(A)",
        "mean(B)",
        "std(B)",
        "Paired",
        "Parametric",
        "T",
        "U-val",
        "W-val",
        "dof",
        "alternative",
        "p-unc",
        "p-corr",
        "p-adjust",
        "BF10",
        effsize,
    ]

    # If repeated measures, pivot and melt the table. This has several effects:
    # 1) Force missing values to be explicit (a NaN cell is created)
    # 2) Automatic collapsing to the mean if multiple within factors are present
    # 3) If using dropna, remove rows with missing values (listwise deletion).
    # The latter is the same behavior as JASP (= strict complete-case analysis).
    if within is not None:
        idx_piv = subject if between is None else [subject, between]
        data_piv = data.pivot_table(index=idx_piv, columns=within, values=dv, observed=True)
        if nan_policy == "listwise":
            # Remove rows (= subject) with missing values. For pairwise deletion, missing values
            # will be removed directly in the lower-level functions (e.g. pg.ttest)
            data_piv = data_piv.dropna()
        data = data_piv.melt(ignore_index=False, value_name=dv).reset_index()

    if contrast in ["simple_within", "simple_between"]:
        # OPTION A: SIMPLE MAIN EFFECTS, WITHIN OR BETWEEN
        paired = True if contrast == "simple_within" else False
        col = within if contrast == "simple_within" else between
        # Extract levels of the grouping variable, sorted in alphabetical order
        grp_col = data.groupby(col, sort=True, observed=True)[dv]
        labels = grp_col.groups.keys()
        # Number and labels of possible comparisons
        if len(labels) >= 2:
            combs = list(combinations(labels, 2))
            combs = np.array(combs)
            A = combs[:, 0]
            B = combs[:, 1]
        else:
            raise ValueError("Columns must have at least two unique values.")

        # Initialize dataframe
        stats = pd.DataFrame(dtype=np.float64, index=range(len(combs)), columns=col_order)
        # Force dtype conversion
        cols_str = ["Contrast", "Time", "A", "B", "alternative", "p-adjust", "BF10"]
        cols_bool = ["Parametric", "Paired"]
        stats[cols_str] = stats[cols_str].astype(object)
        stats[cols_bool] = stats[cols_bool].astype(bool)

        # Fill str columns
        stats.loc[:, "A"] = A
        stats.loc[:, "B"] = B
        stats.loc[:, "Contrast"] = col
        stats.loc[:, "alternative"] = alternative
        stats.loc[:, "Paired"] = paired

        # For max precision, make sure rounding is disabled
        old_options = options.copy()
        options["round"] = None

        for i in range(stats.shape[0]):
            col1, col2 = stats.at[i, "A"], stats.at[i, "B"]
            x = grp_col.get_group(col1).to_numpy(dtype=np.float64)
            y = grp_col.get_group(col2).to_numpy(dtype=np.float64)
            if parametric:
                stat_name = "T"
                df_ttest = ttest(
                    x, y, paired=paired, alternative=alternative, correction=correction
                )
                stats.at[i, "BF10"] = df_ttest.at["T-test", "BF10"]
                stats.at[i, "dof"] = df_ttest.at["T-test", "dof"]
            else:
                if paired:
                    stat_name = "W-val"
                    df_ttest = wilcoxon(x, y, alternative=alternative)
                else:
                    stat_name = "U-val"
                    df_ttest = mwu(x, y, alternative=alternative)

            options.update(old_options)  # restore options

            # Compute Hedges / Cohen
            ef = compute_effsize(x=x, y=y, eftype=effsize, paired=paired)

            if return_desc:
                stats.at[i, "mean(A)"] = np.nanmean(x)
                stats.at[i, "mean(B)"] = np.nanmean(y)
                stats.at[i, "std(A)"] = np.nanstd(x, ddof=1)
                stats.at[i, "std(B)"] = np.nanstd(y, ddof=1)
            stats.at[i, stat_name] = df_ttest[stat_name].iat[0]
            stats.at[i, "p-unc"] = df_ttest["p-val"].iat[0]
            stats.at[i, effsize] = ef

        # Multiple comparisons
        padjust = None if stats["p-unc"].size <= 1 else padjust
        if padjust is not None:
            if padjust.lower() != "none":
                _, stats["p-corr"] = multicomp(
                    stats["p-unc"].to_numpy(), alpha=alpha, method=padjust
                )
                stats["p-adjust"] = padjust
        else:
            stats["p-corr"] = None
            stats["p-adjust"] = None
    else:
        # Multiple factors
        if contrast == "multiple_between":
            # B1: BETWEEN1 + BETWEEN2 + BETWEEN1 * BETWEEN2
            factors = between
            fbt = factors
            fwt = [None, None]
            paired = False  # the interaction is not paired
            agg = [False, False]
            # TODO: add a pool SD option, as in JASP and JAMOVI?
        elif contrast == "multiple_within":
            # B2: WITHIN1 + WITHIN2 + WITHIN1 * WITHIN2
            factors = within
            fbt = [None, None]
            fwt = factors
            paired = True
            agg = [True, True]  # Calculate marginal means for both factors
        else:
            # B3: WITHIN + BETWEEN + INTERACTION
            # Decide which order should be reported
            if within_first:
                # within + between + within * between
                factors = [within, between]
                fbt = [None, between]
                fwt = [within, None]
                paired = False  # only for interaction
                agg = [False, True]
            else:
                # between + within + between * within
                factors = [between, within]
                fbt = [between, None]
                fwt = [None, within]
                paired = True
                agg = [True, False]

        stats = pd.DataFrame()
        for i, f in enumerate(factors):
            # Introduced in Pingouin v0.3.2
            # Note that is only has an impact in the between test of mixed
            # designs. Indeed, a similar groupby is applied by default on
            # each within-subject factor of a two-way repeated measures design.
            if all([agg[i], marginal]):
                tmp = data.groupby([subject, f], as_index=False, observed=True, sort=True).mean(
                    numeric_only=True
                )
            else:
                tmp = data
            pt = pairwise_tests(
                dv=dv,
                between=fbt[i],
                within=fwt[i],
                subject=subject,
                data=tmp,
                parametric=parametric,
                marginal=marginal,
                alpha=alpha,
                alternative=alternative,
                padjust=padjust,
                effsize=effsize,
                correction=correction,
                nan_policy=nan_policy,
                return_desc=return_desc,
            )
            stats = pd.concat([stats, pt], axis=0, ignore_index=True, sort=False)

        # Then compute the interaction between the factors
        if interaction:
            nrows = stats.shape[0]
            # BUGFIX 0.3.9: If subject is present, make sure that we respect
            # the order of subjects.
            if subject is not None:
                data = data.set_index(subject).sort_index()

            # Extract interaction levels, sorted in alphabetical order
            grp_fac1 = data.groupby(factors[0], observed=True, sort=True)[dv]
            grp_fac2 = data.groupby(factors[1], observed=True, sort=True)[dv]
            grp_both = data.groupby(factors, observed=True, sort=True)[dv]
            labels_fac1 = grp_fac1.groups.keys()
            labels_fac2 = grp_fac2.groups.keys()
            # comb_fac1 = list(combinations(labels_fac1, 2))
            comb_fac2 = list(combinations(labels_fac2, 2))

            # Pairwise comparisons
            combs_list = list(product(labels_fac1, comb_fac2))
            ncombs = len(combs_list)
            # np.array(combs_list) does not work because of tuples
            # we therefore need to flatten the tupple
            combs = np.zeros(shape=(ncombs, 3), dtype=object)
            for i in range(ncombs):
                combs[i] = _flatten_list(combs_list[i], include_tuple=True)

            # Append empty rows
            idxiter = np.arange(nrows, nrows + ncombs)
            stats = stats.reindex(stats.index.union(idxiter))

            # Update other columns
            stats.loc[idxiter, "Contrast"] = factors[0] + " * " + factors[1]
            stats.loc[idxiter, "Time"] = combs[:, 0]
            stats.loc[idxiter, "Paired"] = paired
            stats.loc[idxiter, "alternative"] = alternative
            stats.loc[idxiter, "A"] = combs[:, 1]
            stats.loc[idxiter, "B"] = combs[:, 2]

            # For max precision, make sure rounding is disabled
            old_options = options.copy()
            options["round"] = None

            for i, comb in enumerate(combs):
                ic = nrows + i  # Take into account previous rows
                fac1, col1, col2 = comb
                x = grp_both.get_group((fac1, col1)).to_numpy(dtype=np.float64)
                y = grp_both.get_group((fac1, col2)).to_numpy(dtype=np.float64)
                ef = compute_effsize(x=x, y=y, eftype=effsize, paired=paired)
                if parametric:
                    stat_name = "T"
                    df_ttest = ttest(
                        x, y, paired=paired, alternative=alternative, correction=correction
                    )
                    stats.at[ic, "BF10"] = df_ttest.at["T-test", "BF10"]
                    stats.at[ic, "dof"] = df_ttest.at["T-test", "dof"]
                else:
                    if paired:
                        stat_name = "W-val"
                        df_ttest = wilcoxon(x, y, alternative=alternative)
                    else:
                        stat_name = "U-val"
                        df_ttest = mwu(x, y, alternative=alternative)

                options.update(old_options)  # restore options

                # Append to stats
                if return_desc:
                    stats.at[ic, "mean(A)"] = np.nanmean(x)
                    stats.at[ic, "mean(B)"] = np.nanmean(y)
                    stats.at[ic, "std(A)"] = np.nanstd(x, ddof=1)
                    stats.at[ic, "std(B)"] = np.nanstd(y, ddof=1)
                stats.at[ic, stat_name] = df_ttest[stat_name].iat[0]
                stats.at[ic, "p-unc"] = df_ttest["p-val"].iat[0]
                stats.at[ic, effsize] = ef

            # Multi-comparison columns
            if padjust is not None and padjust.lower() != "none":
                _, pcor = multicomp(
                    stats.loc[idxiter, "p-unc"].to_numpy(), alpha=alpha, method=padjust
                )
                stats.loc[idxiter, "p-corr"] = pcor
                stats.loc[idxiter, "p-adjust"] = padjust

    # ---------------------------------------------------------------------
    # Append parametric columns
    stats.loc[:, "Parametric"] = parametric

    # Reorder and drop empty columns
    stats = stats[np.array(col_order)[np.isin(col_order, stats.columns)]]
    stats = stats.dropna(how="all", axis=1)

    # Rename Time columns
    if contrast in ["multiple_within", "multiple_between", "within_between"] and interaction:
        # BUGFIX: avoid chained-assignment ``fillna(..., inplace=True)`` — it raises a
        # FutureWarning on pandas >= 2.1 and silently does nothing under Copy-on-Write.
        # Assign the filled column back instead.
        stats["Time"] = stats["Time"].fillna("-")
        stats.rename(columns={"Time": factors[0]}, inplace=True)

    return _postprocess_dataframe(stats)
| (data=None, dv=None, between=None, within=None, subject=None, parametric=True, marginal=True, alpha=0.05, alternative='two-sided', padjust='none', effsize='hedges', correction='auto', nan_policy='listwise', return_desc=False, interaction=True, within_first=True) |
@pf.register_dataframe_method
def pairwise_ttests(*args, **kwargs):
    """This function has been deprecated. Use :py:func:`pingouin.pairwise_tests` instead."""
    # Backward-compatibility shim kept for old code: warn once per call site,
    # then delegate everything to the renamed function.
    deprecation_msg = "pairwise_ttests is deprecated, use pairwise_tests instead."
    warnings.warn(deprecation_msg, UserWarning)
    return pairwise_tests(*args, **kwargs)
32,034 | pingouin.pairwise | pairwise_tukey | Pairwise Tukey-HSD post-hoc test.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a Pandas method, in which
case this argument is no longer needed.
dv : string
Name of column containing the dependent variable.
between: string
Name of column containing the between factor.
effsize : string or None
Effect size type. Available methods are:
* ``'none'``: no effect size
* ``'cohen'``: Unbiased Cohen d
* ``'hedges'``: Hedges g
* ``'r'``: Pearson correlation coefficient
* ``'eta-square'``: Eta-square
* ``'odds-ratio'``: Odds ratio
* ``'AUC'``: Area Under the Curve
* ``'CLES'``: Common Language Effect Size
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'A'``: Name of first measurement
* ``'B'``: Name of second measurement
* ``'mean(A)'``: Mean of first measurement
* ``'mean(B)'``: Mean of second measurement
* ``'diff'``: Mean difference (= mean(A) - mean(B))
* ``'se'``: Standard error
* ``'T'``: T-values
* ``'p-tukey'``: Tukey-HSD corrected p-values
* ``'hedges'``: Hedges effect size (or any effect size defined in
``effsize``)
See also
--------
pairwise_tests, pairwise_gameshowell
Notes
-----
Tukey HSD post-hoc [1]_ is best for balanced one-way ANOVA.
It has been proven to be conservative for one-way ANOVA with unequal sample sizes. However, it
is not robust if the groups have unequal variances, in which case the Games-Howell test is
more adequate. Tukey HSD is not valid for repeated measures ANOVA. Only one-way ANOVA design
are supported.
The T-values are defined as:
.. math::
t = \frac{\overline{x}_i - \overline{x}_j}
{\sqrt{2 \cdot \text{MS}_w / n}}
where :math:`\overline{x}_i` and :math:`\overline{x}_j` are the means of the first and
second group, respectively, :math:`\text{MS}_w` the mean squares of the error (computed using
ANOVA) and :math:`n` the sample size.
If the sample sizes are unequal, the Tukey-Kramer procedure is automatically used:
.. math::
t = \frac{\overline{x}_i - \overline{x}_j}{\sqrt{\frac{MS_w}{n_i}
+ \frac{\text{MS}_w}{n_j}}}
where :math:`n_i` and :math:`n_j` are the sample sizes of the first and second group,
respectively.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\sqrt2|t_i|, r, N - r)` where :math:`r` is the total number of groups and
:math:`N` is the total sample size.
References
----------
.. [1] Tukey, John W. "Comparing individual means in the analysis of
variance." Biometrics (1949): 99-114.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Tukey post-hocs on the Penguins dataset.
>>> import pingouin as pg
>>> df = pg.read_dataset('penguins')
>>> df.pairwise_tukey(dv='body_mass_g', between='species').round(3)
A B mean(A) mean(B) diff se T p-tukey hedges
0 Adelie Chinstrap 3700.662 3733.088 -32.426 67.512 -0.480 0.881 -0.074
1 Adelie Gentoo 3700.662 5076.016 -1375.354 56.148 -24.495 0.000 -2.860
2 Chinstrap Gentoo 3733.088 5076.016 -1342.928 69.857 -19.224 0.000 -2.875
@pf.register_dataframe_method
def pairwise_tukey(data=None, dv=None, between=None, effsize="hedges"):
    """Pairwise Tukey-HSD post-hoc test.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Note that this function can also directly be used as a Pandas method, in which
        case this argument is no longer needed.
    dv : string
        Name of column containing the dependent variable.
    between: string
        Name of column containing the between factor.
    effsize : string or None
        Effect size type. Available methods are:

        * ``'none'``: no effect size
        * ``'cohen'``: Unbiased Cohen d
        * ``'hedges'``: Hedges g
        * ``'r'``: Pearson correlation coefficient
        * ``'eta-square'``: Eta-square
        * ``'odds-ratio'``: Odds ratio
        * ``'AUC'``: Area Under the Curve
        * ``'CLES'``: Common Language Effect Size

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'A'``: Name of first measurement
        * ``'B'``: Name of second measurement
        * ``'mean(A)'``: Mean of first measurement
        * ``'mean(B)'``: Mean of second measurement
        * ``'diff'``: Mean difference (= mean(A) - mean(B))
        * ``'se'``: Standard error
        * ``'T'``: T-values
        * ``'p-tukey'``: Tukey-HSD corrected p-values
        * ``'hedges'``: Hedges effect size (or any effect size defined in
          ``effsize``)

    See also
    --------
    pairwise_tests, pairwise_gameshowell

    Notes
    -----
    Tukey HSD post-hoc [1]_ is best for balanced one-way ANOVA.
    It has been proven to be conservative for one-way ANOVA with unequal sample sizes. However, it
    is not robust if the groups have unequal variances, in which case the Games-Howell test is
    more adequate. Tukey HSD is not valid for repeated measures ANOVA. Only one-way ANOVA design
    are supported.

    The T-values are defined as:

    .. math::

        t = \\frac{\\overline{x}_i - \\overline{x}_j}
        {\\sqrt{2 \\cdot \\text{MS}_w / n}}

    where :math:`\\overline{x}_i` and :math:`\\overline{x}_j` are the means of the first and
    second group, respectively, :math:`\\text{MS}_w` the mean squares of the error (computed using
    ANOVA) and :math:`n` the sample size.

    If the sample sizes are unequal, the Tukey-Kramer procedure is automatically used:

    .. math::

        t = \\frac{\\overline{x}_i - \\overline{x}_j}{\\sqrt{\\frac{MS_w}{n_i}
        + \\frac{\\text{MS}_w}{n_j}}}

    where :math:`n_i` and :math:`n_j` are the sample sizes of the first and second group,
    respectively.

    The p-values are then approximated using the Studentized range distribution
    :math:`Q(\\sqrt2|t_i|, r, N - r)` where :math:`r` is the total number of groups and
    :math:`N` is the total sample size.

    References
    ----------
    .. [1] Tukey, John W. "Comparing individual means in the analysis of
           variance." Biometrics (1949): 99-114.

    .. [2] Gleason, John R. "An accurate, non-iterative approximation for
           studentized range quantiles." Computational statistics & data
           analysis 31.2 (1999): 147-158.

    Examples
    --------
    Pairwise Tukey post-hocs on the Penguins dataset.

    >>> import pingouin as pg
    >>> df = pg.read_dataset('penguins')
    >>> df.pairwise_tukey(dv='body_mass_g', between='species').round(3)
               A          B   mean(A)   mean(B)      diff      se       T  p-tukey  hedges
    0     Adelie  Chinstrap  3700.662  3733.088   -32.426  67.512  -0.480    0.881  -0.074
    1     Adelie     Gentoo  3700.662  5076.016 -1375.354  56.148 -24.495    0.000  -2.860
    2  Chinstrap     Gentoo  3733.088  5076.016 -1342.928  69.857 -19.224    0.000  -2.875
    """
    # Run the one-way ANOVA first, with rounding temporarily disabled so that
    # the DF / MS terms keep full precision.
    saved_options = options.copy()
    options["round"] = None
    aov = anova(dv=dv, data=data, between=between, detailed=True)
    options.update(saved_options)  # restore user options
    df_error = aov.at[1, "DF"]
    n_groups = aov.at[0, "DF"] + 1
    grouper = data.groupby(between, observed=True)[dv]  # sort=True by default
    # Group labels come from the (sorted) groupby keys rather than np.unique,
    # so that a custom Categorical ordering of ``between`` is respected.
    # See https://github.com/raphaelvallat/pingouin/issues/111
    group_labels = np.array(list(grouper.groups.keys()))
    group_sizes = grouper.count().to_numpy()
    group_means = grouper.mean(numeric_only=True).to_numpy()
    # Per-group variance term MS_w / n_i (Tukey-Kramer when sizes are unequal)
    var_terms = aov.at[1, "MS"] / group_sizes

    # All pairwise (i, j) index combinations, in sorted-label order
    idx_a, idx_b = np.array(list(combinations(np.arange(n_groups), 2))).T
    mean_diff = group_means[idx_a] - group_means[idx_b]
    std_err = np.sqrt(var_terms[idx_a] + var_terms[idx_b])
    tvals = mean_diff / std_err

    # Tukey-HSD p-values from the Studentized range distribution
    # crit = studentized_range.ppf(1 - alpha, n_groups, df_error) / np.sqrt(2)
    pvals = studentized_range.sf(np.sqrt(2) * np.abs(tvals), n_groups, df_error)
    pvals = np.clip(pvals, 0, 1)

    # Exact effect sizes, computed on the raw per-group data
    # (an approximation would be d = t * sqrt(1/n_i + 1/n_j) converted via
    # convert_effsize, but the exact computation is preferred here).
    es = [
        compute_effsize(
            grouper.get_group(group_labels[a]),
            grouper.get_group(group_labels[b]),
            paired=False,
            eftype=effsize,
        )
        for a, b in zip(idx_a, idx_b)
    ]

    out = pd.DataFrame(
        {
            "A": group_labels[idx_a],
            "B": group_labels[idx_b],
            "mean(A)": group_means[idx_a],
            "mean(B)": group_means[idx_b],
            "diff": mean_diff,
            "se": std_err,
            "T": tvals,
            "p-tukey": pvals,
            effsize: es,
        }
    )
    return _postprocess_dataframe(out)
| (data=None, dv=None, between=None, effsize='hedges') |
32,036 | pingouin.correlation | partial_corr | Partial and semi-partial correlation.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Pandas Dataframe. Note that this function can also directly be used
as a :py:class:`pandas.DataFrame` method, in which case this argument
is no longer needed.
x, y : string
x and y. Must be names of columns in ``data``.
covar : string or list
Covariate(s). Must be a names of columns in ``data``. Use a list if
there are two or more covariates.
x_covar : string or list
Covariate(s) for the ``x`` variable. This is used to compute
semi-partial correlation (i.e. the effect of ``x_covar`` is removed
from ``x`` but not from ``y``). Only one of ``covar``, ``x_covar`` and
``y_covar`` can be specified.
y_covar : string or list
Covariate(s) for the ``y`` variable. This is used to compute
semi-partial correlation (i.e. the effect of ``y_covar`` is removed
from ``y`` but not from ``x``). Only one of ``covar``, ``x_covar`` and
``y_covar`` can be specified.
alternative : string
Defines the alternative hypothesis, or tail of the partial correlation. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
p-value. "greater" tests against the alternative hypothesis that the partial correlation is
positive (greater than zero), "less" tests against the hypothesis that the partial
correlation is negative.
method : string
Correlation type:
* ``'pearson'``: Pearson :math:`r` product-moment correlation
* ``'spearman'``: Spearman :math:`\rho` rank-order correlation
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'n'``: Sample size (after removal of missing values)
* ``'r'``: Partial correlation coefficient
* ``'CI95'``: 95% parametric confidence intervals around :math:`r`
* ``'p-val'``: p-value
See also
--------
corr, pcorr, pairwise_corr, rm_corr
Notes
-----
Partial correlation [1]_ measures the degree of association between ``x``
and ``y``, after removing the effect of one or more controlling variables
(``covar``, or :math:`Z`). Practically, this is achieved by calculating the
correlation coefficient between the residuals of two linear regressions:
.. math:: x \sim Z, y \sim Z
Like the correlation coefficient, the partial correlation
coefficient takes on a value in the range from –1 to 1, where 1 indicates a
perfect positive association.
The semipartial correlation is similar to the partial correlation,
with the exception that the set of controlling variables is only
removed for either ``x`` or ``y``, but not both.
Pingouin uses the method described in [2]_ to calculate the (semi)partial
correlation coefficients and associated p-values. This method is based on
the inverse covariance matrix and is significantly faster than the
traditional regression-based method. Results have been tested against the
`ppcor <https://cran.r-project.org/web/packages/ppcor/index.html>`_
R package.
.. important:: Rows with missing values are automatically removed from
data.
References
----------
.. [1] https://en.wikipedia.org/wiki/Partial_correlation
.. [2] https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4681537/
Examples
--------
1. Partial correlation with one covariate
>>> import pingouin as pg
>>> df = pg.read_dataset('partial_corr')
>>> pg.partial_corr(data=df, x='x', y='y', covar='cv1').round(3)
n r CI95% p-val
pearson 30 0.568 [0.25, 0.77] 0.001
2. Spearman partial correlation with several covariates
>>> # Partial correlation of x and y controlling for cv1, cv2 and cv3
>>> pg.partial_corr(data=df, x='x', y='y', covar=['cv1', 'cv2', 'cv3'],
... method='spearman').round(3)
n r CI95% p-val
spearman 30 0.521 [0.18, 0.75] 0.005
3. Same but one-sided test
>>> pg.partial_corr(data=df, x='x', y='y', covar=['cv1', 'cv2', 'cv3'],
... alternative="greater", method='spearman').round(3)
n r CI95% p-val
spearman 30 0.521 [0.24, 1.0] 0.003
>>> pg.partial_corr(data=df, x='x', y='y', covar=['cv1', 'cv2', 'cv3'],
... alternative="less", method='spearman').round(3)
n r CI95% p-val
spearman 30 0.521 [-1.0, 0.72] 0.997
4. As a pandas method
>>> df.partial_corr(x='x', y='y', covar=['cv1'], method='spearman').round(3)
n r CI95% p-val
spearman 30 0.578 [0.27, 0.78] 0.001
5. Partial correlation matrix (returns only the correlation coefficients)
>>> df.pcorr().round(3)
x y cv1 cv2 cv3
x 1.000 0.493 -0.095 0.130 -0.385
y 0.493 1.000 -0.007 0.104 -0.002
cv1 -0.095 -0.007 1.000 -0.241 -0.470
cv2 0.130 0.104 -0.241 1.000 -0.118
cv3 -0.385 -0.002 -0.470 -0.118 1.000
6. Semi-partial correlation on x
>>> pg.partial_corr(data=df, x='x', y='y', x_covar=['cv1', 'cv2', 'cv3']).round(3)
n r CI95% p-val
pearson 30 0.463 [0.1, 0.72] 0.015
@pf.register_dataframe_method
def partial_corr(
    data=None,
    x=None,
    y=None,
    covar=None,
    x_covar=None,
    y_covar=None,
    alternative="two-sided",
    method="pearson",
):
    """Partial and semi-partial correlation.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Pandas Dataframe. Note that this function can also directly be used
        as a :py:class:`pandas.DataFrame` method, in which case this argument
        is no longer needed.
    x, y : string
        x and y. Must be names of columns in ``data``.
    covar : string or list
        Covariate(s). Must be names of columns in ``data``. Use a list if
        there are two or more covariates.
    x_covar : string or list
        Covariate(s) for the ``x`` variable. This is used to compute
        semi-partial correlation (i.e. the effect of ``x_covar`` is removed
        from ``x`` but not from ``y``). Only one of ``covar``, ``x_covar`` and
        ``y_covar`` can be specified.
    y_covar : string or list
        Covariate(s) for the ``y`` variable. This is used to compute
        semi-partial correlation (i.e. the effect of ``y_covar`` is removed
        from ``y`` but not from ``x``). Only one of ``covar``, ``x_covar`` and
        ``y_covar`` can be specified.
    alternative : string
        Defines the alternative hypothesis, or tail of the partial correlation. Must be one of
        "two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
        p-value. "greater" tests against the alternative hypothesis that the partial correlation is
        positive (greater than zero), "less" tests against the hypothesis that the partial
        correlation is negative.
    method : string
        Correlation type:

        * ``'pearson'``: Pearson :math:`r` product-moment correlation
        * ``'spearman'``: Spearman :math:`\\rho` rank-order correlation

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`

        * ``'n'``: Sample size (after removal of missing values)
        * ``'r'``: Partial correlation coefficient
        * ``'CI95%'``: 95% parametric confidence intervals around :math:`r`
        * ``'p-val'``: p-value

    See also
    --------
    corr, pcorr, pairwise_corr, rm_corr

    Notes
    -----
    Partial correlation [1]_ measures the degree of association between ``x``
    and ``y``, after removing the effect of one or more controlling variables
    (``covar``, or :math:`Z`). Practically, this is achieved by calculating the
    correlation coefficient between the residuals of two linear regressions:

    .. math:: x \\sim Z, y \\sim Z

    Like the correlation coefficient, the partial correlation
    coefficient takes on a value in the range from -1 to 1, where 1 indicates a
    perfect positive association.

    The semipartial correlation is similar to the partial correlation,
    with the exception that the set of controlling variables is only
    removed for either ``x`` or ``y``, but not both.

    Pingouin uses the method described in [2]_ to calculate the (semi)partial
    correlation coefficients and associated p-values. This method is based on
    the inverse covariance matrix and is significantly faster than the
    traditional regression-based method. Results have been tested against the
    `ppcor <https://cran.r-project.org/web/packages/ppcor/index.html>`_
    R package.

    .. important:: Rows with missing values are automatically removed from
        data.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Partial_correlation

    .. [2] https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4681537/

    Examples
    --------
    1. Partial correlation with one covariate

    >>> import pingouin as pg
    >>> df = pg.read_dataset('partial_corr')
    >>> pg.partial_corr(data=df, x='x', y='y', covar='cv1').round(3)
              n      r         CI95%  p-val
    pearson  30  0.568  [0.25, 0.77]  0.001

    2. Spearman partial correlation with several covariates

    >>> # Partial correlation of x and y controlling for cv1, cv2 and cv3
    >>> pg.partial_corr(data=df, x='x', y='y', covar=['cv1', 'cv2', 'cv3'],
    ...                 method='spearman').round(3)
               n      r         CI95%  p-val
    spearman  30  0.521  [0.18, 0.75]  0.005

    3. Same but one-sided test

    >>> pg.partial_corr(data=df, x='x', y='y', covar=['cv1', 'cv2', 'cv3'],
    ...                 alternative="greater", method='spearman').round(3)
               n      r        CI95%  p-val
    spearman  30  0.521  [0.24, 1.0]  0.003

    >>> pg.partial_corr(data=df, x='x', y='y', covar=['cv1', 'cv2', 'cv3'],
    ...                 alternative="less", method='spearman').round(3)
               n      r         CI95%  p-val
    spearman  30  0.521  [-1.0, 0.72]  0.997

    4. As a pandas method

    >>> df.partial_corr(x='x', y='y', covar=['cv1'], method='spearman').round(3)
               n      r         CI95%  p-val
    spearman  30  0.578  [0.27, 0.78]  0.001

    5. Partial correlation matrix (returns only the correlation coefficients)

    >>> df.pcorr().round(3)
             x      y    cv1    cv2    cv3
    x    1.000  0.493 -0.095  0.130 -0.385
    y    0.493  1.000 -0.007  0.104 -0.002
    cv1 -0.095 -0.007  1.000 -0.241 -0.470
    cv2  0.130  0.104 -0.241  1.000 -0.118
    cv3 -0.385 -0.002 -0.470 -0.118  1.000

    6. Semi-partial correlation on x

    >>> pg.partial_corr(data=df, x='x', y='y', x_covar=['cv1', 'cv2', 'cv3']).round(3)
              n      r        CI95%  p-val
    pearson  30  0.463  [0.1, 0.72]  0.015
    """
    from pingouin.utils import _flatten_list

    # Safety check
    assert alternative in [
        "two-sided",
        "greater",
        "less",
    ], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    assert method in [
        "pearson",
        "spearman",
    ], 'only "pearson" and "spearman" are supported for partial correlation.'
    assert isinstance(data, pd.DataFrame), "data must be a pandas DataFrame."
    assert data.shape[0] > 2, "Data must have at least 3 samples."
    # covar (partial) and x_covar/y_covar (semi-partial) are mutually exclusive modes
    if covar is not None and (x_covar is not None or y_covar is not None):
        raise ValueError("Cannot specify both covar and {x,y}_covar.")
    if x_covar is not None and y_covar is not None:
        raise ValueError("Cannot specify both x_covar and y_covar.")
    assert x != y, "x and y must be independent"
    if isinstance(covar, list):
        assert x not in covar, "x and covar must be independent"
        assert y not in covar, "y and covar must be independent"
    else:
        assert x != covar, "x and covar must be independent"
        assert y != covar, "y and covar must be independent"
    # Check that columns exist
    col = _flatten_list([x, y, covar, x_covar, y_covar])
    assert all([c in data for c in col]), "columns are not in dataframe."
    # Check that columns are numeric (bool / float / signed int / unsigned int)
    assert all([data[c].dtype.kind in "bfiu" for c in col])
    # Drop rows with NaN (listwise deletion, as promised in the docstring)
    data = data[col].dropna()
    n = data.shape[0]  # Number of samples
    k = data.shape[1] - 2  # Number of covariates
    assert n > 2, "Data must have at least 3 non-NAN samples."
    # Calculate the partial correlation matrix - similar to pingouin.pcorr()
    if method == "spearman":
        # Convert the data to rank, similar to R cov()
        V = data.rank(na_option="keep").cov(numeric_only=True)
    else:
        V = data.cov(numeric_only=True)
    # pinv (pseudo-inverse) also handles a singular covariance matrix
    Vi = np.linalg.pinv(V, hermitian=True)  # Inverse covariance matrix
    Vi_diag = Vi.diagonal()
    D = np.diag(np.sqrt(1 / Vi_diag))
    pcor = -1 * (D @ Vi @ D)  # Partial correlation matrix
    if covar is not None:
        # Columns were ordered [x, y, *covar] above, so [0, 1] is the x-y pair
        r = pcor[0, 1]
    else:
        # Semi-partial correlation matrix
        with np.errstate(divide="ignore"):
            spcor = (
                pcor
                / np.sqrt(np.diag(V))[..., None]
                / np.sqrt(np.abs(Vi_diag - Vi**2 / Vi_diag[..., None])).T
            )
        # spcor is asymmetric: which triangle to read depends on which
        # variable the covariates were removed from.
        if y_covar is not None:
            r = spcor[0, 1]  # y_covar is removed from y
        else:
            r = spcor[1, 0]  # x_covar is removed from x
    if np.isnan(r):
        # Correlation failed. Return NaN. When would this happen?
        return pd.DataFrame({"n": n, "r": np.nan, "CI95%": np.nan, "p-val": np.nan}, index=[method])
    # Compute the two-sided p-value and confidence intervals
    # https://online.stat.psu.edu/stat505/lesson/6/6.3
    pval = _correl_pvalue(r, n, k, alternative)
    ci = compute_esci(
        stat=r, nx=(n - k), ny=(n - k), eftype="r", decimals=6, alternative=alternative
    )
    # Create dictionary
    stats = {
        "n": n,
        "r": r,
        "CI95%": [ci],
        "p-val": pval,
    }
    # Convert to DataFrame
    stats = pd.DataFrame(stats, index=[method])
    # Define order. NOTE: the loop variable `k` shadows the covariate count
    # above; safe here because `k` is no longer needed.
    col_keep = ["n", "r", "CI95%", "p-val"]
    col_order = [k for k in col_keep if k in stats.keys().tolist()]
    return _postprocess_dataframe(stats)[col_order]
| (data=None, x=None, y=None, covar=None, x_covar=None, y_covar=None, alternative='two-sided', method='pearson') |
32,037 | pingouin.correlation | pcorr | Partial correlation matrix (:py:class:`pandas.DataFrame` method).
Returns
-------
pcormat : :py:class:`pandas.DataFrame`
Partial correlation matrix.
Notes
-----
This function calculates the pairwise partial correlations for each pair of
variables in a :py:class:`pandas.DataFrame` given all the others. It has
the same behavior as the pcor function in the
`ppcor <https://cran.r-project.org/web/packages/ppcor/index.html>`_
R package.
Note that this function only returns the raw Pearson correlation
coefficient. If you want to calculate the test statistic and p-values, or
use more robust estimates of the correlation coefficient, please refer to
the :py:func:`pingouin.pairwise_corr` or :py:func:`pingouin.partial_corr`
functions.
Examples
--------
>>> import pingouin as pg
>>> data = pg.read_dataset('mediation')
>>> data.pcorr().round(3)
X M Y Mbin Ybin W1 W2
X 1.000 0.359 0.074 -0.019 -0.147 -0.148 -0.067
M 0.359 1.000 0.555 -0.024 -0.112 -0.138 -0.176
Y 0.074 0.555 1.000 -0.001 0.169 0.101 0.108
Mbin -0.019 -0.024 -0.001 1.000 -0.080 -0.032 -0.040
Ybin -0.147 -0.112 0.169 -0.080 1.000 -0.000 -0.140
W1 -0.148 -0.138 0.101 -0.032 -0.000 1.000 -0.394
W2 -0.067 -0.176 0.108 -0.040 -0.140 -0.394 1.000
On a subset of columns
>>> data[['X', 'Y', 'M']].pcorr()
X Y M
X 1.000000 0.036649 0.412804
Y 0.036649 1.000000 0.540140
M 0.412804 0.540140 1.000000
| @pf.register_dataframe_method
def pcorr(self):
    """Partial correlation matrix (:py:class:`pandas.DataFrame` method).

    Each off-diagonal cell ``(i, j)`` holds the Pearson partial correlation
    between columns ``i`` and ``j``, controlling for all remaining columns.
    Matches the behavior of the ``pcor`` function from the
    `ppcor <https://cran.r-project.org/web/packages/ppcor/index.html>`_
    R package.

    Returns
    -------
    pcormat : :py:class:`pandas.DataFrame`
        Partial correlation matrix (raw coefficients only — use
        :py:func:`pingouin.partial_corr` or :py:func:`pingouin.pairwise_corr`
        for test statistics and p-values).

    Examples
    --------
    >>> import pingouin as pg
    >>> data = pg.read_dataset('mediation')
    >>> data[['X', 'Y', 'M']].pcorr()
              X         Y         M
    X  1.000000  0.036649  0.412804
    Y  0.036649  1.000000  0.540140
    M  0.412804  0.540140  1.000000
    """
    # Partial correlations are read directly off the precision matrix
    # (inverse covariance): pcor_ij = -P_ij / sqrt(P_ii * P_jj).
    covariance = self.cov(numeric_only=True)
    precision = np.linalg.pinv(covariance, hermitian=True)
    scale = np.diag(np.sqrt(1 / np.diag(precision)))
    partial = -(scale @ precision @ scale)
    # The formula above gives -1 on the diagonal; force it to +1.
    np.fill_diagonal(partial, 1)
    return pd.DataFrame(partial, index=covariance.index, columns=covariance.columns)
| (self) |
32,038 | pingouin.plotting | plot_blandaltman |
Generate a Bland-Altman plot to compare two sets of measurements.
Parameters
----------
x, y : pd.Series, np.array, or list
First and second measurements.
agreement : float
Multiple of the standard deviation to plot agreement limits.
The defaults is 1.96, which corresponds to 95% confidence interval if
the differences are normally distributed.
xaxis : str
Define which measurements should be used as the reference (x-axis).
Default is to use the average of x and y ("mean"). Accepted values are
"mean", "x" or "y".
confidence : float
If not None, plot the specified percentage confidence interval of
the mean and limits of agreement. The CIs of the mean difference and
agreement limits describe a possible error in the
estimate due to a sampling error. The greater the sample size,
the narrower the CIs will be.
annotate : bool
If True (default), annotate the values for the mean difference
and agreement limits.
ax : matplotlib axes
Axis on which to draw the plot.
**kwargs : optional
Optional argument(s) passed to :py:func:`matplotlib.pyplot.scatter`.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
Bland-Altman plots [1]_ are extensively used to evaluate the agreement
among two different instruments or two measurements techniques.
They allow identification of any systematic difference between the
measurements (i.e., fixed bias) or possible outliers.
The mean difference (= x - y) is the estimated bias, and the SD of the
differences measures the random fluctuations around this mean.
If the mean value of the difference differs significantly from 0 on the
basis of a 1-sample t-test, this indicates the presence of fixed bias.
If there is a consistent bias, it can be adjusted for by subtracting the
mean difference from the new method.
It is common to compute 95% limits of agreement for each comparison
(average difference ± 1.96 standard deviation of the difference), which
tells us how far apart measurements by 2 methods were more likely to be
for most individuals. If the differences within mean ± 1.96 SD are not
clinically important, the two methods may be used interchangeably.
The 95% limits of agreement can be unreliable estimates of the population
parameters especially for small sample sizes so, when comparing methods
or assessing repeatability, it is important to calculate confidence
intervals for the 95% limits of agreement.
The code is an adaptation of the
`PyCompare <https://github.com/jaketmp/pyCompare>`_ package. The present
implementation is a simplified version; please refer to the original
package for more advanced functionalities.
References
----------
.. [1] Bland, J. M., & Altman, D. (1986). Statistical methods for assessing
agreement between two methods of clinical measurement. The lancet,
327(8476), 307-310.
.. [2] Giavarina, D. (2015). Understanding bland altman analysis.
Biochemia medica, 25(2), 141-151.
Examples
--------
Bland-Altman plot (example data from [2]_)
.. plot::
>>> import pingouin as pg
>>> df = pg.read_dataset("blandaltman")
>>> ax = pg.plot_blandaltman(df['A'], df['B'])
>>> plt.tight_layout()
def plot_blandaltman(
    x, y, agreement=1.96, xaxis="mean", confidence=0.95, annotate=True, ax=None, **kwargs
):
    """
    Generate a Bland-Altman plot to compare two sets of measurements.

    Parameters
    ----------
    x, y : pd.Series, np.array, or list
        First and second measurements.
    agreement : float
        Multiple of the standard deviation to plot agreement limits.
        The default is 1.96, which corresponds to 95% confidence interval if
        the differences are normally distributed.
    xaxis : str
        Define which measurements should be used as the reference (x-axis).
        Default is to use the average of x and y ("mean"). Accepted values are
        "mean", "x" or "y".
    confidence : float
        If not None, plot the specified percentage confidence interval of
        the mean and limits of agreement. The CIs of the mean difference and
        agreement limits describe a possible error in the
        estimate due to a sampling error. The greater the sample size,
        the narrower the CIs will be.
    annotate : bool
        If True (default), annotate the values for the mean difference
        and agreement limits.
    ax : matplotlib axes
        Axis on which to draw the plot.
    **kwargs : optional
        Optional argument(s) passed to :py:func:`matplotlib.pyplot.scatter`.

    Returns
    -------
    ax : Matplotlib Axes instance
        Returns the Axes object with the plot for further tweaking.

    Notes
    -----
    Bland-Altman plots [1]_ are extensively used to evaluate the agreement
    among two different instruments or two measurements techniques.
    They allow identification of any systematic difference between the
    measurements (i.e., fixed bias) or possible outliers.

    The mean difference (= x - y) is the estimated bias, and the SD of the
    differences measures the random fluctuations around this mean.
    If the mean value of the difference differs significantly from 0 on the
    basis of a 1-sample t-test, this indicates the presence of fixed bias.
    If there is a consistent bias, it can be adjusted for by subtracting the
    mean difference from the new method.

    It is common to compute 95% limits of agreement for each comparison
    (average difference ± 1.96 standard deviation of the difference), which
    tells us how far apart measurements by 2 methods were more likely to be
    for most individuals. If the differences within mean ± 1.96 SD are not
    clinically important, the two methods may be used interchangeably.

    The 95% limits of agreement can be unreliable estimates of the population
    parameters especially for small sample sizes so, when comparing methods
    or assessing repeatability, it is important to calculate confidence
    intervals for the 95% limits of agreement.

    The code is an adaptation of the
    `PyCompare <https://github.com/jaketmp/pyCompare>`_ package. The present
    implementation is a simplified version; please refer to the original
    package for more advanced functionalities.

    References
    ----------
    .. [1] Bland, J. M., & Altman, D. (1986). Statistical methods for assessing
       agreement between two methods of clinical measurement. The lancet,
       327(8476), 307-310.

    .. [2] Giavarina, D. (2015). Understanding bland altman analysis.
       Biochemia medica, 25(2), 141-151.

    Examples
    --------
    Bland-Altman plot (example data from [2]_)

    .. plot::

        >>> import pingouin as pg
        >>> df = pg.read_dataset("blandaltman")
        >>> ax = pg.plot_blandaltman(df['A'], df['B'])
        >>> plt.tight_layout()
    """
    # Safety check
    assert xaxis in ["mean", "x", "y"]
    # Get names before converting to NumPy array (used for axis labels below)
    xname = x.name if isinstance(x, pd.Series) else "x"
    yname = y.name if isinstance(y, pd.Series) else "y"
    x = np.asarray(x)
    y = np.asarray(y)
    assert x.ndim == 1 and y.ndim == 1
    assert x.size == y.size
    assert not np.isnan(x).any(), "Missing values in x or y are not supported."
    assert not np.isnan(y).any(), "Missing values in x or y are not supported."
    # Update default kwargs with specified inputs
    _scatter_kwargs = {"color": "tab:blue", "alpha": 0.8}
    _scatter_kwargs.update(kwargs)
    # Calculate mean, STD and SEM of x - y
    n = x.size
    dof = n - 1
    diff = x - y
    mean_diff = np.mean(diff)
    std_diff = np.std(diff, ddof=1)  # sample SD (ddof=1)
    mean_diff_se = np.sqrt(std_diff**2 / n)  # standard error of the mean difference
    # Limits of agreement: mean difference ± `agreement` * SD
    high = mean_diff + agreement * std_diff
    low = mean_diff - agreement * std_diff
    # Approximate SE of the limits of agreement, sqrt(3 * s**2 / n)
    # (Bland & Altman, 1986)
    high_low_se = np.sqrt(3 * std_diff**2 / n)
    # Define x-axis
    if xaxis == "mean":
        xval = np.vstack((x, y)).mean(0)
        xlabel = f"Mean of {xname} and {yname}"
    elif xaxis == "x":
        xval = x
        xlabel = xname
    else:
        xval = y
        xlabel = yname
    # Start the plot
    if ax is None:
        ax = plt.gca()
    # Plot the mean diff, limits of agreement and scatter
    ax.scatter(xval, diff, **_scatter_kwargs)
    ax.axhline(mean_diff, color="k", linestyle="-", lw=2)
    ax.axhline(high, color="k", linestyle=":", lw=1.5)
    ax.axhline(low, color="k", linestyle=":", lw=1.5)
    # Annotate values
    if annotate:
        loa_range = high - low
        offset = (loa_range / 100.0) * 1.5
        # Blend axes-fraction (x) with data (y) coordinates so the labels
        # stay anchored near the right edge whatever the x-limits are.
        trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)
        xloc = 0.98
        ax.text(xloc, mean_diff + offset, "Mean", ha="right", va="bottom", transform=trans)
        ax.text(xloc, mean_diff - offset, "%.2f" % mean_diff, ha="right", va="top", transform=trans)
        ax.text(
            xloc, high + offset, "+%.2f SD" % agreement, ha="right", va="bottom", transform=trans
        )
        ax.text(xloc, high - offset, "%.2f" % high, ha="right", va="top", transform=trans)
        ax.text(xloc, low - offset, "-%.2f SD" % agreement, ha="right", va="top", transform=trans)
        ax.text(xloc, low + offset, "%.2f" % low, ha="right", va="bottom", transform=trans)
    # Add 95% confidence intervals for mean bias and limits of agreement
    if confidence is not None:
        assert 0 < confidence < 1
        ci = dict()
        ci["mean"] = stats.t.interval(confidence, dof, loc=mean_diff, scale=mean_diff_se)
        ci["high"] = stats.t.interval(confidence, dof, loc=high, scale=high_low_se)
        ci["low"] = stats.t.interval(confidence, dof, loc=low, scale=high_low_se)
        ax.axhspan(ci["mean"][0], ci["mean"][1], facecolor="tab:grey", alpha=0.2)
        ax.axhspan(ci["high"][0], ci["high"][1], facecolor=_scatter_kwargs["color"], alpha=0.2)
        ax.axhspan(ci["low"][0], ci["low"][1], facecolor=_scatter_kwargs["color"], alpha=0.2)
    # Labels
    ax.set_ylabel(f"{xname} - {yname}")
    ax.set_xlabel(xlabel)
    sns.despine(ax=ax)
    return ax
| (x, y, agreement=1.96, xaxis='mean', confidence=0.95, annotate=True, ax=None, **kwargs) |
32,039 | pingouin.plotting | plot_circmean | Plot the circular mean and vector length of a set of angles
on the unit circle.
.. versionadded:: 0.3.3
Parameters
----------
angles : array or list
Angles (expressed in radians). Only 1D array are supported here.
square: bool
If True (default), ensure equal aspect ratio between X and Y axes.
ax : matplotlib axes
Axis on which to draw the plot.
kwargs_markers : dict
Optional keywords arguments that are passed to
:obj:`matplotlib.axes.Axes.plot`
to control the markers aesthetics.
kwargs_arrow : dict
Optional keywords arguments that are passed to
:obj:`matplotlib.axes.Axes.arrow`
to control the arrow aesthetics.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Examples
--------
Default plot
.. plot::
>>> import pingouin as pg
>>> ax = pg.plot_circmean([0.05, -0.8, 1.2, 0.8, 0.5, -0.3, 0.3, 0.7])
Changing some aesthetics parameters
.. plot::
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> _, ax = plt.subplots(1, 1, figsize=(3, 3))
>>> ax = pg.plot_circmean([0.05, -0.8, 1.2, 0.8, 0.5, -0.3, 0.3, 0.7],
... kwargs_markers=dict(color='k', mfc='k'),
... kwargs_arrow=dict(ec='k', fc='k'), ax=ax)
.. plot::
>>> import pingouin as pg
>>> import seaborn as sns
>>> sns.set(font_scale=1.5, style='white')
>>> ax = pg.plot_circmean([0.8, 1.5, 3.14, 5.2, 6.1, 2.8, 2.6, 3.2],
... kwargs_markers=dict(marker="None"))
def plot_circmean(
    angles,
    square=True,
    ax=None,
    kwargs_markers=None,
    kwargs_arrow=None,
):
    """Plot the circular mean and vector length of a set of angles
    on the unit circle.

    .. versionadded:: 0.3.3

    Parameters
    ----------
    angles : array or list
        Angles (expressed in radians). Only 1D arrays are supported here.
    square: bool
        If True (default), ensure equal aspect ratio between X and Y axes.
    ax : matplotlib axes
        Axis on which to draw the plot.
    kwargs_markers : dict
        Optional keywords arguments that are passed to
        :obj:`matplotlib.axes.Axes.plot`
        to control the markers aesthetics. Keys that are not specified fall
        back to ``dict(color="tab:blue", marker="o", mfc="none", ms=10)``.
        The passed dict is never modified.
    kwargs_arrow : dict
        Optional keywords arguments that are passed to
        :obj:`matplotlib.axes.Axes.arrow`
        to control the arrow aesthetics. Keys that are not specified fall
        back to ``dict(width=0.01, head_width=0.1, head_length=0.1,
        fc="tab:red", ec="tab:red")``. The passed dict is never modified.

    Returns
    -------
    ax : Matplotlib Axes instance
        Returns the Axes object with the plot for further tweaking.

    Examples
    --------
    Default plot

    .. plot::

        >>> import pingouin as pg
        >>> ax = pg.plot_circmean([0.05, -0.8, 1.2, 0.8, 0.5, -0.3, 0.3, 0.7])

    Changing some aesthetics parameters

    .. plot::

        >>> import pingouin as pg
        >>> import matplotlib.pyplot as plt
        >>> _, ax = plt.subplots(1, 1, figsize=(3, 3))
        >>> ax = pg.plot_circmean([0.05, -0.8, 1.2, 0.8, 0.5, -0.3, 0.3, 0.7],
        ...                       kwargs_markers=dict(color='k', mfc='k'),
        ...                       kwargs_arrow=dict(ec='k', fc='k'), ax=ax)

    .. plot::

        >>> import pingouin as pg
        >>> import seaborn as sns
        >>> sns.set(font_scale=1.5, style='white')
        >>> ax = pg.plot_circmean([0.8, 1.5, 3.14, 5.2, 6.1, 2.8, 2.6, 3.2],
        ...                       kwargs_markers=dict(marker="None"))
    """
    from matplotlib.patches import Circle
    from .circular import circ_r, circ_mean

    # Sanity checks
    angles = np.asarray(angles)
    assert angles.ndim == 1, "angles must be a one-dimensional array."
    assert angles.size > 1, "angles must have at least 2 values."

    # BUGFIX: the defaults used to be mutable dict default arguments, and
    # missing keys were written directly into the caller-supplied dicts
    # (mutating the caller's object). Merge user styles over the defaults
    # into fresh dicts instead; the rendered plot is identical.
    if kwargs_markers is None:
        kwargs_markers = {}
    if kwargs_arrow is None:
        kwargs_arrow = {}
    assert isinstance(kwargs_markers, dict), "kwargs_markers must be a dict."
    assert isinstance(kwargs_arrow, dict), "kwargs_arrow must be a dict."
    kwargs_markers = {
        **dict(color="tab:blue", marker="o", mfc="none", ms=10),
        **kwargs_markers,
    }
    kwargs_arrow = {
        **dict(width=0.01, head_width=0.1, head_length=0.1, fc="tab:red", ec="tab:red"),
        **kwargs_arrow,
    }

    # Convert angles to unit vectors on the complex plane
    z = np.exp(1j * angles)
    r = circ_r(angles)  # Resulting vector length
    phi = circ_mean(angles)  # Circular mean
    zm = r * np.exp(1j * phi)  # Mean resultant vector

    # Plot unit circle
    if ax is None:
        ax = plt.gca()
    circle = Circle((0, 0), 1, edgecolor="k", facecolor="none", linewidth=2)
    ax.add_patch(circle)
    ax.axvline(0, lw=1, ls=":", color="slategrey")
    ax.axhline(0, lw=1, ls=":", color="slategrey")
    ax.plot(np.real(z), np.imag(z), ls="None", **kwargs_markers)

    # Plot mean resultant vector
    ax.arrow(0, 0, np.real(zm), np.imag(zm), **kwargs_arrow)

    # X and Y ticks in radians
    ax.set_xticks([])
    ax.set_yticks([])
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.text(1.2, 0, "0", verticalalignment="center")
    ax.text(-1.3, 0, r"$\pi$", verticalalignment="center")
    ax.text(0, 1.2, r"$+\pi/2$", horizontalalignment="center")
    ax.text(0, -1.3, r"$-\pi/2$", horizontalalignment="center")

    # Make square
    if square:
        ax.set_aspect("equal")
    return ax
| (angles, square=True, ax=None, kwargs_markers={'color': 'tab:blue', 'marker': 'o', 'mfc': 'none', 'ms': 10}, kwargs_arrow={'width': 0.01, 'head_width': 0.1, 'head_length': 0.1, 'fc': 'tab:red', 'ec': 'tab:red'}) |
32,040 | pingouin.plotting | plot_paired |
Paired plot.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Long-format dataFrame.
dv : string
Name of column containing the dependent variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
order : list of str
List of values in ``within`` that define the order of elements on the
x-axis of the plot. If None, uses alphabetical order.
boxplot : boolean
If True, add a boxplot to the paired lines using the
:py:func:`seaborn.boxplot` function.
boxplot_in_front : boolean
If True, the boxplot is plotted on the foreground (i.e. above the
individual lines) and with a slight transparency. This makes the
overall plot more readable when plotting a large numbers of subjects.
.. versionadded:: 0.3.8
orient : string
Plot the boxplots vertically and the subjects on the x-axis if
``orient='v'`` (default). Set to ``orient='h'`` to rotate the plot by
by 90 degrees.
.. versionadded:: 0.3.9
ax : matplotlib axes
Axis on which to draw the plot.
colors : list of str
Line colors names. Default is green when value increases from A to B,
indianred when value decreases from A to B and grey when the value is
the same in both measurements.
pointplot_kwargs : dict
Dictionary of optional arguments that are passed to the
:py:func:`seaborn.pointplot` function.
boxplot_kwargs : dict
Dictionary of optional arguments that are passed to the
:py:func:`seaborn.boxplot` function.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
Data must be a long-format pandas DataFrame. Missing values are automatically removed using a
strict listwise approach (= complete-case analysis).
Examples
--------
Default paired plot:
.. plot::
>>> import pingouin as pg
>>> df = pg.read_dataset('mixed_anova').query("Time != 'January'")
>>> df = df.query("Group == 'Meditation' and Subject > 40")
>>> ax = pg.plot_paired(data=df, dv='Scores', within='Time', subject='Subject')
Paired plot on an existing axis (no boxplot and uniform color):
.. plot::
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> df = pg.read_dataset('mixed_anova').query("Time != 'January'")
>>> df = df.query("Group == 'Meditation' and Subject > 40")
>>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4))
>>> pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', ax=ax1, boxplot=False,
... colors=['grey', 'grey', 'grey']) # doctest: +SKIP
Horizontal paired plot with three unique within-levels:
.. plot::
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> df = pg.read_dataset('mixed_anova').query("Group == 'Meditation'")
>>> # df = df.query("Group == 'Meditation' and Subject > 40")
>>> pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', orient='h') # doctest: +SKIP
With the boxplot on the foreground:
.. plot::
>>> import pingouin as pg
>>> df = pg.read_dataset('mixed_anova').query("Time != 'January'")
>>> df = df.query("Group == 'Control'")
>>> ax = pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', boxplot_in_front=True)
| def plot_paired(
data=None,
dv=None,
within=None,
subject=None,
order=None,
boxplot=True,
boxplot_in_front=False,
orient="v",
ax=None,
colors=["green", "grey", "indianred"],
pointplot_kwargs={"scale": 0.6, "marker": "."},
boxplot_kwargs={"color": "lightslategrey", "width": 0.2},
):
"""
Paired plot.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Long-format dataFrame.
dv : string
Name of column containing the dependent variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
order : list of str
List of values in ``within`` that define the order of elements on the
x-axis of the plot. If None, uses alphabetical order.
boxplot : boolean
If True, add a boxplot to the paired lines using the
:py:func:`seaborn.boxplot` function.
boxplot_in_front : boolean
If True, the boxplot is plotted on the foreground (i.e. above the
individual lines) and with a slight transparency. This makes the
overall plot more readable when plotting a large numbers of subjects.
.. versionadded:: 0.3.8
orient : string
Plot the boxplots vertically and the subjects on the x-axis if
``orient='v'`` (default). Set to ``orient='h'`` to rotate the plot by
by 90 degrees.
.. versionadded:: 0.3.9
ax : matplotlib axes
Axis on which to draw the plot.
colors : list of str
Line colors names. Default is green when value increases from A to B,
indianred when value decreases from A to B and grey when the value is
the same in both measurements.
pointplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.pointplot` function.
boxplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.boxplot` function.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
Data must be a long-format pandas DataFrame. Missing values are automatically removed using a
strict listwise approach (= complete-case analysis).
Examples
--------
Default paired plot:
.. plot::
>>> import pingouin as pg
>>> df = pg.read_dataset('mixed_anova').query("Time != 'January'")
>>> df = df.query("Group == 'Meditation' and Subject > 40")
>>> ax = pg.plot_paired(data=df, dv='Scores', within='Time', subject='Subject')
Paired plot on an existing axis (no boxplot and uniform color):
.. plot::
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> df = pg.read_dataset('mixed_anova').query("Time != 'January'")
>>> df = df.query("Group == 'Meditation' and Subject > 40")
>>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4))
>>> pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', ax=ax1, boxplot=False,
... colors=['grey', 'grey', 'grey']) # doctest: +SKIP
Horizontal paired plot with three unique within-levels:
.. plot::
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> df = pg.read_dataset('mixed_anova').query("Group == 'Meditation'")
>>> # df = df.query("Group == 'Meditation' and Subject > 40")
>>> pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', orient='h') # doctest: +SKIP
With the boxplot on the foreground:
.. plot::
>>> import pingouin as pg
>>> df = pg.read_dataset('mixed_anova').query("Time != 'January'")
>>> df = df.query("Group == 'Control'")
>>> ax = pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', boxplot_in_front=True)
"""
from pingouin.utils import _check_dataframe
# Update default kwargs with specified inputs
_pointplot_kwargs = {"scale": 0.6, "marker": "."}
_pointplot_kwargs.update(pointplot_kwargs)
_boxplot_kwargs = {"color": "lightslategrey", "width": 0.2}
_boxplot_kwargs.update(boxplot_kwargs)
# Extract pointplot alpha, if set
pp_alpha = _pointplot_kwargs.pop("alpha", 1.0)
# Calculate size of the plot elements by scale as in Seaborn pointplot
scale = _pointplot_kwargs.pop("scale")
lw = plt.rcParams["lines.linewidth"] * 1.8 * scale # get the linewidth
mew = lw * 0.75 # get the markeredgewidth
markersize = np.pi * np.square(lw) * 2 # get the markersize
# Set boxplot in front of Line2D plot (zorder=2 for both) and add alpha
if boxplot_in_front:
_boxplot_kwargs.update(
{
"boxprops": {"zorder": 2},
"whiskerprops": {"zorder": 2},
"zorder": 2,
}
)
# Validate args
data = _check_dataframe(data=data, dv=dv, within=within, subject=subject, effects="within")
# Pivot and melt the table. This has several effects:
# 1) Force missing values to be explicit (a NaN cell is created)
# 2) Automatic collapsing to the mean if multiple within factors are present
# 3) If using dropna, remove rows with missing values (listwise deletion).
# The latter is the same behavior as JASP (= strict complete-case analysis).
data_piv = data.pivot_table(index=subject, columns=within, values=dv, observed=True)
data_piv = data_piv.dropna()
data = data_piv.melt(ignore_index=False, value_name=dv).reset_index()
# Extract within-subject level (alphabetical order)
x_cat = np.unique(data[within])
if order is None:
order = x_cat
else:
assert len(order) == len(
x_cat
), "Order must have the same number of elements as the number of levels in `within`."
# Substitue within by integer order of the ordered columns to allow for
# changing the order of numeric withins.
data["wthn"] = data[within].replace({_ordr: i for i, _ordr in enumerate(order)})
order_num = range(len(order)) # Make numeric order
# Start the plot
if ax is None:
ax = plt.gca()
# Set x and y depending on orientation using the num. replacement within
_x = "wthn" if orient == "v" else dv
_y = dv if orient == "v" else "wthn"
for cat in range(len(x_cat) - 1):
_order = (order_num[cat], order_num[cat + 1])
# Extract data of the current subject-combination
data_now = data.loc[data["wthn"].isin(_order), [dv, "wthn", subject]]
# Select colors for all lines between the current subjects
y1 = data_now.loc[data_now["wthn"] == _order[0], dv].to_numpy()
y2 = data_now.loc[data_now["wthn"] == _order[1], dv].to_numpy()
# Line and scatter colors depending on subject dv trend
_colors = np.where(y1 < y2, colors[0], np.where(y1 > y2, colors[2], colors[1]))
# Line and scatter colors as hue-indexed dictionary
_colors = {subj: clr for subj, clr in zip(data_now[subject].unique(), _colors)}
# Plot individual lines using Seaborn
sns.lineplot(
data=data_now,
x=_x,
y=_y,
hue=subject,
palette=_colors,
ls="-",
lw=lw,
legend=False,
ax=ax,
)
# Plot individual markers using Seaborn
sns.scatterplot(
data=data_now,
x=_x,
y=_y,
hue=subject,
palette=_colors,
edgecolor="face",
lw=mew,
sizes=[markersize] * data_now.shape[0],
legend=False,
ax=ax,
**_pointplot_kwargs,
)
# Set zorder and alpha of pointplot markers and lines
_ = plt.setp(ax.collections, alpha=pp_alpha, zorder=2) # Set marker alpha
_ = plt.setp(ax.lines, alpha=pp_alpha, zorder=2) # Set line alpha
if boxplot:
# Set boxplot x and y depending on orientation
_xbp = within if orient == "v" else dv
_ybp = dv if orient == "v" else within
sns.boxplot(data=data, x=_xbp, y=_ybp, order=order, ax=ax, orient=orient, **_boxplot_kwargs)
# Set alpha to patch of boxplot but not to whiskers
for patch in ax.artists:
r, g, b, a = patch.get_facecolor()
patch.set_facecolor((r, g, b, 0.75))
else:
# If no boxplot, axis needs manual styling as in Seaborn pointplot
if orient == "v":
xlabel, ylabel = within, dv
ax.set_xticks(np.arange(len(x_cat)))
ax.set_xticklabels(order)
ax.xaxis.grid(False)
ax.set_xlim(-0.5, len(x_cat) - 0.5, auto=None)
else:
xlabel, ylabel = dv, within
ax.set_yticks(np.arange(len(x_cat)))
ax.set_yticklabels(order)
ax.yaxis.grid(False)
ax.set_ylim(-0.5, len(x_cat) - 0.5, auto=None)
ax.invert_yaxis()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# Despine and trim
sns.despine(trim=True, ax=ax)
return ax
| (data=None, dv=None, within=None, subject=None, order=None, boxplot=True, boxplot_in_front=False, orient='v', ax=None, colors=['green', 'grey', 'indianred'], pointplot_kwargs={'scale': 0.6, 'marker': '.'}, boxplot_kwargs={'color': 'lightslategrey', 'width': 0.2}) |
32,041 | pingouin.plotting | plot_rm_corr | Plot a repeated measures correlation.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Dataframe.
x, y : string
Name of columns in ``data`` containing the two dependent variables.
subject : string
Name of column in ``data`` containing the subject indicator.
legend : boolean
If True, add legend to plot. Legend will show all the unique values in
``subject``.
kwargs_facetgrid : dict
Optional keyword arguments passed to :py:class:`seaborn.FacetGrid`
kwargs_line : dict
Optional keyword arguments passed to :py:class:`matplotlib.pyplot.plot`
kwargs_scatter : dict
Optional keyword arguments passed to :py:class:`matplotlib.pyplot.scatter`
Returns
-------
g : :py:class:`seaborn.FacetGrid`
Seaborn FacetGrid.
See also
--------
rm_corr
Notes
-----
Repeated measures correlation [1]_ (rmcorr) is a statistical technique
for determining the common within-individual association for paired
measures assessed on two or more occasions for multiple individuals.
Results have been tested against the
`rmcorr <https://github.com/cran/rmcorr>` R package. Note that this
function requires `statsmodels
<https://www.statsmodels.org/stable/index.html>`_.
Missing values are automatically removed from the ``data``
(listwise deletion).
References
----------
.. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation.
Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456
Examples
--------
Default repeated measures correlation plot
.. plot::
>>> import pingouin as pg
>>> df = pg.read_dataset('rm_corr')
>>> g = pg.plot_rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
With some tweakings
.. plot::
>>> import pingouin as pg
>>> import seaborn as sns
>>> df = pg.read_dataset('rm_corr')
>>> sns.set(style='darkgrid', font_scale=1.2)
>>> g = pg.plot_rm_corr(data=df, x='pH', y='PacO2',
... subject='Subject', legend=True,
... kwargs_facetgrid=dict(height=4.5, aspect=1.5,
... palette='Spectral'))
def plot_rm_corr(
    data=None,
    x=None,
    y=None,
    subject=None,
    legend=False,
    kwargs_facetgrid=dict(height=4, aspect=1),
    kwargs_line=dict(ls="solid"),
    kwargs_scatter=dict(marker="o"),
):
    """Plot a repeated measures correlation.
    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Dataframe.
    x, y : string
        Name of columns in ``data`` containing the two dependent variables.
    subject : string
        Name of column in ``data`` containing the subject indicator.
    legend : boolean
        If True, add legend to plot. Legend will show all the unique values in
        ``subject``.
    kwargs_facetgrid : dict
        Optional keyword arguments passed to :py:class:`seaborn.FacetGrid`
    kwargs_line : dict
        Optional keyword arguments passed to :py:class:`matplotlib.pyplot.plot`
    kwargs_scatter : dict
        Optional keyword arguments passed to :py:class:`matplotlib.pyplot.scatter`
    Returns
    -------
    g : :py:class:`seaborn.FacetGrid`
        Seaborn FacetGrid.
    See also
    --------
    rm_corr
    Notes
    -----
    Repeated measures correlation [1]_ (rmcorr) is a statistical technique
    for determining the common within-individual association for paired
    measures assessed on two or more occasions for multiple individuals.
    Results have been tested against the
    `rmcorr <https://github.com/cran/rmcorr>` R package. Note that this
    function requires `statsmodels
    <https://www.statsmodels.org/stable/index.html>`_.
    Missing values are automatically removed from the ``data``
    (listwise deletion).
    References
    ----------
    .. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation.
       Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456
    Examples
    --------
    Default repeated measures correlation plot
    .. plot::
        >>> import pingouin as pg
        >>> df = pg.read_dataset('rm_corr')
        >>> g = pg.plot_rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
    With some tweakings
    .. plot::
        >>> import pingouin as pg
        >>> import seaborn as sns
        >>> df = pg.read_dataset('rm_corr')
        >>> sns.set(style='darkgrid', font_scale=1.2)
        >>> g = pg.plot_rm_corr(data=df, x='pH', y='PacO2',
        ...                     subject='Subject', legend=True,
        ...                     kwargs_facetgrid=dict(height=4.5, aspect=1.5,
        ...                                           palette='Spectral'))
    """
    # Check that statsmodels is installed
    from pingouin.utils import _is_statsmodels_installed

    _is_statsmodels_installed(raise_error=True)
    from statsmodels.formula.api import ols

    # Safety check (duplicated from pingouin.rm_corr)
    assert isinstance(data, pd.DataFrame), "Data must be a DataFrame"
    assert x in data.columns, "The %s column is not in data." % x
    assert y in data.columns, "The %s column is not in data." % y
    assert data[x].dtype.kind in "bfiu", "%s must be numeric." % x
    assert data[y].dtype.kind in "bfiu", "%s must be numeric." % y
    assert subject in data.columns, "The %s column is not in data." % subject
    if data[subject].nunique() < 3:
        raise ValueError("rm_corr requires at least 3 unique subjects.")

    # Remove missing values (listwise deletion)
    data = data[[x, y, subject]].dropna(axis=0)

    # Copy the FacetGrid kwargs before modifying them. Without this, injecting
    # the default palette below would mutate the *mutable default argument*
    # (and any dict passed by the caller), leaking state across calls.
    kwargs_facetgrid = dict(kwargs_facetgrid)

    # Fit ANCOVA model
    # https://patsy.readthedocs.io/en/latest/builtins-reference.html
    # C marks the data as categorical
    # Q allows to quote variable that do not meet Python variable name rule
    # e.g. if variable is "weight.in.kg" or "2A"
    assert x not in ["C", "Q"], "`x` must not be 'C' or 'Q'."
    assert y not in ["C", "Q"], "`y` must not be 'C' or 'Q'."
    assert subject not in ["C", "Q"], "`subject` must not be 'C' or 'Q'."
    formula = f"Q('{y}') ~ C(Q('{subject}')) + Q('{x}')"
    model = ols(formula, data=data).fit()

    # Fitted values of the ANCOVA give the per-subject parallel lines
    data["pred"] = model.fittedvalues

    # Define color palette (one hue per subject) unless the caller provided one
    if "palette" not in kwargs_facetgrid:
        kwargs_facetgrid["palette"] = sns.hls_palette(data[subject].nunique())

    # Start plot: one regression line + scatter per subject on a shared axis
    g = sns.FacetGrid(data, hue=subject, **kwargs_facetgrid)
    g = g.map(sns.regplot, x, "pred", scatter=False, ci=None, truncate=True, line_kws=kwargs_line)
    g = g.map(sns.scatterplot, x, y, **kwargs_scatter)

    if legend:
        g.add_legend()

    return g
| (data=None, x=None, y=None, subject=None, legend=False, kwargs_facetgrid={'height': 4, 'aspect': 1}, kwargs_line={'ls': 'solid'}, kwargs_scatter={'marker': 'o'}) |
32,042 | pingouin.plotting | plot_shift | Shift plot.
Parameters
----------
x, y : array_like
First and second set of observations.
paired : bool
Specify whether ``x`` and ``y`` are related (i.e. repeated measures) or independent.
.. versionadded:: 0.3.0
n_boot : int
Number of bootstrap iterations. The higher, the better, the slower.
percentiles: array_like
Sequence of percentiles to compute, which must be between 0 and 100 inclusive.
Default set to [10, 20, 30, 40, 50, 60, 70, 80, 90].
confidence : float
Confidence level (0.95 = 95%) for the confidence intervals.
seed : int or None
Random seed for generating bootstrap samples, can be integer or None for no seed (default).
show_median: boolean
If True (default), show the median with black lines.
violin: boolean
If True (default), plot the density of X and Y distributions. Defaut set to True.
Returns
-------
fig : matplotlib Figure instance
Matplotlib Figure. To get the individual axes, use fig.axes.
See also
--------
harrelldavis
Notes
-----
The shift plot is described in [1]_. It computes a shift function [2]_ for two (in)dependent
groups using the robust Harrell-Davis quantile estimator in conjunction with bias-corrected
bootstrap confidence intervals.
References
----------
.. [1] Rousselet, G. A., Pernet, C. R. and Wilcox, R. R. (2017). Beyond
differences in means: robust graphical methods to compare two groups
in neuroscience. Eur J Neurosci, 46: 1738-1748.
doi:10.1111/ejn.13610
.. [2] https://garstats.wordpress.com/2016/07/12/shift-function/
Examples
--------
Default shift plot
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(42)
>>> x = np.random.normal(5.5, 2, 50)
>>> y = np.random.normal(6, 1.5, 50)
>>> fig = pg.plot_shift(x, y)
With different options, and custom axes labels
.. plot::
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> data = pg.read_dataset("pairwise_corr")
>>> fig = pg.plot_shift(data["Neuroticism"], data["Conscientiousness"], paired=True,
... n_boot=2000, percentiles=[25, 50, 75], show_median=False, seed=456,
... violin=False)
>>> fig.axes[0].set_xlabel("Groups")
>>> fig.axes[0].set_ylabel("Values", size=15)
>>> fig.axes[0].set_title("Comparing Neuroticism and Conscientiousness", size=15)
>>> fig.axes[1].set_xlabel("Neuroticism quantiles", size=12)
>>> plt.tight_layout()
def plot_shift(
    x,
    y,
    paired=False,
    n_boot=1000,
    percentiles=np.arange(10, 100, 10),
    confidence=0.95,
    seed=None,
    show_median=True,
    violin=True,
):
    """Shift plot.
    Parameters
    ----------
    x, y : array_like
        First and second set of observations.
    paired : bool
        Specify whether ``x`` and ``y`` are related (i.e. repeated measures) or independent.
        .. versionadded:: 0.3.0
    n_boot : int
        Number of bootstrap iterations. The higher, the better, the slower.
    percentiles : array_like
        Sequence of percentiles to compute, which must be between 0 and 100 inclusive.
        Default set to [10, 20, 30, 40, 50, 60, 70, 80, 90].
    confidence : float
        Confidence level (0.95 = 95%) for the confidence intervals.
    seed : int or None
        Random seed for generating bootstrap samples, can be integer or None for no seed (default).
    show_median : boolean
        If True (default), show the median with black lines.
    violin : boolean
        If True (default), plot the density of X and Y distributions. Default set to True.
    Returns
    -------
    fig : matplotlib Figure instance
        Matplotlib Figure. To get the individual axes, use fig.axes.
    See also
    --------
    harrelldavis
    Notes
    -----
    The shift plot is described in [1]_. It computes a shift function [2]_ for two (in)dependent
    groups using the robust Harrell-Davis quantile estimator in conjunction with bias-corrected
    bootstrap confidence intervals.
    References
    ----------
    .. [1] Rousselet, G. A., Pernet, C. R. and Wilcox, R. R. (2017). Beyond
           differences in means: robust graphical methods to compare two groups
           in neuroscience. Eur J Neurosci, 46: 1738-1748.
           doi:10.1111/ejn.13610
    .. [2] https://garstats.wordpress.com/2016/07/12/shift-function/
    Examples
    --------
    Default shift plot
    .. plot::
        >>> import numpy as np
        >>> import pingouin as pg
        >>> np.random.seed(42)
        >>> x = np.random.normal(5.5, 2, 50)
        >>> y = np.random.normal(6, 1.5, 50)
        >>> fig = pg.plot_shift(x, y)
    With different options, and custom axes labels
    .. plot::
        >>> import pingouin as pg
        >>> import matplotlib.pyplot as plt
        >>> data = pg.read_dataset("pairwise_corr")
        >>> fig = pg.plot_shift(data["Neuroticism"], data["Conscientiousness"], paired=True,
        ...                     n_boot=2000, percentiles=[25, 50, 75], show_median=False, seed=456,
        ...                     violin=False)
        >>> fig.axes[0].set_xlabel("Groups")
        >>> fig.axes[0].set_ylabel("Values", size=15)
        >>> fig.axes[0].set_title("Comparing Neuroticism and Conscientiousness", size=15)
        >>> fig.axes[1].set_xlabel("Neuroticism quantiles", size=12)
        >>> plt.tight_layout()
    """
    from pingouin.regression import _bias_corrected_ci
    from pingouin.nonparametric import harrelldavis as hd
    # Safety check: 1D numeric arrays, no NaNs, at least 10 samples each
    x = np.asarray(x)
    y = np.asarray(y)
    percentiles = np.asarray(percentiles) / 100  # Convert to 0 - 1 range
    assert x.ndim == 1, "x must be 1D."
    assert y.ndim == 1, "y must be 1D."
    nx, ny = x.size, y.size
    assert not np.isnan(x).any(), "Missing values are not allowed."
    assert not np.isnan(y).any(), "Missing values are not allowed."
    assert nx >= 10, "x must have at least 10 samples."
    assert ny >= 10, "y must have at least 10 samples."
    assert 0 < confidence < 1, "confidence must be between 0 and 1."
    if paired:
        assert nx == ny, "x and y must have the same size when paired=True."
    # Robust percentiles via the Harrell-Davis quantile estimator
    x_per = hd(x, percentiles)
    y_per = hd(y, percentiles)
    delta = y_per - x_per
    # Compute bootstrap distribution of differences.
    # When paired, the SAME resampled row indices are applied to x and y so
    # that the pairing is preserved; otherwise each sample is resampled freely.
    rng = np.random.RandomState(seed)
    if paired:
        bootsam = rng.choice(np.arange(nx), size=(nx, n_boot), replace=True)
        bootstat = hd(y[bootsam], percentiles, axis=0) - hd(x[bootsam], percentiles, axis=0)
    else:
        x_list = rng.choice(x, size=(nx, n_boot), replace=True)
        y_list = rng.choice(y, size=(ny, n_boot), replace=True)
        bootstat = hd(y_list, percentiles, axis=0) - hd(x_list, percentiles, axis=0)
    # Find upper and lower confidence interval for each quantile
    # Bias-corrected bootstrapped confidence interval
    # NOTE: alpha=1 collapses the CI to a single point, which is used here as
    # the bias-corrected point estimate of the difference.
    lower, median_per, upper = [], [], []
    for i, d in enumerate(delta):
        ci = _bias_corrected_ci(bootstat[i, :], d, alpha=(1 - confidence))
        median_per.append(_bias_corrected_ci(bootstat[i, :], d, alpha=1)[0])
        lower.append(ci[0])
        upper.append(ci[1])
    lower = np.asarray(lower)
    median_per = np.asarray(median_per)
    upper = np.asarray(upper)
    # Create long-format DataFrame for use with Seaborn
    data = pd.DataFrame({"value": np.concatenate([x, y]), "variable": ["X"] * nx + ["Y"] * ny})
    #############################
    # Plots X and Y distributions
    #############################
    fig = plt.figure(figsize=(8, 5))
    ax1 = plt.subplot2grid((3, 3), (0, 0), rowspan=2, colspan=3)
    # Boxplot X & Y
    # Tukey-style whisker endpoints: 1.5 * IQR beyond the quartiles, clipped
    # to the observed data range (vals must be sorted ascending).
    def adjacent_values(vals, q1, q3):
        upper_adjacent_value = q3 + (q3 - q1) * 1.5
        upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
        lower_adjacent_value = q1 - (q3 - q1) * 1.5
        lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
        return lower_adjacent_value, upper_adjacent_value
    # Hand-drawn horizontal "boxplots": x at y=1.2, y at y=-0.2
    for dis, pos in zip([x, y], [1.2, -0.2]):
        qrt1, medians, qrt3 = np.percentile(dis, [25, 50, 75])
        whiskers = adjacent_values(np.sort(dis), qrt1, qrt3)
        ax1.plot(medians, pos, marker="o", color="white", zorder=10)
        ax1.hlines(pos, qrt1, qrt3, color="k", linestyle="-", lw=7, zorder=9)
        ax1.hlines(pos, whiskers[0], whiskers[1], color="k", linestyle="-", lw=2, zorder=9)
    ax1 = sns.stripplot(
        data=data,
        x="value",
        y="variable",
        orient="h",
        order=["Y", "X"],
        palette=["#88bedc", "#cfcfcf"],
    )
    if violin:
        vl = plt.violinplot([y, x], showextrema=False, vert=False, widths=1)
        # Upper plot: clip the violin body to a half-violin and shift it so
        # it sits just above the Y strip of points.
        paths = vl["bodies"][0].get_paths()[0]
        paths.vertices[:, 1][paths.vertices[:, 1] >= 1] = 1
        paths.vertices[:, 1] = paths.vertices[:, 1] - 1.2
        vl["bodies"][0].set_edgecolor("k")
        vl["bodies"][0].set_facecolor("#88bedc")
        vl["bodies"][0].set_alpha(0.8)
        # Lower plot: same half-violin treatment for the X distribution.
        paths = vl["bodies"][1].get_paths()[0]
        paths.vertices[:, 1][paths.vertices[:, 1] <= 2] = 2
        paths.vertices[:, 1] = paths.vertices[:, 1] - 0.8
        vl["bodies"][1].set_edgecolor("k")
        vl["bodies"][1].set_facecolor("#cfcfcf")
        vl["bodies"][1].set_alpha(0.8)
    # Rescale ylim (inverted axis so Y appears on top, X at the bottom)
    ax1.set_ylim(2, -1)
    for i in range(len(percentiles)):
        # Connection between quantiles: blue if the CI of the difference is
        # entirely negative, red if entirely positive, gray otherwise.
        if upper[i] < 0:
            col = "#4c72b0"
        elif lower[i] > 0:
            col = "#c34e52"
        else:
            col = "darkgray"
        plt.plot([y_per[i], x_per[i]], [0.2, 0.8], marker="o", color=col, zorder=10)
        # X quantiles
        plt.plot([x_per[i], x_per[i]], [0.8, 1.2], "k--", zorder=9)
        # Y quantiles
        plt.plot([y_per[i], y_per[i]], [-0.2, 0.2], "k--", zorder=9)
    if show_median:
        x_med, y_med = np.median(x), np.median(y)
        plt.plot([x_med, x_med], [0.8, 1.2], "k-")
        plt.plot([y_med, y_med], [-0.2, 0.2], "k-")
    plt.xlabel("Scores (a.u.)", size=15)
    ax1.set_yticklabels(["Y", "X"], size=15)
    ax1.set_ylabel("")
    #######################
    # Plots quantiles shift
    #######################
    ax2 = plt.subplot2grid((3, 3), (2, 0), rowspan=1, colspan=3)
    for i, per in enumerate(x_per):
        # Same CI-based color coding as in the upper panel
        if upper[i] < 0:
            col = "#4c72b0"
        elif lower[i] > 0:
            col = "#c34e52"
        else:
            col = "darkgray"
        plt.plot([per, per], [upper[i], lower[i]], lw=3, color=col, zorder=10)
        plt.plot(per, median_per[i], marker="o", ms=10, color=col, zorder=10)
    plt.axhline(y=0, ls="--", lw=2, color="gray")
    ax2.set_xlabel("X quantiles", size=15)
    ax2.set_ylabel("Y - X quantiles \n differences (a.u.)", size=10)
    sns.despine()
    plt.tight_layout()
    return fig
| (x, y, paired=False, n_boot=1000, percentiles=array([10, 20, 30, 40, 50, 60, 70, 80, 90]), confidence=0.95, seed=None, show_median=True, violin=True) |
32,045 | pingouin.power | power_anova |
Evaluate power, sample size, effect size or significance level of a one-way balanced ANOVA.
Parameters
----------
eta_squared : float
ANOVA effect size (eta-squared, :math:`\eta^2`).
k : int
Number of groups
n : int
Sample size per group. Groups are assumed to be balanced (i.e. same sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level :math:`\alpha` (type I error probability). The default is 0.05.
Notes
-----
Exactly ONE of the parameters ``eta_squared``, ``k``, ``n``, ``power`` and ``alpha``
must be passed as None, and that parameter is determined from the others.
``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to
compute it.
This function is a Python adaptation of the `pwr.anova.test` function implemented in the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Statistical power is the likelihood that a study will detect an effect when there is an
effect there to be detected. A high statistical power means that there is a low probability of
concluding that there is no effect when there is one. Statistical power is mainly affected by
the effect size and the sample size.
For one-way ANOVA, eta-squared is the same as partial eta-squared. It can be evaluated from the
F-value (:math:`F^*`) and the degrees of freedom of the ANOVA (:math:`v_1, v_2`) using the
following formula:
.. math:: \eta^2 = \frac{v_1 F^*}{v_1 F^* + v_2}
GPower uses the :math:`f` effect size instead of the :math:`\eta^2`. The formula to convert
from one to the other are given below:
.. math:: f = \sqrt{\frac{\eta^2}{1 - \eta^2}}
.. math:: \eta^2 = \frac{f^2}{1 + f^2}
Using :math:`\eta^2` and the total sample size :math:`N`, the non-centrality parameter is
defined by:
.. math:: \delta = N * \frac{\eta^2}{1 - \eta^2}
Then the critical value of the non-central F-distribution is computed using the percentile
point function of the F-distribution with:
.. math:: q = 1 - \alpha
.. math:: v_1 = k - 1
.. math:: v_2 = N - k
where :math:`k` is the number of groups.
Finally, the power of the ANOVA is calculated using the survival function of the non-central
F-distribution using the previously computed critical value, non-centrality parameter, and
degrees of freedom.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e.
sample size, effect size, or significance level). If the solving fails, a nan value is
returned.
Results have been tested against GPower and the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Examples
--------
1. Compute achieved power
>>> from pingouin import power_anova
>>> print('power: %.4f' % power_anova(eta_squared=0.1, k=3, n=20))
power: 0.6082
2. Compute required number of groups
>>> print('k: %.4f' % power_anova(eta_squared=0.1, n=20, power=0.80))
k: 6.0944
3. Compute required sample size
>>> print('n: %.4f' % power_anova(eta_squared=0.1, k=3, power=0.80))
n: 29.9256
4. Compute achieved effect size
>>> print('eta-squared: %.4f' % power_anova(n=20, k=4, power=0.80, alpha=0.05))
eta-squared: 0.1255
5. Compute achieved alpha (significance)
>>> print('alpha: %.4f' % power_anova(eta_squared=0.1, n=20, k=4, power=0.80, alpha=None))
alpha: 0.1085
def power_anova(eta_squared=None, k=None, n=None, power=None, alpha=0.05):
    r"""
    Evaluate power, sample size, effect size or significance level of a one-way balanced ANOVA.

    Parameters
    ----------
    eta_squared : float
        ANOVA effect size (eta-squared, :math:`\eta^2`).
    k : int
        Number of groups
    n : int
        Sample size per group. Groups are assumed to be balanced (i.e. same sample size).
    power : float
        Test power (= 1 - type II error).
    alpha : float
        Significance level :math:`\alpha` (type I error probability). The default is 0.05.

    Notes
    -----
    Exactly ONE of the parameters ``eta_squared``, ``k``, ``n``, ``power`` and ``alpha``
    must be passed as None, and that parameter is determined from the others.
    ``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to
    compute it.

    This function is a Python adaptation of the `pwr.anova.test` function implemented in the
    `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.

    For one-way ANOVA, eta-squared is the same as partial eta-squared. GPower uses the
    :math:`f` effect size instead of :math:`\eta^2`; the conversions are:

    .. math:: f = \sqrt{\frac{\eta^2}{1 - \eta^2}}, \qquad \eta^2 = \frac{f^2}{1 + f^2}

    Using :math:`\eta^2` and the total sample size :math:`N = n \cdot k`, the
    non-centrality parameter is :math:`\delta = N \cdot \eta^2 / (1 - \eta^2)`. The power
    is the survival function of the non-central F-distribution evaluated at the critical
    value of the central F-distribution with :math:`v_1 = k - 1` and :math:`v_2 = N - k`
    degrees of freedom at level :math:`1 - \alpha`.

    :py:func:`scipy.optimize.brenth` is used to solve power equations for other variables
    (i.e. sample size, effect size, or significance level). If the solving fails, a nan
    value is returned.

    Results have been tested against GPower and the
    `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.

    Examples
    --------
    1. Compute achieved power

    >>> from pingouin import power_anova
    >>> print('power: %.4f' % power_anova(eta_squared=0.1, k=3, n=20))
    power: 0.6082

    2. Compute required number of groups

    >>> print('k: %.4f' % power_anova(eta_squared=0.1, n=20, power=0.80))
    k: 6.0944

    3. Compute required sample size

    >>> print('n: %.4f' % power_anova(eta_squared=0.1, k=3, power=0.80))
    n: 29.9256

    4. Compute achieved effect size

    >>> print('eta-squared: %.4f' % power_anova(n=20, k=4, power=0.80, alpha=0.05))
    eta-squared: 0.1255

    5. Compute achieved alpha (significance)

    >>> print('alpha: %.4f' % power_anova(eta_squared=0.1, n=20, k=4, power=0.80, alpha=None))
    alpha: 0.1085
    """
    # Check the number of arguments that are None: exactly one unknown is
    # allowed, and it is solved from the other four.
    n_none = sum([v is None for v in [eta_squared, k, n, power, alpha]])
    if n_none != 1:
        # Name the actual parameter (`eta_squared`, not `eta`) in the message.
        err = "Exactly one of eta_squared, k, n, power, and alpha must be None."
        raise ValueError(err)

    # Safety checks
    if eta_squared is not None:
        eta_squared = abs(eta_squared)
        # Convert eta-squared to Cohen's f-squared: f^2 = eta^2 / (1 - eta^2)
        f_sq = eta_squared / (1 - eta_squared)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1

    def func(f_sq, k, n, power, alpha):
        # Achieved power: survival function of the non-central F-distribution
        # with non-centrality parameter nc = N * f^2 at the central F critical value.
        nc = (n * k) * f_sq
        dof1 = k - 1
        dof2 = (n * k) - k
        fcrit = stats.f.ppf(1 - alpha, dof1, dof2)
        return stats.ncf.sf(fcrit, dof1, dof2, nc)

    # Evaluate missing variable
    if power is None:
        # Compute achieved power
        return func(f_sq, k, n, power, alpha)

    elif k is None:
        # Compute required number of groups (root of power(k) - target in [2, 100])
        def _eval_k(k, f_sq, n, power, alpha):
            return func(f_sq, k, n, power, alpha) - power

        try:
            return brenth(_eval_k, 2, 100, args=(f_sq, n, power, alpha))
        except ValueError:  # pragma: no cover
            return np.nan

    elif n is None:
        # Compute required sample size per group
        def _eval_n(n, f_sq, k, power, alpha):
            return func(f_sq, k, n, power, alpha) - power

        try:
            return brenth(_eval_n, 2, 1e07, args=(f_sq, k, power, alpha))
        except ValueError:  # pragma: no cover
            return np.nan

    elif eta_squared is None:
        # Compute achieved effect size: solve for f^2, then convert back to eta^2
        def _eval_eta(f_sq, k, n, power, alpha):
            return func(f_sq, k, n, power, alpha) - power

        try:
            f_sq = brenth(_eval_eta, 1e-10, 1 - 1e-10, args=(k, n, power, alpha))
            return f_sq / (f_sq + 1)  # Return eta-square
        except ValueError:  # pragma: no cover
            return np.nan

    else:
        # Compute achieved alpha
        def _eval_alpha(alpha, f_sq, k, n, power):
            return func(f_sq, k, n, power, alpha) - power

        try:
            return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(f_sq, k, n, power))
        except ValueError:  # pragma: no cover
            return np.nan
| (eta_squared=None, k=None, n=None, power=None, alpha=0.05) |
32,046 | pingouin.power | power_chi2 |
Evaluate power, sample size, effect size or significance level of chi-squared tests.
Parameters
----------
dof : float
Degree of freedom (depends on the chosen test).
w : float
Cohen's w effect size [1]_.
n : int
Total number of observations.
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability). The default is 0.05.
Notes
-----
Exactly ONE of the parameters ``w``, ``n``, ``power`` and ``alpha`` must be passed as None,
and that parameter is determined from the others. The degrees of freedom ``dof`` must always
be specified.
``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to
compute it.
This function is a Python adaptation of the `pwr.chisq.test` function implemented in the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Statistical power is the likelihood that a study will detect an effect when there is an effect
there to be detected. A high statistical power means that there is a low probability of
concluding that there is no effect when there is one. Statistical power is mainly affected by
the effect size and the sample size.
The non-centrality parameter is defined by:
.. math:: \delta = N * w^2
Then the critical value is computed using the percentile point function of the :math:`\chi^2`
distribution with the alpha level and degrees of freedom.
Finally, the power of the chi-squared test is calculated using the survival function of the
non-central :math:`\chi^2` distribution using the previously computed critical value,
non-centrality parameter, and the degrees of freedom of the test.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e.
sample size, effect size, or significance level). If the solving fails, a nan value is
returned.
Results have been tested against GPower and the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.).
Examples
--------
1. Compute achieved power
>>> from pingouin import power_chi2
>>> print('power: %.4f' % power_chi2(dof=1, w=0.3, n=20))
power: 0.2687
2. Compute required sample size
>>> print('n: %.4f' % power_chi2(dof=3, w=0.3, power=0.80))
n: 121.1396
3. Compute achieved effect size
>>> print('w: %.4f' % power_chi2(dof=2, n=20, power=0.80, alpha=0.05))
w: 0.6941
4. Compute achieved alpha (significance)
>>> print('alpha: %.4f' % power_chi2(dof=1, w=0.5, n=20, power=0.80, alpha=None))
alpha: 0.1630
def power_chi2(dof, w=None, n=None, power=None, alpha=0.05):
    r"""
    Evaluate power, sample size, effect size or significance level of chi-squared tests.

    Parameters
    ----------
    dof : float
        Degree of freedom (depends on the chosen test).
    w : float
        Cohen's w effect size [1]_.
    n : int
        Total number of observations.
    power : float
        Test power (= 1 - type II error).
    alpha : float
        Significance level (type I error probability). The default is 0.05.

    Notes
    -----
    Exactly ONE of the parameters ``w``, ``n``, ``power`` and ``alpha`` must be passed as
    None, and that parameter is determined from the others. The degrees of freedom ``dof``
    must always be specified. ``alpha`` has a default value of 0.05 so None must be
    explicitly passed if you want to compute it.

    This function is a Python adaptation of the `pwr.chisq.test` function implemented in
    the `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.

    The non-centrality parameter is :math:`\delta = N \cdot w^2`. The power is the
    survival function of the non-central :math:`\chi^2` distribution evaluated at the
    critical value of the central :math:`\chi^2` distribution at level :math:`1 - \alpha`
    with ``dof`` degrees of freedom.

    :py:func:`scipy.optimize.brenth` is used to solve power equations for other variables
    (i.e. sample size, effect size, or significance level). If the solving fails, a nan
    value is returned.

    Results have been tested against GPower and the
    `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.

    References
    ----------
    .. [1] Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.).

    Examples
    --------
    1. Compute achieved power

    >>> from pingouin import power_chi2
    >>> print('power: %.4f' % power_chi2(dof=1, w=0.3, n=20))
    power: 0.2687

    2. Compute required sample size

    >>> print('n: %.4f' % power_chi2(dof=3, w=0.3, power=0.80))
    n: 121.1396

    3. Compute achieved effect size

    >>> print('w: %.4f' % power_chi2(dof=2, n=20, power=0.80, alpha=0.05))
    w: 0.6941

    4. Compute achieved alpha (significance)

    >>> print('alpha: %.4f' % power_chi2(dof=1, w=0.5, n=20, power=0.80, alpha=None))
    alpha: 0.1630
    """
    assert isinstance(dof, (int, float))

    # Exactly one of the four remaining quantities may be left unknown (None);
    # that one is solved from the others.
    unknowns = sum(arg is None for arg in (w, n, power, alpha))
    if unknowns != 1:
        raise ValueError("Exactly one of w, n, power, and alpha must be None.")

    # Safety checks on the provided values
    if w is not None:
        w = abs(w)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1

    def achieved_power(w, n, alpha):
        # Power = P(X' > crit), X' non-central chi2 with ncp = n * w^2,
        # crit the central chi2 critical value at level 1 - alpha.
        crit = stats.chi2.ppf(1 - alpha, dof)
        return stats.ncx2.sf(crit, dof, n * w**2)

    if power is None:
        # Achieved power: direct evaluation, no root finding needed.
        return achieved_power(w, n, alpha)

    if n is None:
        # Required sample size: root of power(n) - target over [1, 1e7].
        try:
            return brenth(lambda nn: achieved_power(w, nn, alpha) - power, 1, 1e07)
        except ValueError:  # pragma: no cover
            return np.nan

    if w is None:
        # Achieved effect size: root of power(w) - target over (0, 100].
        try:
            return brenth(lambda ww: achieved_power(ww, n, alpha) - power, 1e-10, 100)
        except ValueError:  # pragma: no cover
            return np.nan

    # Achieved alpha: root of power(alpha) - target over (0, 1).
    try:
        return brenth(lambda aa: achieved_power(w, n, aa) - power, 1e-10, 1 - 1e-10)
    except ValueError:  # pragma: no cover
        return np.nan
| (dof, w=None, n=None, power=None, alpha=0.05) |
32,047 | pingouin.power | power_corr |
Evaluate power, sample size, correlation coefficient or significance level of a correlation
test.
Parameters
----------
r : float
Correlation coefficient.
n : int
Number of observations (sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability). The default is 0.05.
alternative : string
Defines the alternative hypothesis, or tail of the correlation. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return a one-sided
p-value. "greater" tests against the alternative hypothesis that the correlation is
positive (greater than zero), "less" tests against the hypothesis that the correlation is
negative.
Notes
-----
Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must be passed as None,
and that parameter is determined from the others.
``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to
compute it.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e.
sample size, effect size, or significance level). If the solving fails, a nan value is
returned.
This function is a Python adaptation of the `pwr.r.test` function implemented in the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Examples
--------
1. Compute achieved power given ``r``, ``n`` and ``alpha``
>>> from pingouin import power_corr
>>> print('power: %.4f' % power_corr(r=0.5, n=20))
power: 0.6379
2. Same but one-sided test
>>> print('power: %.4f' % power_corr(r=0.5, n=20, alternative="greater"))
power: 0.7510
>>> print('power: %.4f' % power_corr(r=0.5, n=20, alternative="less"))
power: 0.0000
3. Compute required sample size given ``r``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_corr(r=0.5, power=0.80))
n: 28.2484
4. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level
>>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
r: 0.5822
5. Compute achieved alpha level given ``r``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80, alpha=None))
alpha: 0.1377
def power_corr(r=None, n=None, power=None, alpha=0.05, alternative="two-sided"):
    """
    Evaluate power, sample size, correlation coefficient or significance level of a correlation
    test.

    Exactly ONE of ``r``, ``n``, ``power`` and ``alpha`` must be None; that parameter is then
    solved for from the other three. Since ``alpha`` defaults to 0.05, it must be explicitly
    set to None to be computed.

    Parameters
    ----------
    r : float
        Correlation coefficient.
    n : int
        Number of observations (sample size).
    power : float
        Test power (= 1 - type II error).
    alpha : float
        Significance level (type I error probability). The default is 0.05.
    alternative : string
        Tail of the test. Must be one of "two-sided" (default), "greater" or "less".
        "greater" tests against the alternative hypothesis that the correlation is positive,
        "less" that it is negative.

    Notes
    -----
    This function is a Python adaptation of the `pwr.r.test` function implemented in the
    `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package. The power is
    approximated through the Fisher r-to-z transform of the critical correlation.
    :py:func:`scipy.optimize.brenth` is used to solve the power equation for the missing
    variable; a nan value is returned if the solving fails.

    Examples
    --------
    >>> from pingouin import power_corr
    >>> print('power: %.4f' % power_corr(r=0.5, n=20))
    power: 0.6379
    >>> print('n: %.4f' % power_corr(r=0.5, power=0.80))
    n: 28.2484
    >>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
    r: 0.5822
    >>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80, alpha=None))
    alpha: 0.1377
    """
    # Exactly one unknown may be requested at a time.
    missing = sum(arg is None for arg in (r, n, power, alpha))
    if missing != 1:
        raise ValueError("Exactly one of n, r, power, and alpha must be None")
    # Input validation
    assert alternative in (
        "two-sided",
        "greater",
        "less",
    ), "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    if r is not None:
        assert -1 <= r <= 1
        # The sign of r is irrelevant for a two-sided test.
        if alternative == "two-sided":
            r = abs(r)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1
    if n is not None and n <= 4:
        warnings.warn("Sample size is too small to estimate power (n <= 4). Returning NaN.")
        return np.nan

    two_sided = alternative == "two-sided"

    def _power(r, n, alpha):
        # Critical correlation derived from the t critical value, then a normal
        # approximation of the power via the Fisher r-to-z transform (pwr.r.test).
        dof = n - 2
        tcrit = stats.t.ppf(1 - alpha / 2 if two_sided else 1 - alpha, dof)
        rcrit = np.sqrt(tcrit**2 / (tcrit**2 + dof))
        # A "less" tail is the mirror image of "greater".
        rr = -r if alternative == "less" else r
        zr = np.arctanh(rr) + rr / (2 * (n - 1))
        zrc = np.arctanh(rcrit)
        scale = np.sqrt(n - 3)
        achieved = stats.norm.cdf((zr - zrc) * scale)
        if two_sided:
            achieved += stats.norm.cdf((-zr - zrc) * scale)
        return achieved

    # Solve for whichever parameter was left as None.
    if power is None:
        # Achieved power given r, n and alpha.
        return _power(r, n, alpha)
    try:  # pragma: no cover — brenth may fail to bracket a root
        if n is None:
            # Required sample size given r, power and alpha.
            return brenth(lambda nn: _power(r, nn, alpha) - power, 4 + 1e-10, 1e09)
        if r is None:
            # Achieved correlation given n, power and alpha. The bracket is
            # positive-only for a two-sided test (sign is irrelevant there).
            lower = 1e-10 if two_sided else -1 + 1e-10
            return brenth(lambda rr: _power(rr, n, alpha) - power, lower, 1 - 1e-10)
        # Achieved significance level given r, n and power.
        return brenth(lambda aa: _power(r, n, aa) - power, 1e-10, 1 - 1e-10)
    except ValueError:
        return np.nan
| (r=None, n=None, power=None, alpha=0.05, alternative='two-sided') |
32,048 | pingouin.power | power_rm_anova |
Evaluate power, sample size, effect size or significance level of a balanced one-way
repeated measures ANOVA.
Parameters
----------
eta_squared : float
ANOVA effect size (eta-squared, :math:`\eta^2`).
m : int
Number of repeated measurements.
n : int
Sample size per measurement. All measurements must have the same sample size.
power : float
Test power (= 1 - type II error).
alpha : float
Significance level :math:`\alpha` (type I error probability). The default is 0.05.
corr : float
Average correlation coefficient among repeated measurements. The default is :math:`r=0.5`.
epsilon : float
Epsilon adjustement factor for sphericity. This can be calculated using the
:py:func:`pingouin.epsilon` function.
Notes
-----
Exactly ONE of the parameters ``eta_squared``, ``m``, ``n``, ``power`` and ``alpha`` must be
passed as None, and that parameter is determined from the others.
``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to
compute it.
Statistical power is the likelihood that a study will detect an effect when there is an effect
there to be detected. A high statistical power means that there is a low probability of
concluding that there is no effect when there is one. Statistical power is mainly affected by
the effect size and the sample size.
GPower uses the :math:`f` effect size instead of the :math:`\eta^2`. The formula to convert
from one to the other are given below:
.. math:: f = \sqrt{\frac{\eta^2}{1 - \eta^2}}
.. math:: \eta^2 = \frac{f^2}{1 + f^2}
Using :math:`\eta^2`, the sample size :math:`N`, the number of repeated measurements
:math:`m`, the epsilon correction factor :math:`\epsilon` (see :py:func:`pingouin.epsilon`),
and the average correlation between the repeated measures :math:`c`, one can then calculate the
non-centrality parameter as follow:
.. math:: \delta = \frac{f^2 * N * m * \epsilon}{1 - c}
Then the critical value of the non-central F-distribution is computed using the percentile
point function of the F-distribution with:
.. math:: q = 1 - \alpha
.. math:: v_1 = (m - 1) * \epsilon
.. math:: v_2 = (N - 1) * v_1
Finally, the power of the ANOVA is calculated using the survival function of the non-central
F-distribution using the previously computed critical value, non-centrality parameter,
and degrees of freedom.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other variables
(i.e. sample size, effect size, or significance level). If the solving fails, a nan value is
returned.
Results have been tested against GPower and the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Examples
--------
1. Compute achieved power
>>> from pingouin import power_rm_anova
>>> print('power: %.4f' % power_rm_anova(eta_squared=0.1, m=3, n=20))
power: 0.8913
2. Compute required number of groups
>>> print('m: %.4f' % power_rm_anova(eta_squared=0.1, n=20, power=0.90))
m: 3.1347
3. Compute required sample size
>>> print('n: %.4f' % power_rm_anova(eta_squared=0.1, m=3, power=0.80))
n: 15.9979
4. Compute achieved effect size
>>> print('eta-squared: %.4f' % power_rm_anova(n=20, m=4, power=0.80, alpha=0.05))
eta-squared: 0.0680
5. Compute achieved alpha (significance)
>>> print('alpha: %.4f' % power_rm_anova(eta_squared=0.1, n=20, m=4, power=0.80, alpha=None))
alpha: 0.0081
Let's take a more concrete example. First, we'll load a repeated measures
dataset in wide-format. Each row is an observation (e.g. a subject), and
each column a successive repeated measurements (e.g t=0, t=1, ...).
>>> import pingouin as pg
>>> data = pg.read_dataset('rm_anova_wide')
>>> data.head()
Before 1 week 2 week 3 week
0 4.3 5.3 4.8 6.3
1 3.9 2.3 5.6 4.3
2 4.5 2.6 4.1 NaN
3 5.1 4.2 6.0 6.3
4 3.8 3.6 4.8 6.8
Note that this dataset has some missing values. We'll simply delete any row with one or more
missing values, and then compute a repeated measures ANOVA:
>>> data = data.dropna()
>>> pg.rm_anova(data, effsize="n2").round(3)
Source ddof1 ddof2 F p-unc n2 eps
0 Within 3 24 5.201 0.007 0.346 0.694
The repeated measures ANOVA is significant at the 0.05 level. Now, we can
easily compute the power of the ANOVA with the information in the ANOVA table:
>>> # n is the sample size and m is the number of repeated measures
>>> n, m = data.shape
>>> round(pg.power_rm_anova(eta_squared=0.346, m=m, n=n, epsilon=0.694), 3)
0.99
Our ANOVA has a very high statistical power. However, to be even more accurate in our power
calculation, we should also fill in the average correlation among repeated measurements.
Since our dataframe is in wide-format (with each column being a successive measurement), this
can be done by taking the mean of the superdiagonal of the correlation matrix, which is similar
to manually calculating the correlation between each successive pairwise measurements and then
taking the mean. Since correlation coefficients are not normally distributed, we use the
*r-to-z* transform prior to averaging (:py:func:`numpy.arctanh`), and then the *z-to-r*
transform (:py:func:`numpy.tanh`) to convert back to a correlation coefficient. This gives a
more precise estimate of the mean.
>>> import numpy as np
>>> corr = np.diag(data.corr(), k=1)
>>> avgcorr = np.tanh(np.arctanh(corr).mean())
>>> round(avgcorr, 4)
-0.1996
In this example, we're using a fake dataset and the average correlation is negative. However,
it will most likely be positive with real data. Let's now compute the final power of the
repeated measures ANOVA:
>>> round(pg.power_rm_anova(eta_squared=0.346, m=m, n=n, epsilon=0.694, corr=avgcorr), 3)
0.771
def power_rm_anova(eta_squared=None, m=None, n=None, power=None, alpha=0.05, corr=0.5, epsilon=1):
    """
    Evaluate power, sample size, effect size or significance level of a balanced one-way
    repeated measures ANOVA.

    Exactly ONE of ``eta_squared``, ``m``, ``n``, ``power`` and ``alpha`` must be None; that
    parameter is then solved for from the others. Since ``alpha`` defaults to 0.05, it must
    be explicitly set to None to be computed.

    Parameters
    ----------
    eta_squared : float
        ANOVA effect size (eta-squared, :math:`\\eta^2`).
    m : int
        Number of repeated measurements.
    n : int
        Sample size per measurement. All measurements must have the same sample size.
    power : float
        Test power (= 1 - type II error).
    alpha : float
        Significance level :math:`\\alpha` (type I error probability). The default is 0.05.
    corr : float
        Average correlation coefficient among repeated measurements. The default is
        :math:`r=0.5`.
    epsilon : float
        Epsilon adjustement factor for sphericity. This can be calculated using the
        :py:func:`pingouin.epsilon` function.

    Notes
    -----
    The effect size is first converted to Cohen's
    :math:`f^2 = \\frac{\\eta^2}{1 - \\eta^2}`. The non-centrality parameter is then

    .. math:: \\delta = \\frac{f^2 * N * m * \\epsilon}{1 - c}

    with degrees of freedom :math:`v_1 = (m - 1) * \\epsilon` and
    :math:`v_2 = (N - 1) * v_1`. The power is the survival function of the non-central
    F-distribution evaluated at the critical F value for the given alpha level.
    :py:func:`scipy.optimize.brenth` is used to solve the power equation for the missing
    variable; a nan value is returned if the solving fails. Results have been tested against
    GPower and the `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.

    Examples
    --------
    >>> from pingouin import power_rm_anova
    >>> print('power: %.4f' % power_rm_anova(eta_squared=0.1, m=3, n=20))
    power: 0.8913
    >>> print('n: %.4f' % power_rm_anova(eta_squared=0.1, m=3, power=0.80))
    n: 15.9979
    >>> print('alpha: %.4f' % power_rm_anova(eta_squared=0.1, n=20, m=4, power=0.80, alpha=None))
    alpha: 0.0081
    """
    # Exactly one unknown may be requested at a time.
    missing = sum(arg is None for arg in (eta_squared, m, n, power, alpha))
    if missing != 1:
        raise ValueError("Exactly one of eta, m, n, power, and alpha must be None.")
    # Input validation
    assert 0 < epsilon <= 1, "epsilon must be between 0 and 1."
    assert -1 < corr < 1, "corr must be between -1 and 1."
    f_sq = None
    if eta_squared is not None:
        eta_squared = abs(eta_squared)
        # Convert eta-squared to Cohen's f-squared.
        f_sq = eta_squared / (1 - eta_squared)
    if alpha is not None:
        assert 0 < alpha <= 1, "alpha must be between 0 and 1."
    if power is not None:
        assert 0 < power <= 1, "power must be between 0 and 1."
    if n is not None:
        assert n > 1, "The sample size n must be > 1."
    if m is not None:
        assert m > 1, "The number of repeated measures m must be > 1."

    def _power(f_sq, m, n, alpha):
        # Survival function of the non-central F-distribution at the critical F value.
        df1 = (m - 1) * epsilon
        df2 = (n - 1) * df1
        nc = (f_sq * n * m * epsilon) / (1 - corr)
        fcrit = stats.f.ppf(1 - alpha, df1, df2)
        return stats.ncf.sf(fcrit, df1, df2, nc)

    # Solve for whichever parameter was left as None.
    if power is None:
        # Achieved power.
        return _power(f_sq, m, n, alpha)
    try:  # pragma: no cover — brenth may fail to bracket a root
        if m is None:
            # Required number of repeated measures.
            return brenth(lambda mm: _power(f_sq, mm, n, alpha) - power, 2, 100)
        if n is None:
            # Required sample size.
            return brenth(lambda nn: _power(f_sq, m, nn, alpha) - power, 5, 1e6)
        if eta_squared is None:
            # Achieved effect size: solve for f-squared, then convert back to eta-squared.
            fsq = brenth(lambda ff: _power(ff, m, n, alpha) - power, 1e-10, 1 - 1e-10)
            return fsq / (fsq + 1)
        # Achieved significance level.
        return brenth(lambda aa: _power(f_sq, m, n, aa) - power, 1e-10, 1 - 1e-10)
    except ValueError:
        return np.nan
| (eta_squared=None, m=None, n=None, power=None, alpha=0.05, corr=0.5, epsilon=1) |
32,049 | pingouin.power | power_ttest |
Evaluate power, sample size, effect size or significance level of a one-sample T-test,
a paired T-test or an independent two-samples T-test with equal sample sizes.
Parameters
----------
d : float
Cohen d effect size
n : int
Sample size
In case of a two-sample T-test, sample sizes are assumed to be equal.
Otherwise, see the :py:func:`power_ttest2n` function.
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
contrast : str
Can be `"one-sample"`, `"two-samples"` or `"paired"`.
Note that `"one-sample"` and `"paired"` have the same behavior.
alternative : string
Defines the alternative hypothesis, or tail of the test. Must be one of
"two-sided" (default), "greater" or "less".
Notes
-----
Exactly ONE of the parameters ``d``, ``n``, ``power`` and ``alpha`` must be passed as None, and
that parameter is determined from the others.
For a paired T-test, the sample size ``n`` corresponds to the number of pairs. For an
independent two-sample T-test with equal sample sizes, ``n`` corresponds to the sample size of
each group (i.e. number of observations in one group). If the sample sizes are unequal, please
use the :py:func:`power_ttest2n` function instead.
``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to
compute it.
This function is a Python adaptation of the `pwr.t.test` function implemented in the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Statistical power is the likelihood that a study will detect an effect when there is an effect
there to be detected. A high statistical power means that there is a low probability of
concluding that there is no effect when there is one. Statistical power is mainly affected by
the effect size and the sample size.
The first step is to use the Cohen's d to calculate the non-centrality parameter
:math:`\delta` and degrees of freedom :math:`v`. In case of paired groups, this is:
.. math:: \delta = d * \sqrt n
.. math:: v = n - 1
and in case of independent groups with equal sample sizes:
.. math:: \delta = d * \sqrt{\frac{n}{2}}
.. math:: v = (n - 1) * 2
where :math:`d` is the Cohen d and :math:`n` the sample size.
The critical value is then found using the percent point function of the T distribution with
:math:`q = 1 - alpha` and :math:`v` degrees of freedom.
Finally, the power of the test is given by the survival function of the non-central
distribution using the previously calculated critical value, degrees of freedom and
non-centrality parameter.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e.
sample size, effect size, or significance level). If the solving fails, a nan value is
returned.
Results have been tested against GPower and the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Examples
--------
1. Compute power of a one-sample T-test given ``d``, ``n`` and ``alpha``
>>> from pingouin import power_ttest
>>> print('power: %.4f' % power_ttest(d=0.5, n=20, contrast='one-sample'))
power: 0.5645
2. Compute required sample size given ``d``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_ttest(d=0.5, power=0.80, alternative='greater'))
n: 50.1508
3. Compute achieved ``d`` given ``n``, ``power`` and ``alpha`` level
>>> print('d: %.4f' % power_ttest(n=20, power=0.80, alpha=0.05, contrast='paired'))
d: 0.6604
4. Compute achieved alpha level given ``d``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_ttest(d=0.5, n=20, power=0.80, alpha=None))
alpha: 0.4430
5. One-sided tests
>>> from pingouin import power_ttest
>>> print('power: %.4f' % power_ttest(d=0.5, n=20, alternative='greater'))
power: 0.4634
>>> print('power: %.4f' % power_ttest(d=0.5, n=20, alternative='less'))
power: 0.0007
def power_ttest(
    d=None, n=None, power=None, alpha=0.05, contrast="two-samples", alternative="two-sided"
):
    """
    Evaluate power, sample size, effect size or significance level of a one-sample T-test,
    a paired T-test or an independent two-samples T-test with equal sample sizes.

    Exactly ONE of ``d``, ``n``, ``power`` and ``alpha`` must be None; that parameter is then
    solved for from the other three. Since ``alpha`` defaults to 0.05, it must be explicitly
    set to None to be computed.

    Parameters
    ----------
    d : float
        Cohen d effect size.
    n : int
        Sample size. For a two-sample T-test, both groups are assumed to have the same size
        (otherwise, use :py:func:`power_ttest2n`); for a paired T-test, ``n`` is the number
        of pairs.
    power : float
        Test power (= 1 - type II error).
    alpha : float
        Significance level (type I error probability). The default is 0.05.
    contrast : str
        Can be `"one-sample"`, `"two-samples"` or `"paired"`.
        Note that `"one-sample"` and `"paired"` have the same behavior.
    alternative : string
        Tail of the test. Must be one of "two-sided" (default), "greater" or "less".

    Notes
    -----
    This function is a Python adaptation of the `pwr.t.test` function implemented in the
    `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package. The
    non-centrality parameter is :math:`\\delta = d * \\sqrt{n}` with :math:`v = n - 1`
    degrees of freedom for paired and one-sample contrasts, and
    :math:`\\delta = d * \\sqrt{n / 2}` with :math:`v = 2(n - 1)` for two independent groups
    of equal size. The power is obtained from the non-central T-distribution evaluated at the
    critical T value. :py:func:`scipy.optimize.brenth` is used to solve the power equation
    for the missing variable; a nan value is returned if the solving fails. Results have been
    tested against GPower and the `pwr` R package.

    Examples
    --------
    >>> from pingouin import power_ttest
    >>> print('power: %.4f' % power_ttest(d=0.5, n=20, contrast='one-sample'))
    power: 0.5645
    >>> print('n: %.4f' % power_ttest(d=0.5, power=0.80, alternative='greater'))
    n: 50.1508
    >>> print('alpha: %.4f' % power_ttest(d=0.5, n=20, power=0.80, alpha=None))
    alpha: 0.4430
    """
    # Exactly one unknown may be requested at a time.
    missing = sum(arg is None for arg in (d, n, power, alpha))
    if missing != 1:
        raise ValueError("Exactly one of n, d, power, and alpha must be None.")
    # Input validation
    assert alternative in (
        "two-sided",
        "greater",
        "less",
    ), "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    assert contrast.lower() in ("one-sample", "paired", "two-samples")
    # Number of independent samples and number of tails.
    tsample = 2 if contrast.lower() == "two-samples" else 1
    two_sided = alternative == "two-sided"
    tside = 2 if two_sided else 1
    if d is not None and two_sided:
        # The sign of d is irrelevant for a two-sided test.
        d = abs(d)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1

    def _power(d, n, alpha):
        # Power from the non-central T-distribution at the critical T value.
        dof = (n - 1) * tsample
        delta = d * np.sqrt(n / tsample)
        if alternative == "less":
            tcrit = stats.t.ppf(alpha / tside, dof)
            return stats.nct.cdf(tcrit, dof, delta)
        tcrit = stats.t.ppf(1 - alpha / tside, dof)
        achieved = stats.nct.sf(tcrit, dof, delta)
        if two_sided:
            achieved += stats.nct.cdf(-tcrit, dof, delta)
        return achieved

    # Solve for whichever parameter was left as None.
    if power is None:
        # Achieved power given d, n and alpha.
        return _power(d, n, alpha)
    try:  # pragma: no cover — brenth may fail to bracket a root
        if n is None:
            # Required sample size given d, power and alpha.
            return brenth(lambda nn: _power(d, nn, alpha) - power, 2 + 1e-10, 1e07)
        if d is None:
            # Achieved effect size given n, power and alpha. The search bracket
            # depends on the tail of the test.
            if two_sided:
                b0, b1 = 1e-07, 10
            elif alternative == "less":
                b0, b1 = -10, 5
            else:
                b0, b1 = -5, 10
            return brenth(lambda dd: _power(dd, n, alpha) - power, b0, b1)
        # Achieved significance level given d, n and power.
        return brenth(lambda aa: _power(d, n, aa) - power, 1e-10, 1 - 1e-10)
    except ValueError:
        return np.nan
| (d=None, n=None, power=None, alpha=0.05, contrast='two-samples', alternative='two-sided') |
32,050 | pingouin.power | power_ttest2n |
Evaluate power, effect size or significance level of an independent two-samples T-test
with unequal sample sizes.
Parameters
----------
nx, ny : int
Sample sizes. Must be specified. If the sample sizes are equal, you should use the
:py:func:`power_ttest` function instead.
d : float
Cohen d effect size
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability). The default is 0.05.
alternative : string
Defines the alternative hypothesis, or tail of the test. Must be one of "two-sided"
(default), "greater" or "less".
Notes
-----
Exactly ONE of the parameters ``d``, ``power`` and ``alpha`` must be passed as None, and that
parameter is determined from the others.
``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to compute
it.
This function is a Python adaptation of the `pwr.t2n.test` function implemented in the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Statistical power is the likelihood that a study will detect an effect when there is an effect
there to be detected. A high statistical power means that there is a low probability of
concluding that there is no effect when there is one. Statistical power is mainly affected by
the effect size and the sample size.
The first step is to use the Cohen's d to calculate the non-centrality parameter
:math:`\delta` and degrees of freedom :math:`v`. In case of two independent groups with
unequal sample sizes, this is:
.. math:: \delta = d * \sqrt{\frac{n_i * n_j}{n_i + n_j}}
.. math:: v = n_i + n_j - 2
where :math:`d` is the Cohen d, :math:`n` the sample size,
:math:`n_i` the sample size of the first group and
:math:`n_j` the sample size of the second group.
The critical value is then found using the percent point function of the T distribution with
:math:`q = 1 - alpha` and :math:`v` degrees of freedom.
Finally, the power of the test is given by the survival function of the non-central
distribution using the previously calculated critical value, degrees of freedom and
non-centrality parameter.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e.
sample size, effect size, or significance level). If the solving fails, a nan value is
returned.
Results have been tested against GPower and the
`pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.
Examples
--------
1. Compute achieved power of a T-test given ``d``, ``n`` and ``alpha``
>>> from pingouin import power_ttest2n
>>> print('power: %.4f' % power_ttest2n(nx=20, ny=15, d=0.5, alternative='greater'))
power: 0.4164
2. Compute achieved ``d`` given ``n``, ``power`` and ``alpha`` level
>>> print('d: %.4f' % power_ttest2n(nx=20, ny=15, power=0.80, alpha=0.05))
d: 0.9859
3. Compute achieved alpha level given ``d``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_ttest2n(nx=20, ny=15, d=0.5, power=0.80, alpha=None))
alpha: 0.5000
def power_ttest2n(nx, ny, d=None, power=None, alpha=0.05, alternative="two-sided"):
    """
    Evaluate power, effect size or significance level of an independent two-samples T-test
    with unequal sample sizes.

    Parameters
    ----------
    nx, ny : int
        Sample sizes. Must be specified. If the sample sizes are equal, you should use the
        :py:func:`power_ttest` function instead.
    d : float
        Cohen d effect size
    power : float
        Test power (= 1 - type II error).
    alpha : float
        Significance level (type I error probability). The default is 0.05.
    alternative : string
        Defines the alternative hypothesis, or tail of the test. Must be one of "two-sided"
        (default), "greater" or "less".

    Notes
    -----
    Exactly ONE of the parameters ``d``, ``power`` and ``alpha`` must be passed as None, and that
    parameter is determined from the others.

    ``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to compute
    it.

    This function is a Python adaptation of the `pwr.t2n.test` function implemented in the
    `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.

    Statistical power is the likelihood that a study will detect an effect when there is an effect
    there to be detected. A high statistical power means that there is a low probability of
    concluding that there is no effect when there is one. Statistical power is mainly affected by
    the effect size and the sample size.

    The first step is to use the Cohen's d to calculate the non-centrality parameter
    :math:`\\delta` and degrees of freedom :math:`v`. In case of two independent groups with
    unequal sample sizes, this is:

    .. math:: \\delta = d * \\sqrt{\\frac{n_i * n_j}{n_i + n_j}}
    .. math:: v = n_i + n_j - 2

    where :math:`d` is the Cohen d, :math:`n` the sample size,
    :math:`n_i` the sample size of the first group and
    :math:`n_j` the sample size of the second group.

    The critical value is then found using the percent point function of the T distribution with
    :math:`q = 1 - alpha` and :math:`v` degrees of freedom.

    Finally, the power of the test is given by the survival function of the non-central
    distribution using the previously calculated critical value, degrees of freedom and
    non-centrality parameter.

    :py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e.
    sample size, effect size, or significance level). If the solving fails, a nan value is
    returned.

    Results have been tested against GPower and the
    `pwr <https://cran.r-project.org/web/packages/pwr/pwr.pdf>`_ R package.

    Examples
    --------
    1. Compute achieved power of a T-test given ``d``, ``n`` and ``alpha``

    >>> from pingouin import power_ttest2n
    >>> print('power: %.4f' % power_ttest2n(nx=20, ny=15, d=0.5, alternative='greater'))
    power: 0.4164

    2. Compute achieved ``d`` given ``n``, ``power`` and ``alpha`` level

    >>> print('d: %.4f' % power_ttest2n(nx=20, ny=15, power=0.80, alpha=0.05))
    d: 0.9859

    3. Compute achieved alpha level given ``d``, ``n`` and ``power``

    >>> print('alpha: %.4f' % power_ttest2n(nx=20, ny=15, d=0.5, power=0.80, alpha=None))
    alpha: 0.5000
    """
    # Exactly one of (d, power, alpha) must be None: that is the quantity solved for.
    n_none = sum([v is None for v in [d, power, alpha]])
    if n_none != 1:
        raise ValueError("Exactly one of d, power, and alpha must be None")
    # Safety checks
    assert alternative in [
        "two-sided",
        "greater",
        "less",
    ], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
    tside = 2 if alternative == "two-sided" else 1
    # For a two-sided test the direction of the effect is irrelevant.
    if d is not None and tside == 2:
        d = abs(d)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1

    # ``func`` maps (d, nx, ny, alpha) -> achieved power for the chosen tail.
    # dof = nx + ny - 2 and nc = d / sqrt(1/nx + 1/ny) is the non-centrality
    # parameter (equivalent to d * sqrt(nx*ny / (nx+ny))).
    if alternative == "less":

        def func(d, nx, ny, power, alpha):
            # Power = P(T < t_crit) under the non-central T distribution.
            dof = nx + ny - 2
            nc = d * (1 / np.sqrt(1 / nx + 1 / ny))
            tcrit = stats.t.ppf(alpha / tside, dof)
            return stats.nct.cdf(tcrit, dof, nc)

    elif alternative == "two-sided":

        def func(d, nx, ny, power, alpha):
            # Power = P(|T| > t_crit): sum of both rejection regions.
            dof = nx + ny - 2
            nc = d * (1 / np.sqrt(1 / nx + 1 / ny))
            tcrit = stats.t.ppf(1 - alpha / tside, dof)
            return stats.nct.sf(tcrit, dof, nc) + stats.nct.cdf(-tcrit, dof, nc)

    else:  # Alternative = 'greater'

        def func(d, nx, ny, power, alpha):
            # Power = P(T > t_crit) under the non-central T distribution.
            dof = nx + ny - 2
            nc = d * (1 / np.sqrt(1 / nx + 1 / ny))
            tcrit = stats.t.ppf(1 - alpha / tside, dof)
            return stats.nct.sf(tcrit, dof, nc)

    # Evaluate missing variable
    if power is None:
        # Compute achieved power given d, n and alpha
        return func(d, nx, ny, power=None, alpha=alpha)

    elif d is None:
        # Compute achieved d given sample size, power and alpha level.
        # Bracket chosen per tail so that the root (power crossing) is enclosed.
        if alternative == "two-sided":
            b0, b1 = 1e-07, 10
        elif alternative == "less":
            b0, b1 = -10, 5
        else:
            b0, b1 = -5, 10

        def _eval_d(d, nx, ny, power, alpha):
            return func(d, nx, ny, power, alpha) - power

        try:
            return brenth(_eval_d, b0, b1, args=(nx, ny, power, alpha))
        except ValueError:  # pragma: no cover
            return np.nan

    else:
        # Compute achieved alpha (significance) level given d, n and power
        def _eval_alpha(alpha, d, nx, ny, power):
            return func(d, nx, ny, power, alpha) - power

        try:
            return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(d, nx, ny, power))
        except ValueError:  # pragma: no cover
            return np.nan
| (nx, ny, d=None, power=None, alpha=0.05, alternative='two-sided') |
32,051 | pingouin.utils | print_table | Pretty display of table.
Parameters
----------
df : :py:class:`pandas.DataFrame`
Dataframe to print (e.g. ANOVA summary)
floatfmt : string
Decimal number formatting
tablefmt : string
Table format (e.g. 'simple', 'plain', 'html', 'latex', 'grid', 'rst').
For a full list of available formats, please refer to
https://pypi.org/project/tabulate/
def print_table(df, floatfmt=".3f", tablefmt="simple"):
    """Pretty display of table.

    Parameters
    ----------
    df : :py:class:`pandas.DataFrame`
        Dataframe to print (e.g. ANOVA summary)
    floatfmt : string
        Decimal number formatting
    tablefmt : string
        Table format (e.g. 'simple', 'plain', 'html', 'latex', 'grid', 'rst').
        For a full list of available formats, please refer to
        https://pypi.org/project/tabulate/
    """
    # Print a heading banner for each recognized summary-table column.
    banners = (
        ("F", "\n=============\nANOVA SUMMARY\n=============\n"),
        ("A", "\n==============\nPOST HOC TESTS\n==============\n"),
    )
    for col, banner in banners:
        if col in df.keys():
            print(banner)
    rendered = tabulate(df, headers="keys", showindex=False, floatfmt=floatfmt, tablefmt=tablefmt)
    print(rendered)
    print("")
| (df, floatfmt='.3f', tablefmt='simple') |
32,052 | pingouin.pairwise | ptests |
Pairwise T-test between columns of a dataframe.
T-values are reported on the lower triangle of the output pairwise matrix and p-values on the
upper triangle. This method is a faster, but less exhaustive, matrix-version of the
:py:func:`pingouin.pairwise_test` function. Missing values are automatically removed from each
pairwise T-test.
.. versionadded:: 0.5.3
Parameters
----------
self : :py:class:`pandas.DataFrame`
Input dataframe.
paired : boolean
Specify whether the two observations are related (i.e. repeated measures) or independent.
decimals : int
Number of decimals to display in the output matrix.
padjust : string or None
P-values adjustment for multiple comparison
* ``'none'``: no correction
* ``'bonf'``: one-step Bonferroni correction
* ``'sidak'``: one-step Sidak correction
* ``'holm'``: step-down method using Bonferroni adjustments
* ``'fdr_bh'``: Benjamini/Hochberg FDR correction
* ``'fdr_by'``: Benjamini/Yekutieli FDR correction
stars : boolean
If True, only significant p-values are displayed as stars using the pre-defined thresholds
of ``pval_stars``. If False, all the raw p-values are displayed.
pval_stars : dict
Significance thresholds. Default is 3 stars for p-values <0.001, 2 stars for
p-values <0.01 and 1 star for p-values <0.05.
**kwargs : optional
Optional argument(s) passed to the lower-level scipy functions, i.e.
:py:func:`scipy.stats.ttest_ind` for independent T-test and
:py:func:`scipy.stats.ttest_rel` for paired T-test.
Returns
-------
mat : :py:class:`pandas.DataFrame`
Pairwise T-test matrix, of dtype str, with T-values on the lower triangle and p-values on
the upper triangle.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> import pingouin as pg
>>> # Load an example dataset of personality dimensions
>>> df = pg.read_dataset('pairwise_corr').iloc[:30, 1:]
>>> df.columns = ["N", "E", "O", 'A', "C"]
>>> # Add some missing values
>>> df.iloc[[2, 5, 20], 2] = np.nan
>>> df.iloc[[1, 4, 10], 3] = np.nan
>>> df.head().round(2)
N E O A C
0 2.48 4.21 3.94 3.96 3.46
1 2.60 3.19 3.96 NaN 3.23
2 2.81 2.90 NaN 2.75 3.50
3 2.90 3.56 3.52 3.17 2.79
4 3.02 3.33 4.02 NaN 2.85
Independent pairwise T-tests
>>> df.ptests()
N E O A C
N - *** *** *** ***
E -8.397 - ***
O -8.332 -0.596 - ***
A -8.804 0.12 0.72 - ***
C -4.759 3.753 4.074 3.787 -
Let's compare with SciPy
>>> from scipy.stats import ttest_ind
>>> np.round(ttest_ind(df["N"], df["E"]), 3)
array([-8.397, 0. ])
Passing custom parameters to the lower-level :py:func:`scipy.stats.ttest_ind` function
>>> df.ptests(alternative="greater", equal_var=True)
N E O A C
N -
E -8.397 - ***
O -8.332 -0.596 - ***
A -8.804 0.12 0.72 - ***
C -4.759 3.753 4.074 3.787 -
Paired T-test, showing the actual p-values instead of stars
>>> df.ptests(paired=True, stars=False, decimals=4)
N E O A C
N - 0.0000 0.0000 0.0000 0.0002
E -7.0773 - 0.8776 0.7522 0.0012
O -8.0568 -0.1555 - 0.8137 0.0008
A -8.3994 0.3191 0.2383 - 0.0009
C -4.2511 3.5953 3.7849 3.7652 -
Adjusting for multiple comparisons using the Holm-Bonferroni method
>>> df.ptests(paired=True, stars=False, padjust="holm")
N E O A C
N - 0.000 0.000 0.000 0.001
E -7.077 - 1. 1. 0.005
O -8.057 -0.155 - 1. 0.005
A -8.399 0.319 0.238 - 0.005
C -4.251 3.595 3.785 3.765 -
@pf.register_dataframe_method
def ptests(
    self,
    paired=False,
    decimals=3,
    padjust=None,
    stars=True,
    pval_stars={0.001: "***", 0.01: "**", 0.05: "*"},
    **kwargs,
):
    """
    Pairwise T-test between columns of a dataframe.
    T-values are reported on the lower triangle of the output pairwise matrix and p-values on the
    upper triangle. This method is a faster, but less exhaustive, matrix-version of the
    :py:func:`pingouin.pairwise_test` function. Missing values are automatically removed from each
    pairwise T-test.
    .. versionadded:: 0.5.3
    Parameters
    ----------
    self : :py:class:`pandas.DataFrame`
        Input dataframe.
    paired : boolean
        Specify whether the two observations are related (i.e. repeated measures) or independent.
    decimals : int
        Number of decimals to display in the output matrix.
    padjust : string or None
        P-values adjustment for multiple comparison
        * ``'none'``: no correction
        * ``'bonf'``: one-step Bonferroni correction
        * ``'sidak'``: one-step Sidak correction
        * ``'holm'``: step-down method using Bonferroni adjustments
        * ``'fdr_bh'``: Benjamini/Hochberg FDR correction
        * ``'fdr_by'``: Benjamini/Yekutieli FDR correction
    stars : boolean
        If True, only significant p-values are displayed as stars using the pre-defined thresholds
        of ``pval_stars``. If False, all the raw p-values are displayed.
    pval_stars : dict
        Significance thresholds. Default is 3 stars for p-values <0.001, 2 stars for
        p-values <0.01 and 1 star for p-values <0.05.
    **kwargs : optional
        Optional argument(s) passed to the lower-level scipy functions, i.e.
        :py:func:`scipy.stats.ttest_ind` for independent T-test and
        :py:func:`scipy.stats.ttest_rel` for paired T-test.
    Returns
    -------
    mat : :py:class:`pandas.DataFrame`
        Pairwise T-test matrix, of dtype str, with T-values on the lower triangle and p-values on
        the upper triangle.
    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> import pingouin as pg
    >>> # Load an example dataset of personality dimensions
    >>> df = pg.read_dataset('pairwise_corr').iloc[:30, 1:]
    >>> df.columns = ["N", "E", "O", 'A', "C"]
    >>> # Add some missing values
    >>> df.iloc[[2, 5, 20], 2] = np.nan
    >>> df.iloc[[1, 4, 10], 3] = np.nan
    >>> df.head().round(2)
    N E O A C
    0 2.48 4.21 3.94 3.96 3.46
    1 2.60 3.19 3.96 NaN 3.23
    2 2.81 2.90 NaN 2.75 3.50
    3 2.90 3.56 3.52 3.17 2.79
    4 3.02 3.33 4.02 NaN 2.85
    Independent pairwise T-tests
    >>> df.ptests()
    N E O A C
    N - *** *** *** ***
    E -8.397 - ***
    O -8.332 -0.596 - ***
    A -8.804 0.12 0.72 - ***
    C -4.759 3.753 4.074 3.787 -
    Let's compare with SciPy
    >>> from scipy.stats import ttest_ind
    >>> np.round(ttest_ind(df["N"], df["E"]), 3)
    array([-8.397, 0. ])
    Passing custom parameters to the lower-level :py:func:`scipy.stats.ttest_ind` function
    >>> df.ptests(alternative="greater", equal_var=True)
    N E O A C
    N -
    E -8.397 - ***
    O -8.332 -0.596 - ***
    A -8.804 0.12 0.72 - ***
    C -4.759 3.753 4.074 3.787 -
    Paired T-test, showing the actual p-values instead of stars
    >>> df.ptests(paired=True, stars=False, decimals=4)
    N E O A C
    N - 0.0000 0.0000 0.0000 0.0002
    E -7.0773 - 0.8776 0.7522 0.0012
    O -8.0568 -0.1555 - 0.8137 0.0008
    A -8.3994 0.3191 0.2383 - 0.0009
    C -4.2511 3.5953 3.7849 3.7652 -
    Adjusting for multiple comparisons using the Holm-Bonferroni method
    >>> df.ptests(paired=True, stars=False, padjust="holm")
    N E O A C
    N - 0.000 0.000 0.000 0.001
    E -7.077 - 1. 1. 0.005
    O -8.057 -0.155 - 1. 0.005
    A -8.399 0.319 0.238 - 0.005
    C -4.251 3.595 3.785 3.765 -
    """
    # Local imports keep the module namespace clean; aliases shorten the
    # repeated triangle-indexing and float-formatting calls below.
    from itertools import combinations
    from numpy import triu_indices_from as tif
    from numpy import format_float_positional as ffp
    from scipy.stats import ttest_ind, ttest_rel
    assert isinstance(pval_stars, dict), "pval_stars must be a dictionary."
    assert isinstance(decimals, int), "decimals must be an int."
    # Select the paired or independent scipy T-test; both share the same
    # (a, b, **kwargs) call signature.
    if paired:
        func = ttest_rel
    else:
        func = ttest_ind
    # Get T-values and p-values
    # We cannot use pandas.DataFrame.corr here because it will incorrectly remove rows missing
    # values, even when using an independent T-test!
    cols = self.columns
    combs = list(combinations(cols, 2))
    # mat holds T-values (lower triangle), mat_upper holds p-values (upper).
    mat = pd.DataFrame(columns=cols, index=cols, dtype=np.float64)
    mat_upper = mat.copy()
    for a, b in combs:
        # nan_policy="omit" drops missing values pair by pair.
        t, p = func(self[a], self[b], **kwargs, nan_policy="omit")
        mat.loc[b, a] = np.round(t, decimals)
        # Do not round p-value here, or we'll lose precision for multicomp
        mat_upper.loc[a, b] = p
    if padjust is not None:
        # multicomp returns (reject, pvals_corrected); only the corrected
        # p-values are used, so the alpha=0.05 passed here has no effect on
        # the output.
        # NOTE(review): writing through DataFrame.to_numpy() assumes it
        # returns a writable view for this single-dtype frame — confirm with
        # the pandas version pinned by the project (copy-on-write changes this).
        pvals = mat_upper.to_numpy()[tif(mat, k=1)]
        mat_upper.to_numpy()[tif(mat, k=1)] = multicomp(pvals, alpha=0.05, method=padjust)[1]
    # Convert T-values to str, and fill the diagonal with "-"
    mat = mat.astype(str)
    np.fill_diagonal(mat.to_numpy(), "-")
    def replace_pval(x):
        # Map a p-value to the first (smallest) threshold it beats, or "".
        # Relies on pval_stars iterating in ascending-threshold order, which
        # holds for the default dict literal (insertion order).
        for key, value in pval_stars.items():
            if x < key:
                return value
        return ""
    if stars:
        # Replace p-values by stars
        # NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1
        # (renamed to DataFrame.map) — verify against the supported pandas range.
        mat_upper = mat_upper.applymap(replace_pval)
    else:
        # Fixed-point formatting of the raw (possibly corrected) p-values.
        mat_upper = mat_upper.applymap(lambda x: ffp(x, precision=decimals))
    # Replace upper triangle by p-values
    mat.to_numpy()[tif(mat, k=1)] = mat_upper.to_numpy()[tif(mat, k=1)]
    return mat
| (self, paired=False, decimals=3, padjust=None, stars=True, pval_stars={0.001: '***', 0.01: '**', 0.05: '*'}, **kwargs) |
32,053 | pingouin.plotting | qqplot | Quantile-Quantile plot.
Parameters
----------
x : array_like
Sample data.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is `'norm'`
for a normal probability plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters, location,
and scale). See :py:func:`scipy.stats.probplot` for more details.
confidence : float
Confidence level (.95 = 95%) for point-wise confidence envelope.
Can be disabled by passing False.
square: bool
If True (default), ensure equal aspect ratio between X and Y axes.
ax : matplotlib axes
Axis on which to draw the plot
**kwargs : optional
Optional argument(s) passed to :py:func:`matplotlib.pyplot.scatter`.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Raises
------
ValueError
If ``sparams`` does not contain the required parameters for ``dist``.
(e.g. :py:class:`scipy.stats.t` has a mandatory degrees of
freedom parameter *df*.)
Notes
-----
This function returns a scatter plot of the quantile of the sample data
``x`` against the theoretical quantiles of the distribution given in
``dist`` (default = *'norm'*).
The points plotted in a Q–Q plot are always non-decreasing when viewed
from left to right. If the two distributions being compared are identical,
the Q–Q plot follows the 45° line y = x. If the two distributions agree
after linearly transforming the values in one of the distributions,
then the Q–Q plot follows some line, but not necessarily the line y = x.
If the general trend of the Q–Q plot is flatter than the line y = x,
the distribution plotted on the horizontal axis is more dispersed than
the distribution plotted on the vertical axis. Conversely, if the general
trend of the Q–Q plot is steeper than the line y = x, the distribution
plotted on the vertical axis is more dispersed than the distribution
plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped,
indicating that one of the distributions is more skewed than the other,
or that one of the distributions has heavier tails than the other.
In addition, the function also plots a best-fit line (linear regression)
for the data and annotates the plot with the coefficient of
determination :math:`R^2`. Note that the intercept and slope of the
linear regression between the quantiles gives a measure of the relative
location and relative scale of the samples.
.. warning:: Be extra careful when using fancier distributions with several
parameters. Always double-check your results with another
software or package.
References
----------
* https://github.com/cran/car/blob/master/R/qqPlot.R
* Fox, J. (2008), Applied Regression Analysis and Generalized Linear
Models, 2nd Ed., Sage Publications, Inc.
Examples
--------
Q-Q plot using a normal theoretical distribution:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> ax = pg.qqplot(x, dist='norm')
Two Q-Q plots using two separate axes:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> x_exp = np.random.exponential(size=50)
>>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
>>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False)
>>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2)
Using custom location / scale parameters as well as another Seaborn style
.. plot::
>>> import numpy as np
>>> import seaborn as sns
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> mean, std = 0, 0.8
>>> sns.set_style('darkgrid')
>>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std))
def qqplot(x, dist="norm", sparams=(), confidence=0.95, square=True, ax=None, **kwargs):
    """Quantile-Quantile plot.
    Parameters
    ----------
    x : array_like
        Sample data.
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. The default is `'norm'`
        for a normal probability plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters, location,
        and scale). See :py:func:`scipy.stats.probplot` for more details.
    confidence : float
        Confidence level (.95 = 95%) for point-wise confidence envelope.
        Can be disabled by passing False.
    square: bool
        If True (default), ensure equal aspect ratio between X and Y axes.
    ax : matplotlib axes
        Axis on which to draw the plot
    **kwargs : optional
        Optional argument(s) passed to :py:func:`matplotlib.pyplot.scatter`.
    Returns
    -------
    ax : Matplotlib Axes instance
        Returns the Axes object with the plot for further tweaking.
    Raises
    ------
    ValueError
        If ``sparams`` does not contain the required parameters for ``dist``.
        (e.g. :py:class:`scipy.stats.t` has a mandatory degrees of
        freedom parameter *df*.)
    Notes
    -----
    This function returns a scatter plot of the quantile of the sample data
    ``x`` against the theoretical quantiles of the distribution given in
    ``dist`` (default = *'norm'*).
    The points plotted in a Q–Q plot are always non-decreasing when viewed
    from left to right. If the two distributions being compared are identical,
    the Q–Q plot follows the 45° line y = x. If the two distributions agree
    after linearly transforming the values in one of the distributions,
    then the Q–Q plot follows some line, but not necessarily the line y = x.
    If the general trend of the Q–Q plot is flatter than the line y = x,
    the distribution plotted on the horizontal axis is more dispersed than
    the distribution plotted on the vertical axis. Conversely, if the general
    trend of the Q–Q plot is steeper than the line y = x, the distribution
    plotted on the vertical axis is more dispersed than the distribution
    plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped,
    indicating that one of the distributions is more skewed than the other,
    or that one of the distributions has heavier tails than the other.
    In addition, the function also plots a best-fit line (linear regression)
    for the data and annotates the plot with the coefficient of
    determination :math:`R^2`. Note that the intercept and slope of the
    linear regression between the quantiles gives a measure of the relative
    location and relative scale of the samples.
    .. warning:: Be extra careful when using fancier distributions with several
                 parameters. Always double-check your results with another
                 software or package.
    References
    ----------
    * https://github.com/cran/car/blob/master/R/qqPlot.R
    * Fox, J. (2008), Applied Regression Analysis and Generalized Linear
      Models, 2nd Ed., Sage Publications, Inc.
    Examples
    --------
    Q-Q plot using a normal theoretical distribution:
    .. plot::
        >>> import numpy as np
        >>> import pingouin as pg
        >>> np.random.seed(123)
        >>> x = np.random.normal(size=50)
        >>> ax = pg.qqplot(x, dist='norm')
    Two Q-Q plots using two separate axes:
    .. plot::
        >>> import numpy as np
        >>> import pingouin as pg
        >>> import matplotlib.pyplot as plt
        >>> np.random.seed(123)
        >>> x = np.random.normal(size=50)
        >>> x_exp = np.random.exponential(size=50)
        >>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
        >>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False)
        >>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2)
    Using custom location / scale parameters as well as another Seaborn style
    .. plot::
        >>> import numpy as np
        >>> import seaborn as sns
        >>> import pingouin as pg
        >>> import matplotlib.pyplot as plt
        >>> np.random.seed(123)
        >>> x = np.random.normal(size=50)
        >>> mean, std = 0, 0.8
        >>> sns.set_style('darkgrid')
        >>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std))
    """
    # Update default kwargs with specified inputs
    _scatter_kwargs = {"marker": "o", "color": "blue"}
    _scatter_kwargs.update(kwargs)
    # Accept either a distribution name ('norm') or a scipy frozen/raw
    # distribution object.
    if isinstance(dist, str):
        dist = getattr(stats, dist)
    x = np.asarray(x)
    x = x[~np.isnan(x)]  # NaN are automatically removed
    # Check sparams: if single parameter, tuple becomes int
    if not isinstance(sparams, (tuple, list)):
        sparams = (sparams,)
    # For fancier distributions, check that the required parameters are passed
    if len(sparams) < dist.numargs:
        raise ValueError(
            "The following sparams are required for this "
            "distribution: %s. See scipy.stats.%s for details." % (dist.shapes, dist.name)
        )
    # Extract quantiles and regression
    # fit=False returns (theoretical quantiles, ordered sample values).
    quantiles = stats.probplot(x, sparams=sparams, dist=dist, fit=False)
    theor, observed = quantiles[0], quantiles[1]
    # Maximum-likelihood fit of dist to x; scipy convention puts loc and
    # scale as the last two entries, shape parameters (if any) first.
    fit_params = dist.fit(x)
    loc = fit_params[-2]
    scale = fit_params[-1]
    shape = fit_params[:-2] if len(fit_params) > 2 else None
    # Observed values to observed quantiles
    # NOTE(review): the `and` means no standardization happens when only one
    # of loc/scale differs from its default (e.g. loc != 0 but scale == 1) —
    # confirm whether `or` was intended here.
    if loc != 0 and scale != 1:
        observed = (np.sort(observed) - fit_params[-2]) / fit_params[-1]
    # Linear regression
    slope, intercept, r, _, _ = stats.linregress(theor, observed)
    # Start the plot
    if ax is None:
        ax = plt.gca()
    ax.scatter(theor, observed, **_scatter_kwargs)
    ax.set_xlabel("Theoretical quantiles")
    ax.set_ylabel("Ordered quantiles")
    # Add diagonal line
    # Shared limits: min of both lower bounds, max of both upper bounds,
    # so y = x spans the full visible range.
    end_pts = [ax.get_xlim(), ax.get_ylim()]
    end_pts[0] = min(end_pts[0])
    end_pts[1] = max(end_pts[1])
    ax.plot(end_pts, end_pts, color="slategrey", lw=1.5)
    ax.set_xlim(end_pts)
    ax.set_ylim(end_pts)
    # Add regression line and annotate R2
    fit_val = slope * theor + intercept
    ax.plot(theor, fit_val, "r-", lw=2)
    # Place the R^2 annotation at 60% / 10% of the axis range.
    posx = end_pts[0] + 0.60 * (end_pts[1] - end_pts[0])
    posy = end_pts[0] + 0.10 * (end_pts[1] - end_pts[0])
    ax.text(posx, posy, "$R^2=%.3f$" % r**2)
    if confidence is not False:
        # Confidence envelope
        # Point-wise standard error of order statistics, following the
        # car::qqPlot approach cited in the References section.
        n = x.size
        P = _ppoints(n)
        crit = stats.norm.ppf(1 - (1 - confidence) / 2)
        pdf = dist.pdf(theor) if shape is None else dist.pdf(theor, *shape)
        se = (slope / pdf) * np.sqrt(P * (1 - P) / n)
        upper = fit_val + crit * se
        lower = fit_val - crit * se
        ax.plot(theor, upper, "r--", lw=1.25)
        ax.plot(theor, lower, "r--", lw=1.25)
    # Make square
    if square:
        ax.set_aspect("equal")
    return ax
| (x, dist='norm', sparams=(), confidence=0.95, square=True, ax=None, **kwargs) |
32,054 | pingouin.correlation | rcorr |
Correlation matrix of a dataframe with p-values and/or sample size on the
upper triangle (:py:class:`pandas.DataFrame` method).
This method is a faster, but less exhaustive, matrix-version of the
:py:func:`pingouin.pairwise_corr` function. It is based on the
:py:func:`pandas.DataFrame.corr` method. Missing values are automatically
removed from each pairwise correlation.
Parameters
----------
self : :py:class:`pandas.DataFrame`
Input dataframe.
method : str
Correlation method. Can be either 'pearson' or 'spearman'.
upper : str
If 'pval', the upper triangle of the output correlation matrix shows
the p-values. If 'n', the upper triangle is the sample size used in
each pairwise correlation.
decimals : int
Number of decimals to display in the output correlation matrix.
padjust : string or None
Method used for testing and adjustment of pvalues.
* ``'none'``: no correction
* ``'bonf'``: one-step Bonferroni correction
* ``'sidak'``: one-step Sidak correction
* ``'holm'``: step-down method using Bonferroni adjustments
* ``'fdr_bh'``: Benjamini/Hochberg FDR correction
* ``'fdr_by'``: Benjamini/Yekutieli FDR correction
stars : boolean
If True, only significant p-values are displayed as stars using the
pre-defined thresholds of ``pval_stars``. If False, all the raw
p-values are displayed.
pval_stars : dict
Significance thresholds. Default is 3 stars for p-values < 0.001,
2 stars for p-values < 0.01 and 1 star for p-values < 0.05.
Returns
-------
rcorr : :py:class:`pandas.DataFrame`
Correlation matrix, of type str.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> import pingouin as pg
>>> # Load an example dataset of personality dimensions
>>> df = pg.read_dataset('pairwise_corr').iloc[:, 1:]
>>> # Add some missing values
>>> df.iloc[[2, 5, 20], 2] = np.nan
>>> df.iloc[[1, 4, 10], 3] = np.nan
>>> df.head().round(2)
Neuroticism Extraversion Openness Agreeableness Conscientiousness
0 2.48 4.21 3.94 3.96 3.46
1 2.60 3.19 3.96 NaN 3.23
2 2.81 2.90 NaN 2.75 3.50
3 2.90 3.56 3.52 3.17 2.79
4 3.02 3.33 4.02 NaN 2.85
>>> # Correlation matrix on the four first columns
>>> df.iloc[:, 0:4].rcorr()
Neuroticism Extraversion Openness Agreeableness
Neuroticism - *** **
Extraversion -0.35 - ***
Openness -0.01 0.265 - ***
Agreeableness -0.134 0.054 0.161 -
>>> # Spearman correlation and Holm adjustement for multiple comparisons
>>> df.iloc[:, 0:4].rcorr(method='spearman', padjust='holm')
Neuroticism Extraversion Openness Agreeableness
Neuroticism - *** **
Extraversion -0.325 - ***
Openness -0.027 0.24 - ***
Agreeableness -0.15 0.06 0.173 -
>>> # Compare with the pg.pairwise_corr function
>>> pairwise = df.iloc[:, 0:4].pairwise_corr(method='spearman',
... padjust='holm')
>>> pairwise[['X', 'Y', 'r', 'p-corr']].round(3) # Do not show all columns
X Y r p-corr
0 Neuroticism Extraversion -0.325 0.000
1 Neuroticism Openness -0.027 0.543
2 Neuroticism Agreeableness -0.150 0.002
3 Extraversion Openness 0.240 0.000
4 Extraversion Agreeableness 0.060 0.358
5 Openness Agreeableness 0.173 0.000
>>> # Display the raw p-values with four decimals
>>> df.iloc[:, [0, 1, 3]].rcorr(stars=False, decimals=4)
Neuroticism Extraversion Agreeableness
Neuroticism - 0.0000 0.0028
Extraversion -0.3501 - 0.2305
Agreeableness -0.134 0.0539 -
>>> # With the sample size on the upper triangle instead of the p-values
>>> df.iloc[:, [0, 1, 2]].rcorr(upper='n')
Neuroticism Extraversion Openness
Neuroticism - 500 497
Extraversion -0.35 - 497
Openness -0.01 0.265 -
@pf.register_dataframe_method
def rcorr(
    self,
    method="pearson",
    upper="pval",
    decimals=3,
    padjust=None,
    stars=True,
    pval_stars={0.001: "***", 0.01: "**", 0.05: "*"},
):
    """
    Correlation matrix of a dataframe with p-values and/or sample size on the
    upper triangle (:py:class:`pandas.DataFrame` method).

    This method is a faster, but less exhaustive, matrix-version of the
    :py:func:`pingouin.pairwise_corr` function. It is based on the
    :py:func:`pandas.DataFrame.corr` method. Missing values are automatically
    removed from each pairwise correlation.

    Parameters
    ----------
    self : :py:class:`pandas.DataFrame`
        Input dataframe.
    method : str
        Correlation method. Can be either 'pearson' or 'spearman'.
    upper : str
        If 'pval', the upper triangle of the output correlation matrix shows
        the p-values. If 'n', the upper triangle is the sample size used in
        each pairwise correlation.
    decimals : int
        Number of decimals to display in the output correlation matrix.
    padjust : string or None
        Method used for testing and adjustment of pvalues.

        * ``'none'``: no correction
        * ``'bonf'``: one-step Bonferroni correction
        * ``'sidak'``: one-step Sidak correction
        * ``'holm'``: step-down method using Bonferroni adjustments
        * ``'fdr_bh'``: Benjamini/Hochberg FDR correction
        * ``'fdr_by'``: Benjamini/Yekutieli FDR correction
    stars : boolean
        If True, only significant p-values are displayed as stars using the
        pre-defined thresholds of ``pval_stars``. If False, all the raw
        p-values are displayed.
    pval_stars : dict
        Significance thresholds. Default is 3 stars for p-values < 0.001,
        2 stars for p-values < 0.01 and 1 star for p-values < 0.05.

    Returns
    -------
    rcorr : :py:class:`pandas.DataFrame`
        Correlation matrix, of type str.

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> import pingouin as pg
    >>> # Load an example dataset of personality dimensions
    >>> df = pg.read_dataset('pairwise_corr').iloc[:, 1:]
    >>> # Add some missing values
    >>> df.iloc[[2, 5, 20], 2] = np.nan
    >>> df.iloc[[1, 4, 10], 3] = np.nan
    >>> df.head().round(2)
       Neuroticism  Extraversion  Openness  Agreeableness  Conscientiousness
    0         2.48          4.21      3.94           3.96               3.46
    1         2.60          3.19      3.96            NaN               3.23
    2         2.81          2.90       NaN           2.75               3.50
    3         2.90          3.56      3.52           3.17               2.79
    4         3.02          3.33      4.02            NaN               2.85

    >>> # Correlation matrix on the four first columns
    >>> df.iloc[:, 0:4].rcorr()
                  Neuroticism Extraversion Openness Agreeableness
    Neuroticism             -          ***                     **
    Extraversion        -0.35            -
    Openness            -0.01        0.265        -           ***
    Agreeableness      -0.134        0.054    0.161             -

    >>> # Spearman correlation and Holm adjustment for multiple comparisons
    >>> df.iloc[:, 0:4].rcorr(method='spearman', padjust='holm')
                  Neuroticism Extraversion Openness Agreeableness
    Neuroticism             -          ***                     **
    Extraversion       -0.325            -
    Openness           -0.027         0.24        -           ***
    Agreeableness       -0.15         0.06    0.173             -

    >>> # Compare with the pg.pairwise_corr function
    >>> pairwise = df.iloc[:, 0:4].pairwise_corr(method='spearman',
    ...                                          padjust='holm')
    >>> pairwise[['X', 'Y', 'r', 'p-corr']].round(3)  # Do not show all columns
                  X              Y      r  p-corr
    0   Neuroticism   Extraversion -0.325   0.000
    1   Neuroticism       Openness -0.027   0.543
    2   Neuroticism  Agreeableness -0.150   0.002
    3  Extraversion       Openness  0.240   0.000
    4  Extraversion  Agreeableness  0.060   0.358
    5      Openness  Agreeableness  0.173   0.000

    >>> # Display the raw p-values with four decimals
    >>> df.iloc[:, [0, 1, 3]].rcorr(stars=False, decimals=4)
                  Neuroticism Extraversion Agreeableness
    Neuroticism             -       0.0000        0.0028
    Extraversion      -0.3501            -        0.2305
    Agreeableness      -0.134       0.0539             -

    >>> # With the sample size on the upper triangle instead of the p-values
    >>> df.iloc[:, [0, 1, 2]].rcorr(upper='n')
                 Neuroticism Extraversion Openness
    Neuroticism            -          500      497
    Extraversion       -0.35            -      497
    Openness           -0.01        0.265        -
    """
    from numpy import triu_indices_from as tif
    from numpy import format_float_positional as ffp
    from scipy.stats import pearsonr, spearmanr

    # Safety check
    assert isinstance(pval_stars, dict), "pval_stars must be a dictionnary."
    assert isinstance(decimals, int), "decimals must be an int."
    assert method in ["pearson", "spearman"], "Method is not recognized."
    assert upper in ["pval", "n"], "upper must be either `pval` or `n`."
    # Lower triangle: rounded pairwise correlation coefficients
    mat = self.corr(method=method, numeric_only=True).round(decimals)
    if upper == "n":
        # Upper triangle: pairwise sample sizes. The custom callable receives the
        # two complete-case columns, so len(x) is the number of complete pairs.
        mat_upper = self.corr(method=lambda x, y: len(x), numeric_only=True).astype(int)
    else:
        # Upper triangle: uncorrected p-value of each pairwise correlation
        if method == "pearson":
            mat_upper = self.corr(method=lambda x, y: pearsonr(x, y)[1], numeric_only=True)
        else:
            # Method = 'spearman'
            mat_upper = self.corr(method=lambda x, y: spearmanr(x, y)[1], numeric_only=True)
        if padjust is not None:
            # Correct the off-diagonal p-values for multiple comparisons.
            # NOTE(review): the in-place write below assumes DataFrame.to_numpy()
            # returns a view of the underlying single-dtype float block -- confirm
            # this still holds for the installed pandas version.
            pvals = mat_upper.to_numpy()[tif(mat, k=1)]
            mat_upper.to_numpy()[tif(mat, k=1)] = multicomp(pvals, alpha=0.05, method=padjust)[1]
    # Convert r to text
    mat = mat.astype(str)
    # Inplace modification of the diagonal
    np.fill_diagonal(mat.to_numpy(), "-")
    if upper == "pval":

        def replace_pval(x):
            # Thresholds are tried in dict insertion order (most significant first
            # with the default); the first matching threshold wins.
            for key, value in pval_stars.items():
                if x < key:
                    return value
            return ""

        if stars:
            # Replace p-values by stars
            # NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1
            # (renamed to DataFrame.map) -- consider updating when bumping pandas.
            mat_upper = mat_upper.applymap(replace_pval)
        else:
            # Show the raw p-values with a fixed number of decimals instead of stars
            mat_upper = mat_upper.applymap(lambda x: ffp(x, precision=decimals))
    # Replace upper triangle by p-values or n
    mat.to_numpy()[tif(mat, k=1)] = mat_upper.to_numpy()[tif(mat, k=1)]
    return mat
| (self, method='pearson', upper='pval', decimals=3, padjust=None, stars=True, pval_stars={0.001: '***', 0.01: '**', 0.05: '*'}) |
32,055 | pingouin.datasets | read_dataset | Read example datasets.
Parameters
----------
dname : string
Name of dataset to read (without extension).
Must be a valid dataset present in pingouin.datasets
Returns
-------
data : :py:class:`pandas.DataFrame`
Requested dataset.
Examples
--------
Load the `Penguin <https://github.com/allisonhorst/palmerpenguins>`_
dataset:
>>> import pingouin as pg
>>> df = pg.read_dataset('penguins')
>>> df # doctest: +SKIP
species island bill_length_mm ... flipper_length_mm body_mass_g sex
0 Adelie Biscoe 37.8 ... 174.0 3400.0 female
1 Adelie Biscoe 37.7 ... 180.0 3600.0 male
2 Adelie Biscoe 35.9 ... 189.0 3800.0 female
3 Adelie Biscoe 38.2 ... 185.0 3950.0 male
4 Adelie Biscoe 38.8 ... 180.0 3800.0 male
.. ... ... ... ... ... ... ...
339 Gentoo Biscoe NaN ... NaN NaN NaN
340 Gentoo Biscoe 46.8 ... 215.0 4850.0 female
341 Gentoo Biscoe 50.4 ... 222.0 5750.0 male
342 Gentoo Biscoe 45.2 ... 212.0 5200.0 female
343 Gentoo Biscoe 49.9 ... 213.0 5400.0 male
def read_dataset(dname):
    """Read example datasets.

    Parameters
    ----------
    dname : string
        Name of dataset to read (without extension).
        Must be a valid dataset present in pingouin.datasets

    Returns
    -------
    data : :py:class:`pandas.DataFrame`
        Requested dataset.

    Raises
    ------
    ValueError
        If ``dname`` is not a valid dataset name.

    Examples
    --------
    Load the `Penguin <https://github.com/allisonhorst/palmerpenguins>`_
    dataset:

    >>> import pingouin as pg
    >>> df = pg.read_dataset('penguins')
    >>> df # doctest: +SKIP
        species  island  bill_length_mm  ...  flipper_length_mm  body_mass_g     sex
    0    Adelie  Biscoe            37.8  ...              174.0       3400.0  female
    1    Adelie  Biscoe            37.7  ...              180.0       3600.0    male
    2    Adelie  Biscoe            35.9  ...              189.0       3800.0  female
    3    Adelie  Biscoe            38.2  ...              185.0       3950.0    male
    4    Adelie  Biscoe            38.8  ...              180.0       3800.0    male
    ..      ...     ...             ...  ...                ...          ...     ...
    339  Gentoo  Biscoe             NaN  ...                NaN          NaN     NaN
    340  Gentoo  Biscoe            46.8  ...              215.0       4850.0  female
    341  Gentoo  Biscoe            50.4  ...              222.0       5750.0    male
    342  Gentoo  Biscoe            45.2  ...              212.0       5200.0  female
    343  Gentoo  Biscoe            49.9  ...              213.0       5400.0    male
    """
    # Strip a trailing ".csv" extension if the caller provided one
    d, ext = op.splitext(dname)
    if ext.lower() == ".csv":
        dname = d
    # Validate against the registry of bundled datasets (computed once)
    valid_names = dts["dataset"].to_numpy()
    if dname not in valid_names:
        # Single formatted message: the previous code passed two arguments to
        # ValueError, which rendered the error as an awkward tuple.
        raise ValueError(
            f"Dataset does not exist. Valid dataset names are: {', '.join(valid_names)}"
        )
    # Load the dataset from the bundled data directory
    return pd.read_csv(op.join(ddir, dname + ".csv"), sep=",")
| (dname) |
32,058 | pingouin.utils | remove_na | Remove missing values along a given axis in one or more (paired) numpy arrays.
Parameters
----------
x, y : 1D or 2D arrays
Data. ``x`` and ``y`` must have the same number of dimensions.
``y`` can be None to only remove missing values in ``x``.
paired : bool
Indicates if the measurements are paired or not.
axis : str
Axis or axes along which missing values are removed.
Can be 'rows' or 'columns'. This has no effect if ``x`` and ``y`` are
one-dimensional arrays.
Returns
-------
x, y : np.ndarray
Data without missing values
Examples
--------
Single 1D array
>>> import numpy as np
>>> from pingouin import remove_na
>>> x = [6.4, 3.2, 4.5, np.nan]
>>> remove_na(x)
array([6.4, 3.2, 4.5])
With two paired 1D arrays
>>> y = [2.3, np.nan, 5.2, 4.6]
>>> remove_na(x, y, paired=True)
(array([6.4, 4.5]), array([2.3, 5.2]))
With two independent 2D arrays
>>> x = np.array([[4, 2], [4, np.nan], [7, 6]])
>>> y = np.array([[6, np.nan], [3, 2], [2, 2]])
>>> x_no_nan, y_no_nan = remove_na(x, y, paired=False)
def remove_na(x, y=None, paired=False, axis="rows"):
    """Drop missing values from one or two (possibly paired) numpy arrays.

    Parameters
    ----------
    x, y : 1D or 2D arrays
        Data arrays. When both are given they must have the same number of
        dimensions. ``y`` can be None to only remove missing values in ``x``.
    paired : bool
        Whether the measurements are paired. For paired arrays, an entry is
        kept only if it is complete in *both* arrays.
    axis : str
        'rows' or 'columns': the axis along which incomplete entries are
        dropped. Ignored when ``x`` and ``y`` are one-dimensional.

    Returns
    -------
    x, y : np.ndarray
        Data without missing values.

    Examples
    --------
    Single 1D array

    >>> import numpy as np
    >>> from pingouin import remove_na
    >>> x = [6.4, 3.2, 4.5, np.nan]
    >>> remove_na(x)
    array([6.4, 3.2, 4.5])

    With two paired 1D arrays

    >>> y = [2.3, np.nan, 5.2, 4.6]
    >>> remove_na(x, y, paired=True)
    (array([6.4, 4.5]), array([2.3, 5.2]))

    With two independent 2D arrays

    >>> x = np.array([[4, 2], [4, np.nan], [7, 6]])
    >>> y = np.array([[6, np.nan], [3, 2], [2, 2]])
    >>> x_no_nan, y_no_nan = remove_na(x, y, paired=False)
    """
    x = np.asarray(x)
    assert axis in ["rows", "columns"], "axis must be rows or columns."

    # Guard clauses: single array, or scalar-like y passed through untouched
    if y is None:
        return _remove_na_single(x, axis=axis)
    if isinstance(y, (int, float, str)):
        return _remove_na_single(x, axis=axis), y

    # y is array-like (list, np.ndarray, pd.Series, ...)
    y = np.asarray(y)
    assert y.size != 0, "y cannot be an empty list or array."
    if y.size == 1:
        # Single-element y: just clean x and pass y through
        return _remove_na_single(x, axis=axis), y
    if paired is False or x.ndim != y.ndim:
        # Unpaired data, or mismatched dimensions: clean each array separately
        return _remove_na_single(x, axis=axis), _remove_na_single(y, axis=axis)

    # Paired arrays with matching dimensions: keep only jointly complete entries
    if x.ndim == 1:
        x_complete = ~np.isnan(x)
        y_complete = ~np.isnan(y)
    else:
        # Scan the opposite axis: a row is incomplete if any of its cells is NaN
        scan_axis = 1 if axis == "rows" else 0
        x_complete = ~np.isnan(x).any(axis=scan_axis)
        y_complete = ~np.isnan(y).any(axis=scan_axis)
    if not (x_complete.all() and y_complete.all()):
        keep_axis = 0 if (x.ndim == 1 or axis == "rows") else 1
        keep = np.logical_and(x_complete, y_complete)
        x = x.compress(keep, axis=keep_axis)
        y = y.compress(keep, axis=keep_axis)
    return x, y
| (x, y=None, paired=False, axis='rows') |
32,059 | pingouin.parametric | rm_anova | One-way and two-way repeated measures ANOVA.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame. Note that this function can also directly be used as a
:py:class:`pandas.DataFrame` method, in which case this argument is no
longer needed.
Both wide and long-format dataframe are supported for one-way repeated
measures ANOVA. However, ``data`` must be in long format for two-way
repeated measures.
dv : string
Name of column containing the dependent variable (only required if
``data`` is in long format).
within : string or list of string
Name of column containing the within factor (only required if ``data``
is in long format).
If ``within`` is a single string, then compute a one-way repeated
measures ANOVA, if ``within`` is a list with two strings,
compute a two-way repeated measures ANOVA.
subject : string
Name of column containing the subject identifier (only required if
``data`` is in long format).
correction : string or boolean
If True, also return the Greenhouse-Geisser corrected p-value.
The default for one-way design is to compute Mauchly's test of
sphericity to determine whether the p-values needs to be corrected
(see :py:func:`pingouin.sphericity`).
The default for two-way design is to return both the uncorrected and
Greenhouse-Geisser corrected p-values. Note that sphericity test for
two-way design are not currently implemented in Pingouin.
detailed : boolean
If True, return a full ANOVA table.
effsize : string
Effect size. Must be one of 'np2' (partial eta-squared), 'n2'
(eta-squared) or 'ng2'(generalized eta-squared, default). Note that for
one-way repeated measure ANOVA, eta-squared is the same as the generalized eta-squared.
Returns
-------
aov : :py:class:`pandas.DataFrame`
ANOVA summary:
* ``'Source'``: Name of the within-group factor
* ``'ddof1'``: Degrees of freedom (numerator)
* ``'ddof2'``: Degrees of freedom (denominator)
* ``'F'``: F-value
* ``'p-unc'``: Uncorrected p-value
* ``'ng2'``: Generalized eta-square effect size
* ``'eps'``: Greenhouse-Geisser epsilon factor (= index of sphericity)
* ``'p-GG-corr'``: Greenhouse-Geisser corrected p-value
* ``'W-spher'``: Sphericity test statistic
* ``'p-spher'``: p-value of the sphericity test
* ``'sphericity'``: sphericity of the data (boolean)
See Also
--------
anova : One-way and N-way ANOVA
mixed_anova : Two way mixed ANOVA
friedman : Non-parametric one-way repeated measures ANOVA
Notes
-----
Data can be in wide or long format for one-way repeated measures ANOVA but
*must* be in long format for two-way repeated measures ANOVA.
In one-way repeated-measures ANOVA, the total variance (sums of squares)
is divided into three components
.. math::
SS_{\text{total}} = SS_{\text{effect}} +
(SS_{\text{subjects}} + SS_{\text{error}})
with
.. math::
SS_{\text{total}} = \sum_i^r \sum_j^n (Y_{ij} - \overline{Y})^2
SS_{\text{effect}} = \sum_i^r n_i(\overline{Y_i} - \overline{Y})^2
SS_{\text{subjects}} = r\sum (\overline{Y}_s - \overline{Y})^2
SS_{\text{error}} = SS_{\text{total}} - SS_{\text{effect}} -
SS_{\text{subjects}}
where :math:`i=1,...,r; j=1,...,n_i`, :math:`r` is the number of
conditions, :math:`n_i` the number of observations for each condition,
:math:`\overline{Y}` the grand mean of the data, :math:`\overline{Y_i}`
the mean of the :math:`i^{th}` condition and :math:`\overline{Y}_{subj}`
the mean of the :math:`s^{th}` subject.
The F-statistics is then defined as:
.. math::
F^* = \frac{MS_{\text{effect}}}{MS_{\text{error}}} =
\frac{\frac{SS_{\text{effect}}}
{r-1}}{\frac{SS_{\text{error}}}{(n - 1)(r - 1)}}
and the p-value can be calculated using a F-distribution with
:math:`v_{\text{effect}} = r - 1` and
:math:`v_{\text{error}} = (n - 1)(r - 1)` degrees of freedom.
The default effect size reported in Pingouin is the generalized eta-squared,
which is equivalent to eta-squared for one-way repeated measures ANOVA.
.. math::
\eta_g^2 = \frac{SS_{\text{effect}}}{SS_{\text{total}}}
The partial eta-squared is defined as:
.. math::
\eta_p^2 = \frac{SS_{\text{effect}}}{SS_{\text{effect}} + SS_{\text{error}}}
Missing values are automatically removed using a strict listwise approach (= complete-case
analysis). In other words, any subject with one or more missing value(s) is completely removed
from the dataframe prior to running the test. This could drastically decrease the power of the
ANOVA if many missing values are present. In that case, we strongly recommend using linear
mixed effect modelling, which can handle missing values in repeated measures.
.. warning:: The epsilon adjustement factor of the interaction in
two-way repeated measures ANOVA where both factors have more than
two levels slightly differs than from R and JASP.
Please always make sure to double-check your results with another
software.
.. warning:: Sphericity tests for the interaction term of a two-way
repeated measures ANOVA are not currently supported in Pingouin.
Instead, please refer to the Greenhouse-Geisser epsilon value
(a value close to 1 indicates that sphericity is met.) For more
details, see :py:func:`pingouin.sphericity`.
Examples
--------
1. One-way repeated measures ANOVA using a wide-format dataset
>>> import pingouin as pg
>>> data = pg.read_dataset('rm_anova_wide')
>>> pg.rm_anova(data)
Source ddof1 ddof2 F p-unc ng2 eps
0 Within 3 24 5.200652 0.006557 0.346392 0.694329
2. One-way repeated-measures ANOVA using a long-format dataset.
We're also specifying two additional options here: ``detailed=True`` means
that we'll get a more detailed ANOVA table, and ``effsize='np2'``
means that we want to get the partial eta-squared effect size instead
of the default (generalized) eta-squared.
>>> df = pg.read_dataset('rm_anova')
>>> aov = pg.rm_anova(dv='DesireToKill', within='Disgustingness',
... subject='Subject', data=df, detailed=True, effsize="np2")
>>> aov.round(3)
Source SS DF MS F p-unc np2 eps
0 Disgustingness 27.485 1 27.485 12.044 0.001 0.116 1.0
1 Error 209.952 92 2.282 NaN NaN NaN NaN
3. Two-way repeated-measures ANOVA
>>> aov = pg.rm_anova(dv='DesireToKill', within=['Disgustingness', 'Frighteningness'],
... subject='Subject', data=df)
4. As a :py:class:`pandas.DataFrame` method
>>> df.rm_anova(dv='DesireToKill', within='Disgustingness', subject='Subject', detailed=False)
Source ddof1 ddof2 F p-unc ng2 eps
0 Disgustingness 1 92 12.043878 0.000793 0.025784 1.0
@pf.register_dataframe_method
def rm_anova(
    data=None, dv=None, within=None, subject=None, correction="auto", detailed=False, effsize="ng2"
):
    """One-way and two-way repeated measures ANOVA.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame. Note that this function can also directly be used as a
        :py:class:`pandas.DataFrame` method, in which case this argument is no
        longer needed.
        Both wide and long-format dataframe are supported for one-way repeated
        measures ANOVA. However, ``data`` must be in long format for two-way
        repeated measures.
    dv : string
        Name of column containing the dependent variable (only required if
        ``data`` is in long format).
    within : string or list of string
        Name of column containing the within factor (only required if ``data``
        is in long format).
        If ``within`` is a single string, then compute a one-way repeated
        measures ANOVA, if ``within`` is a list with two strings,
        compute a two-way repeated measures ANOVA.
    subject : string
        Name of column containing the subject identifier (only required if
        ``data`` is in long format).
    correction : string or boolean
        If True, also return the Greenhouse-Geisser corrected p-value.
        The default for one-way design is to compute Mauchly's test of
        sphericity to determine whether the p-values needs to be corrected
        (see :py:func:`pingouin.sphericity`).
        The default for two-way design is to return both the uncorrected and
        Greenhouse-Geisser corrected p-values. Note that sphericity test for
        two-way design are not currently implemented in Pingouin.
    detailed : boolean
        If True, return a full ANOVA table.
    effsize : string
        Effect size. Must be one of 'np2' (partial eta-squared), 'n2'
        (eta-squared) or 'ng2'(generalized eta-squared, default). Note that for
        one-way repeated measure ANOVA, eta-squared is the same as the generalized eta-squared.

    Returns
    -------
    aov : :py:class:`pandas.DataFrame`
        ANOVA summary:

        * ``'Source'``: Name of the within-group factor
        * ``'ddof1'``: Degrees of freedom (numerator)
        * ``'ddof2'``: Degrees of freedom (denominator)
        * ``'F'``: F-value
        * ``'p-unc'``: Uncorrected p-value
        * ``'ng2'``: Generalized eta-square effect size
        * ``'eps'``: Greenhouse-Geisser epsilon factor (= index of sphericity)
        * ``'p-GG-corr'``: Greenhouse-Geisser corrected p-value
        * ``'W-spher'``: Sphericity test statistic
        * ``'p-spher'``: p-value of the sphericity test
        * ``'sphericity'``: sphericity of the data (boolean)

    See Also
    --------
    anova : One-way and N-way ANOVA
    mixed_anova : Two way mixed ANOVA
    friedman : Non-parametric one-way repeated measures ANOVA

    Notes
    -----
    Data can be in wide or long format for one-way repeated measures ANOVA but
    *must* be in long format for two-way repeated measures ANOVA.

    In one-way repeated-measures ANOVA, the total variance (sums of squares)
    is divided into three components

    .. math::
        SS_{\\text{total}} = SS_{\\text{effect}} +
        (SS_{\\text{subjects}} + SS_{\\text{error}})

    with

    .. math::
        SS_{\\text{total}} = \\sum_i^r \\sum_j^n (Y_{ij} - \\overline{Y})^2

        SS_{\\text{effect}} = \\sum_i^r n_i(\\overline{Y_i} - \\overline{Y})^2

        SS_{\\text{subjects}} = r\\sum (\\overline{Y}_s - \\overline{Y})^2

        SS_{\\text{error}} = SS_{\\text{total}} - SS_{\\text{effect}} -
        SS_{\\text{subjects}}

    where :math:`i=1,...,r; j=1,...,n_i`, :math:`r` is the number of
    conditions, :math:`n_i` the number of observations for each condition,
    :math:`\\overline{Y}` the grand mean of the data, :math:`\\overline{Y_i}`
    the mean of the :math:`i^{th}` condition and :math:`\\overline{Y}_{subj}`
    the mean of the :math:`s^{th}` subject.

    The F-statistics is then defined as:

    .. math::
        F^* = \\frac{MS_{\\text{effect}}}{MS_{\\text{error}}} =
        \\frac{\\frac{SS_{\\text{effect}}}
        {r-1}}{\\frac{SS_{\\text{error}}}{(n - 1)(r - 1)}}

    and the p-value can be calculated using a F-distribution with
    :math:`v_{\\text{effect}} = r - 1` and
    :math:`v_{\\text{error}} = (n - 1)(r - 1)` degrees of freedom.

    The default effect size reported in Pingouin is the generalized eta-squared,
    which is equivalent to eta-squared for one-way repeated measures ANOVA.

    .. math::
        \\eta_g^2 = \\frac{SS_{\\text{effect}}}{SS_{\\text{total}}}

    The partial eta-squared is defined as:

    .. math::
        \\eta_p^2 = \\frac{SS_{\\text{effect}}}{SS_{\\text{effect}} + SS_{\\text{error}}}

    Missing values are automatically removed using a strict listwise approach (= complete-case
    analysis). In other words, any subject with one or more missing value(s) is completely removed
    from the dataframe prior to running the test. This could drastically decrease the power of the
    ANOVA if many missing values are present. In that case, we strongly recommend using linear
    mixed effect modelling, which can handle missing values in repeated measures.

    .. warning:: The epsilon adjustment factor of the interaction in
        two-way repeated measures ANOVA where both factors have more than
        two levels slightly differs from R and JASP.
        Please always make sure to double-check your results with another
        software.

    .. warning:: Sphericity tests for the interaction term of a two-way
        repeated measures ANOVA are not currently supported in Pingouin.
        Instead, please refer to the Greenhouse-Geisser epsilon value
        (a value close to 1 indicates that sphericity is met.) For more
        details, see :py:func:`pingouin.sphericity`.

    Examples
    --------
    1. One-way repeated measures ANOVA using a wide-format dataset

    >>> import pingouin as pg
    >>> data = pg.read_dataset('rm_anova_wide')
    >>> pg.rm_anova(data)
       Source  ddof1  ddof2         F     p-unc       ng2       eps
    0  Within      3     24  5.200652  0.006557  0.346392  0.694329

    2. One-way repeated-measures ANOVA using a long-format dataset.

    We're also specifying two additional options here: ``detailed=True`` means
    that we'll get a more detailed ANOVA table, and ``effsize='np2'``
    means that we want to get the partial eta-squared effect size instead
    of the default (generalized) eta-squared.

    >>> df = pg.read_dataset('rm_anova')
    >>> aov = pg.rm_anova(dv='DesireToKill', within='Disgustingness',
    ...                   subject='Subject', data=df, detailed=True, effsize="np2")
    >>> aov.round(3)
               Source       SS  DF      MS       F  p-unc    np2  eps
    0  Disgustingness   27.485   1  27.485  12.044  0.001  0.116  1.0
    1           Error  209.952  92   2.282     NaN    NaN    NaN  NaN

    3. Two-way repeated-measures ANOVA

    >>> aov = pg.rm_anova(dv='DesireToKill', within=['Disgustingness', 'Frighteningness'],
    ...                   subject='Subject', data=df)

    4. As a :py:class:`pandas.DataFrame` method

    >>> df.rm_anova(dv='DesireToKill', within='Disgustingness', subject='Subject', detailed=False)
               Source  ddof1  ddof2          F     p-unc       ng2  eps
    0  Disgustingness      1     92  12.043878  0.000793  0.025784  1.0
    """
    assert effsize in ["n2", "np2", "ng2"], "effsize must be n2, np2 or ng2."
    if isinstance(within, list):
        assert len(within) > 0, "Within cannot be empty."
        if len(within) == 1:
            within = within[0]
        elif len(within) == 2:
            # Two within-factors: delegate to the dedicated two-way implementation
            return rm_anova2(dv=dv, within=within, data=data, subject=subject, effsize=effsize)
        else:
            raise ValueError("Repeated measures ANOVA with three or more factors is not supported.")
    # Convert from wide to long-format, if needed
    # (wide format is assumed when none of dv / within / subject is given)
    if all([v is None for v in [dv, within, subject]]):
        assert isinstance(data, pd.DataFrame)
        data = data._get_numeric_data().dropna()  # Listwise deletion of missing values
        assert data.shape[0] > 2, "Data must have at least 3 non-missing rows."
        assert data.shape[1] > 1, "Data must contain at least two columns."
        data["Subj"] = np.arange(data.shape[0])
        data = data.melt(id_vars="Subj", var_name="Within", value_name="DV")
        subject, within, dv = "Subj", "Within", "DV"
    # Check dataframe
    data = _check_dataframe(dv=dv, within=within, data=data, subject=subject, effects="within")
    assert not data[within].isnull().any(), "Cannot have missing values in `within`."
    assert not data[subject].isnull().any(), "Cannot have missing values in `subject`."
    # Pivot and melt the table. This has several effects:
    # 1) Force missing values to be explicit (a NaN cell is created)
    # 2) Automatic collapsing to the mean if multiple within factors are present
    # 3) If using dropna, remove rows with missing values (listwise deletion).
    # The latter is the same behavior as JASP (= strict complete-case analysis).
    data_piv = data.pivot_table(index=subject, columns=within, values=dv, observed=True)
    data_piv = data_piv.dropna()
    data = data_piv.melt(ignore_index=False, value_name=dv).reset_index()
    # Groupby
    # I think that observed=True is actually not needed here since we have already used
    # `observed=True` in pivot_table.
    grp_with = data.groupby(within, observed=True, group_keys=False)[dv]
    rm = list(data[within].unique())
    n_rm = len(rm)  # number of within-factor levels (r in the docstring formulas)
    n_obs = int(grp_with.count().max())  # number of subjects per condition
    grandmean = data[dv].mean(numeric_only=True)
    # Calculate sums of squares
    ss_with = ((grp_with.mean(numeric_only=True) - grandmean) ** 2 * grp_with.count()).sum()
    ss_resall = grp_with.apply(lambda x: (x - x.mean()) ** 2).sum()
    # sstotal = sstime + ss_resall = sstime + (sssubj + sserror)
    # ss_total = ((data[dv] - grandmean)**2).sum()
    # We can further divide the residuals into a within and between component:
    grp_subj = data.groupby(subject, observed=True)[dv]
    ss_resbetw = n_rm * np.sum((grp_subj.mean(numeric_only=True) - grandmean) ** 2)
    ss_reswith = ss_resall - ss_resbetw
    # Calculate degrees of freedom
    ddof1 = n_rm - 1
    ddof2 = ddof1 * (n_obs - 1)
    # Calculate MS, F and p-values
    ms_with = ss_with / ddof1
    ms_reswith = ss_reswith / ddof2
    fval = ms_with / ms_reswith
    p_unc = f(ddof1, ddof2).sf(fval)
    # Calculating effect sizes (see Bakeman 2005; Lakens 2013)
    # https://github.com/raphaelvallat/pingouin/issues/251
    if effsize == "np2":
        # Partial eta-squared
        ef = ss_with / (ss_with + ss_reswith)
    else:
        # (Generalized) eta-squared, ng2 == n2
        ef = ss_with / (ss_with + ss_resall)
    # Compute sphericity using Mauchly test, on the wide-format dataframe
    # Sphericity assumption only applies if there are more than 2 levels
    if correction == "auto" or (correction is True and n_rm >= 3):
        spher, W_spher, _, _, p_spher = sphericity(data_piv, alpha=0.05)
        if correction == "auto":
            # Apply the GG correction only when Mauchly's test rejects sphericity
            correction = True if not spher else False
    else:
        correction = False
    # Compute epsilon adjustment factor
    eps = epsilon(data_piv, correction="gg")
    # If required, apply Greenhouse-Geisser correction for sphericity
    if correction:
        # Clamp the corrected dofs at 1.0 to keep the F-distribution valid
        corr_ddof1, corr_ddof2 = (np.maximum(d * eps, 1.0) for d in (ddof1, ddof2))
        p_corr = f(corr_ddof1, corr_ddof2).sf(fval)
    # Create output dataframe
    if not detailed:
        aov = pd.DataFrame(
            {
                "Source": within,
                "ddof1": ddof1,
                "ddof2": ddof2,
                "F": fval,
                "p-unc": p_unc,
                effsize: ef,
                "eps": eps,
            },
            index=[0],
        )
        if correction:
            aov["p-GG-corr"] = p_corr
            aov["W-spher"] = W_spher
            aov["p-spher"] = p_spher
            aov["sphericity"] = spher
        col_order = [
            "Source",
            "ddof1",
            "ddof2",
            "F",
            "p-unc",
            "p-GG-corr",
            effsize,
            "eps",
            "sphericity",
            "W-spher",
            "p-spher",
        ]
    else:
        aov = pd.DataFrame(
            {
                "Source": [within, "Error"],
                "SS": [ss_with, ss_reswith],
                "DF": [ddof1, ddof2],
                "MS": [ms_with, ms_reswith],
                "F": [fval, np.nan],
                "p-unc": [p_unc, np.nan],
                effsize: [ef, np.nan],
                "eps": [eps, np.nan],
            }
        )
        if correction:
            aov["p-GG-corr"] = [p_corr, np.nan]
            aov["W-spher"] = [W_spher, np.nan]
            aov["p-spher"] = [p_spher, np.nan]
            aov["sphericity"] = [spher, np.nan]
        col_order = [
            "Source",
            "SS",
            "DF",
            "MS",
            "F",
            "p-unc",
            "p-GG-corr",
            effsize,
            "eps",
            "sphericity",
            "W-spher",
            "p-spher",
        ]
    # Reorder and drop the correction-only columns when they were never filled
    aov = aov.reindex(columns=col_order)
    aov.dropna(how="all", axis=1, inplace=True)
    return _postprocess_dataframe(aov)
| (data=None, dv=None, within=None, subject=None, correction='auto', detailed=False, effsize='ng2') |
32,060 | pingouin.correlation | rm_corr | Repeated measures correlation.
Parameters
----------
data : :py:class:`pandas.DataFrame`
Dataframe.
x, y : string
Name of columns in ``data`` containing the two dependent variables.
subject : string
Name of column in ``data`` containing the subject indicator.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'r'``: Repeated measures correlation coefficient
* ``'dof'``: Degrees of freedom
* ``'pval'``: p-value
* ``'CI95'``: 95% parametric confidence intervals
* ``'power'``: achieved power of the test (= 1 - type II error).
See also
--------
plot_rm_corr
Notes
-----
Repeated measures correlation (rmcorr) is a statistical technique for determining the common
within-individual association for paired measures assessed on two or more occasions for
multiple individuals.
From `Bakdash and Marusich (2017)
<https://doi.org/10.3389/fpsyg.2017.00456>`_:
*Rmcorr accounts for non-independence among observations using analysis
of covariance (ANCOVA) to statistically adjust for inter-individual
variability. By removing measured variance between-participants,
rmcorr provides the best linear fit for each participant using parallel
regression lines (the same slope) with varying intercepts.
Like a Pearson correlation coefficient, the rmcorr coefficient
is bounded by − 1 to 1 and represents the strength of the linear
association between two variables.*
Results have been tested against the `rmcorr <https://github.com/cran/rmcorr>`_ R package.
Missing values are automatically removed from the dataframe (listwise deletion).
Examples
--------
>>> import pingouin as pg
>>> df = pg.read_dataset('rm_corr')
>>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
r dof pval CI95% power
rm_corr -0.50677 38 0.000847 [-0.71, -0.23] 0.929579
Now plot using the :py:func:`pingouin.plot_rm_corr` function:
.. plot::
>>> import pingouin as pg
>>> df = pg.read_dataset('rm_corr')
>>> g = pg.plot_rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
def rm_corr(data=None, x=None, y=None, subject=None):
    """Compute the repeated measures correlation (rmcorr).

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        Long-format dataframe with one row per observation.
    x, y : string
        Names of the two numeric columns in ``data`` to correlate.
    subject : string
        Name of the column in ``data`` identifying each subject.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        Single-row dataframe (index ``'rm_corr'``) with columns:

        * ``'r'``: Repeated measures correlation coefficient
        * ``'dof'``: Degrees of freedom
        * ``'pval'``: p-value
        * ``'CI95'``: 95% parametric confidence intervals
        * ``'power'``: achieved power of the test (= 1 - type II error).

    See also
    --------
    plot_rm_corr

    Notes
    -----
    The repeated measures correlation quantifies the common within-subject
    linear association between two variables measured on two or more
    occasions per subject. Following `Bakdash and Marusich (2017)
    <https://doi.org/10.3389/fpsyg.2017.00456>`_, it uses a one-way ANCOVA to
    statistically remove between-subject variability, which yields parallel
    per-subject regression lines sharing a common slope. Like Pearson's r,
    the coefficient is bounded between -1 and 1.

    Results have been tested against the `rmcorr
    <https://github.com/cran/rmcorr>`_ R package. Rows with missing values
    are automatically dropped (listwise deletion) before the analysis.

    Examples
    --------
    >>> import pingouin as pg
    >>> df = pg.read_dataset('rm_corr')
    >>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
                   r  dof      pval           CI95%     power
    rm_corr -0.50677   38  0.000847  [-0.71, -0.23]  0.929579

    Now plot using the :py:func:`pingouin.plot_rm_corr` function:

    .. plot::

        >>> import pingouin as pg
        >>> df = pg.read_dataset('rm_corr')
        >>> g = pg.plot_rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
    """
    from pingouin import ancova, power_corr

    # Input validation (membership, dtype, then subject column)
    assert isinstance(data, pd.DataFrame), "Data must be a DataFrame"
    assert x in data.columns, "The %s column is not in data." % x
    assert y in data.columns, "The %s column is not in data." % y
    assert data[x].dtype.kind in "bfiu", "%s must be numeric." % x
    assert data[y].dtype.kind in "bfiu", "%s must be numeric." % y
    assert subject in data.columns, "The %s column is not in data." % subject
    if data[subject].nunique() < 3:
        raise ValueError("rm_corr requires at least 3 unique subjects.")

    # Listwise deletion of missing values
    data = data[[x, y, subject]].dropna(axis=0)

    # Run the ANCOVA with rounding temporarily disabled for maximal precision
    saved_options = options.copy()
    options["round"] = None
    aov = ancova(dv=y, covar=x, between=subject, data=data)
    options.update(saved_options)  # restore options

    # Derive the rmcorr statistic from the ANCOVA table: the coefficient is the
    # square root of the covariate's variance fraction, signed like the
    # within-subject slope.
    beta_within = aov.bw_
    dof = int(aov.at[2, "DF"])
    n = dof + 2
    ss_factor = aov.at[1, "SS"]
    ss_error = aov.at[2, "SS"]
    coef = np.sign(beta_within) * np.sqrt(ss_factor / (ss_factor + ss_error))
    pval = aov.at[1, "p-unc"]
    ci95 = compute_esci(stat=coef, nx=n, eftype="pearson").tolist()
    achieved_power = power_corr(r=coef, n=n, alternative="two-sided")

    # Assemble the single-row output dataframe
    stats = pd.DataFrame(
        {"r": coef, "dof": int(dof), "pval": pval, "CI95%": [ci95], "power": achieved_power},
        index=["rm_corr"],
    )
    return _postprocess_dataframe(stats)
| (data=None, x=None, y=None, subject=None) |
32,061 | pingouin.config | set_default_options | Reset Pingouin's default global options (e.g. rounding).
.. versionadded:: 0.3.8
def set_default_options():
    """Reset Pingouin's default global options (e.g. rounding).

    .. versionadded:: 0.3.8
    """
    options.clear()
    # Rounding behavior: disabled globally, but CI bounds keep two decimals
    # and Bayes factors are returned inside DataFrames as formatted strings.
    options.update(
        {
            "round": None,
            "round.column.CI95%": 2,
            "round.column.BF10": _format_bf,
        }
    )
| () |
32,062 | pingouin.distribution | sphericity | Mauchly and JNS test for sphericity.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame containing the repeated measurements.
Both wide and long-format dataframe are supported for this function.
To test for an interaction term between two repeated measures factors
with a wide-format dataframe, ``data`` must have a two-levels
:py:class:`pandas.MultiIndex` columns.
dv : string
Name of column containing the dependent variable (only required if
``data`` is in long format).
within : string
Name of column containing the within factor (only required if ``data``
is in long format).
If ``within`` is a list with two strings, this function computes
the epsilon factor for the interaction between the two within-subject
factor.
subject : string
Name of column containing the subject identifier (only required if
``data`` is in long format).
method : str
Method to compute sphericity:
* `'jns'`: John, Nagao and Sugiura test.
* `'mauchly'`: Mauchly test (default).
alpha : float
Significance level
Returns
-------
spher : boolean
True if data have the sphericity property.
W : float
Test statistic.
chi2 : float
Chi-square statistic.
dof : int
Degrees of freedom.
pval : float
P-value.
Raises
------
ValueError
When testing for an interaction, if both within-subject factors have
more than 2 levels (not yet supported in Pingouin).
See Also
--------
epsilon : Epsilon adjustment factor for repeated measures.
homoscedasticity : Test equality of variance.
normality : Univariate normality test.
Notes
-----
The **Mauchly** :math:`W` statistic [1]_ is defined by:
.. math::
W = \frac{\prod \lambda_j}{(\frac{1}{k-1} \sum \lambda_j)^{k-1}}
where :math:`\lambda_j` are the eigenvalues of the population
covariance matrix (= double-centered sample covariance matrix) and
:math:`k` is the number of conditions.
From then, the :math:`W` statistic is transformed into a chi-square
score using the number of observations per condition :math:`n`
.. math:: f = \frac{2(k-1)^2+k+1}{6(k-1)(n-1)}
.. math:: \chi_w^2 = (f-1)(n-1) \text{log}(W)
The p-value is then approximated using a chi-square distribution:
.. math:: \chi_w^2 \sim \chi^2(\frac{k(k-1)}{2}-1)
The **JNS** :math:`V` statistic ([2]_, [3]_, [4]_) is defined by:
.. math::
V = \frac{(\sum_j^{k-1} \lambda_j)^2}{\sum_j^{k-1} \lambda_j^2}
.. math:: \chi_v^2 = \frac{n}{2} (k-1)^2 (V - \frac{1}{k-1})
and the p-value approximated using a chi-square distribution
.. math:: \chi_v^2 \sim \chi^2(\frac{k(k-1)}{2}-1)
Missing values are automatically removed from ``data`` (listwise deletion).
References
----------
.. [1] Mauchly, J. W. (1940). Significance test for sphericity of a normal
n-variate distribution. The Annals of Mathematical Statistics,
11(2), 204-209.
.. [2] Nagao, H. (1973). On some test criteria for covariance matrix.
The Annals of Statistics, 700-709.
.. [3] Sugiura, N. (1972). Locally best invariant test for sphericity and
the limiting distributions. The Annals of Mathematical Statistics,
1312-1316.
.. [4] John, S. (1972). The distribution of a statistic used for testing
sphericity of normal distributions. Biometrika, 59(1), 169-173.
See also http://www.real-statistics.com/anova-repeated-measures/sphericity/
Examples
--------
Mauchly test for sphericity using a wide-format dataframe
>>> import pandas as pd
>>> import pingouin as pg
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> spher, W, chisq, dof, pval = pg.sphericity(data)
>>> print(spher, round(W, 3), round(chisq, 3), dof, round(pval, 3))
True 0.21 4.677 2 0.096
John, Nagao and Sugiura (JNS) test
>>> round(pg.sphericity(data, method='jns')[-1], 3) # P-value only
0.046
Now using a long-format dataframe
>>> data = pg.read_dataset('rm_anova2')
>>> data.head()
Subject Time Metric Performance
0 1 Pre Product 13
1 2 Pre Product 12
2 3 Pre Product 17
3 4 Pre Product 12
4 5 Pre Product 19
Let's first test sphericity for the *Time* within-subject factor
>>> pg.sphericity(data, dv='Performance', subject='Subject',
... within='Time')
(True, nan, nan, 1, 1.0)
Since *Time* has only two levels (Pre and Post), the sphericity assumption
is necessarily met.
The *Metric* factor, however, has three levels:
>>> round(pg.sphericity(data, dv='Performance', subject='Subject',
... within=['Metric'])[-1], 3)
0.878
The p-value is very large, and the test therefore indicates that
there is no violation of sphericity.
Now, let's calculate the epsilon for the interaction between the two
repeated measures factor. The current implementation in Pingouin only works
if at least one of the two within-subject factors has no more than two
levels.
>>> spher, _, chisq, dof, pval = pg.sphericity(data, dv='Performance',
... subject='Subject',
... within=['Time', 'Metric'])
>>> print(spher, round(chisq, 3), dof, round(pval, 3))
True 3.763 2 0.152
Here again, there is no violation of sphericity according to Mauchly's
test.
Alternatively, we could use a wide-format dataframe with two column
levels:
>>> # Pivot from long-format to wide-format
>>> piv = data.pivot(index='Subject', columns=['Time', 'Metric'], values='Performance')
>>> piv.head()
Time Pre Post
Metric Product Client Action Product Client Action
Subject
1 13 12 17 18 30 34
2 12 19 18 6 18 30
3 17 19 24 21 31 32
4 12 25 25 18 39 40
5 19 27 19 18 28 27
>>> spher, _, chisq, dof, pval = pg.sphericity(piv)
>>> print(spher, round(chisq, 3), dof, round(pval, 3))
True 3.763 2 0.152
which gives the same output as the long-format dataframe.
def sphericity(data, dv=None, within=None, subject=None, method="mauchly", alpha=0.05):
    """Mauchly and JNS test for sphericity.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        DataFrame containing the repeated measurements. Both wide- and
        long-format dataframes are supported. To test an interaction
        between two repeated-measures factors with a wide-format dataframe,
        ``data`` must have two-level :py:class:`pandas.MultiIndex` columns.
    dv : string
        Name of column containing the dependent variable (only required if
        ``data`` is in long format).
    within : string
        Name of column containing the within factor (only required if
        ``data`` is in long format). If ``within`` is a list with two
        strings, the test is computed for the interaction between the two
        within-subject factors.
    subject : string
        Name of column containing the subject identifier (only required if
        ``data`` is in long format).
    method : str
        Method to compute sphericity:

        * `'jns'`: John, Nagao and Sugiura test.
        * `'mauchly'`: Mauchly test (default).
    alpha : float
        Significance level.

    Returns
    -------
    spher : boolean
        True if data have the sphericity property.
    W : float
        Test statistic.
    chi2 : float
        Chi-square statistic.
    dof : int
        Degrees of freedom.
    pval : float
        P-value.

    Raises
    ------
    ValueError
        When testing for an interaction, if both within-subject factors
        have more than 2 levels (not yet supported in Pingouin).

    See Also
    --------
    epsilon : Epsilon adjustment factor for repeated measures.
    homoscedasticity : Test equality of variance.
    normality : Univariate normality test.

    Notes
    -----
    Mauchly's :math:`W` (Mauchly, 1940) is formed from the eigenvalues
    :math:`\\lambda_j` of the double-centered sample covariance matrix:

    .. math:: W = \\frac{\\prod \\lambda_j}{(\\frac{1}{k-1} \\sum \\lambda_j)^{k-1}}

    and its p-value is approximated with a chi-square distribution. The JNS
    statistic (John, 1972; Sugiura, 1972; Nagao, 1973) is derived from the
    Greenhouse-Geisser epsilon. Missing values are automatically removed
    from ``data`` (listwise deletion). When there are only two repeated
    measures, sphericity necessarily holds and ``(True, nan, nan, 1, 1.0)``
    is returned.

    See also http://www.real-statistics.com/anova-repeated-measures/sphericity/
    """
    assert isinstance(data, pd.DataFrame), "Data must be a pandas Dataframe."

    # Convert a long-format dataframe to wide format first.
    if dv is not None and within is not None and subject is not None:
        data = _long_to_wide_rm(data, dv=dv, within=within, subject=subject)

    # From now on, data is wide-format with only the relevant columns.
    # Listwise deletion of missing values, then collapse a possible
    # two-way factor of shape (2, N) into an equivalent one-way layout.
    data = data.dropna()
    data = _check_multilevel_rm(data, func="mauchly")

    n_obs, n_cond = data.shape
    d = n_cond - 1

    # Sphericity is always met with only two repeated measures.
    if n_cond <= 2:
        return True, np.nan, np.nan, 1, 1.0

    # Degrees of freedom of the chi-square approximation.
    ddof = (d * (d + 1)) / 2 - 1
    ddof = 1 if ddof == 0 else ddof

    if method.lower() == "mauchly":
        # Eigenvalue-based computation of Mauchly's W (faster than the
        # contrast-matrix approach used in R/Matlab):
        # 1 - estimate the population covariance (= double-centered),
        # 2 - compute its n-1 largest eigenvalues,
        # 3 - form Mauchly's statistic.
        cov = data.cov(numeric_only=True).to_numpy()  # NumPy, otherwise cov.mean() != grandmean
        cov_pop = cov - cov.mean(0)[:, None] - cov.mean(1)[None, :] + cov.mean()
        eigvals = np.linalg.eigvalsh(cov_pop)[1:]
        eigvals = eigvals[eigvals > 0.001]  # additional check to remove very low eigenvalues
        W = np.prod(eigvals) / (eigvals.sum() / d) ** d
        logW = np.log(W)
        # Chi-square and p-value (adapted from the ezANOVA R package).
        f = 1 - (2 * d**2 + d + 2) / (6 * d * (n_obs - 1))
        w2 = (
            (d + 2)
            * (d - 1)
            * (d - 2)
            * (2 * d**3 + 6 * d**2 + 3 * n_cond + 2)
            / (288 * ((n_obs - 1) * d * f) ** 2)
        )
        chi_sq = -(n_obs - 1) * f * logW
        p1 = scipy.stats.chi2.sf(chi_sq, ddof)
        p2 = scipy.stats.chi2.sf(chi_sq, ddof + 4)
        pval = p1 + w2 * (p2 - p1)
    else:
        # John, Nagao and Sugiura (JNS) test, based on the GG epsilon.
        eps = epsilon(data, correction="gg")
        W = eps * d
        chi_sq = 0.5 * n_obs * d**2 * (W - 1 / d)
        pval = scipy.stats.chi2.sf(chi_sq, ddof)

    spher = bool(pval > alpha)
    SpherResults = namedtuple("SpherResults", ["spher", "W", "chi2", "dof", "pval"])
    return SpherResults(spher, W, chi_sq, int(ddof), pval)
| (data, dv=None, within=None, subject=None, method='mauchly', alpha=0.05) |
32,063 | pingouin.equivalence | tost | Two One-Sided Test (TOST) for equivalence.
Parameters
----------
x, y : array_like
First and second set of observations. ``x`` and ``y`` should have the
same units. If ``y`` is a single value (e.g. 0), a one-sample test is
performed.
bound : float
Magnitude of region of similarity (a.k.a. epsilon). Note that this
should be expressed in the same unit as ``x`` and ``y``.
paired : boolean
Specify whether the two observations are related (i.e. repeated
measures) or independent.
correction : auto or boolean
Specify whether or not to correct for unequal variances using Welch
separate variances T-test. This only applies if ``paired`` is False.
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'bound'``: bound (= epsilon, or equivalence margin)
* ``'dof'``: degrees of freedom
* ``'pval'``: TOST p-value
See also
--------
ttest
References
----------
.. [1] Schuirmann, D.L. 1981. On hypothesis testing to determine if the
mean of a normal distribution is contained in a known interval.
Biometrics 37 617.
.. [2] https://cran.r-project.org/web/packages/equivalence/equivalence.pdf
Examples
--------
1. Independent two-sample TOST with a region of similarity of 1 (default)
>>> import pingouin as pg
>>> a = [4, 7, 8, 6, 3, 2]
>>> b = [6, 8, 7, 10, 11, 9]
>>> pg.tost(a, b)
bound dof pval
TOST 1 10 0.965097
2. Paired TOST with a different region of similarity
>>> pg.tost(a, b, bound=0.5, paired=True)
bound dof pval
TOST 0.5 5 0.954854
3. One sample TOST
>>> pg.tost(a, y=0, bound=4)
bound dof pval
TOST 4 5 0.825967
def tost(x, y, bound=1, paired=False, correction=False):
    """Two One-Sided Test (TOST) for equivalence.

    Parameters
    ----------
    x, y : array_like
        First and second set of observations, expressed in the same units.
        If ``y`` is a single value (e.g. 0), a one-sample test is
        performed.
    bound : float
        Magnitude of the region of similarity (a.k.a. epsilon), in the
        same unit as ``x`` and ``y``.
    paired : boolean
        Specify whether the two observations are related (i.e. repeated
        measures) or independent.
    correction : auto or boolean
        Specify whether or not to correct for unequal variances using
        Welch's separate variances T-test. Only applies if ``paired`` is
        False.

    Returns
    -------
    stats : :py:class:`pandas.DataFrame`
        * ``'bound'``: bound (= epsilon, or equivalence margin)
        * ``'dof'``: degrees of freedom
        * ``'pval'``: TOST p-value

    See also
    --------
    ttest

    Notes
    -----
    Equivalence is assessed with two one-sided T-tests on ``x`` shifted by
    plus/minus ``bound`` (Schuirmann, 1981); the reported p-value is the
    larger of the two one-sided p-values. See also
    https://cran.r-project.org/web/packages/equivalence/equivalence.pdf
    """
    x = np.asarray(x)
    y = np.asarray(y)
    assert isinstance(bound, (int, float)), "bound must be int or float."

    # Shift x by +/- bound and run the two one-sided tests.
    onesided = {
        "greater": ttest(x + bound, y, paired=paired, correction=correction, alternative="greater"),
        "less": ttest(x - bound, y, paired=paired, correction=correction, alternative="less"),
    }
    pval = max(res.at["T-test", "p-val"] for res in onesided.values())

    # Assemble the one-row output dataframe.
    out = pd.DataFrame(
        {"bound": bound, "dof": onesided["greater"].at["T-test", "dof"], "pval": pval},
        index=["TOST"],
    )
    return _postprocess_dataframe(out)
| (x, y, bound=1, paired=False, correction=False) |
32,064 | pingouin.parametric | ttest | T-test.
Parameters
----------
x : array_like
First set of observations.
y : array_like or float
Second set of observations. If ``y`` is a single value, a one-sample
T-test is computed against that value (= "mu" in the t.test R
function).
paired : boolean
Specify whether the two observations are related (i.e. repeated
measures) or independent.
alternative : string
Defines the alternative hypothesis, or tail of the test. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return one-sided
p-values. "greater" tests against the alternative hypothesis that the mean of ``x``
is greater than the mean of ``y``.
correction : string or boolean
For unpaired two sample T-tests, specify whether or not to correct for
unequal variances using Welch separate variances T-test. If 'auto', it
will automatically uses Welch T-test when the sample sizes are unequal,
as recommended by Zimmerman 2004.
r : float
Cauchy scale factor for computing the Bayes Factor.
Smaller values of r (e.g. 0.5), may be appropriate when small effect
sizes are expected a priori; larger values of r are appropriate when
large effect sizes are expected (Rouder et al 2009).
The default is 0.707 (= :math:`\sqrt{2} / 2`).
confidence : float
Confidence level for the confidence intervals (0.95 = 95%)
.. versionadded:: 0.3.9
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'T'``: T-value
* ``'dof'``: degrees of freedom
* ``'alternative'``: alternative of the test
* ``'p-val'``: p-value
* ``'CI95%'``: confidence intervals of the difference in means
* ``'cohen-d'``: Cohen d effect size
* ``'BF10'``: Bayes Factor of the alternative hypothesis
* ``'power'``: achieved power of the test ( = 1 - type II error)
See also
--------
mwu, wilcoxon, anova, rm_anova, pairwise_tests, compute_effsize
Notes
-----
Missing values are automatically removed from the data. If ``x`` and
``y`` are paired, the entire row is removed (= listwise deletion).
The **T-value for unpaired samples** is defined as:
.. math::
t = \frac{\overline{x} - \overline{y}}
{\sqrt{\frac{s^{2}_{x}}{n_{x}} + \frac{s^{2}_{y}}{n_{y}}}}
where :math:`\overline{x}` and :math:`\overline{y}` are the sample means,
:math:`n_{x}` and :math:`n_{y}` are the sample sizes, and
:math:`s^{2}_{x}` and :math:`s^{2}_{y}` are the sample variances.
The degrees of freedom :math:`v` are :math:`n_x + n_y - 2` when the sample
sizes are equal. When the sample sizes are unequal or when
:code:`correction=True`, the Welch–Satterthwaite equation is used to
approximate the adjusted degrees of freedom:
.. math::
v = \frac{(\frac{s^{2}_{x}}{n_{x}} + \frac{s^{2}_{y}}{n_{y}})^{2}}
{\frac{(\frac{s^{2}_{x}}{n_{x}})^{2}}{(n_{x}-1)} +
\frac{(\frac{s^{2}_{y}}{n_{y}})^{2}}{(n_{y}-1)}}
The p-value is then calculated using a T distribution with :math:`v`
degrees of freedom.
The T-value for **paired samples** is defined by:
.. math:: t = \frac{\overline{x}_d}{s_{\overline{x}}}
where
.. math:: s_{\overline{x}} = \frac{s_d}{\sqrt n}
where :math:`\overline{x}_d` is the sample mean of the differences
between the two paired samples, :math:`n` is the number of observations
(sample size), :math:`s_d` is the sample standard deviation of the
differences and :math:`s_{\overline{x}}` is the estimated standard error
of the mean of the differences. The p-value is then calculated using a
T-distribution with :math:`n-1` degrees of freedom.
The scaled Jeffrey-Zellner-Siow (JZS) Bayes Factor is approximated
using the :py:func:`pingouin.bayesfactor_ttest` function.
Results have been tested against JASP and the `t.test` R function.
References
----------
* https://www.itl.nist.gov/div898/handbook/eda/section3/eda353.htm
* Delacre, M., Lakens, D., & Leys, C. (2017). Why psychologists should
by default use Welch’s t-test instead of Student’s t-test.
International Review of Social Psychology, 30(1).
* Zimmerman, D. W. (2004). A note on preliminary tests of equality of
variances. British Journal of Mathematical and Statistical
Psychology, 57(1), 173-181.
* Rouder, J.N., Speckman, P.L., Sun, D., Morey, R.D., Iverson, G.,
2009. Bayesian t tests for accepting and rejecting the null
hypothesis. Psychon. Bull. Rev. 16, 225–237.
https://doi.org/10.3758/PBR.16.2.225
Examples
--------
1. One-sample T-test.
>>> from pingouin import ttest
>>> x = [5.5, 2.4, 6.8, 9.6, 4.2]
>>> ttest(x, 4).round(2)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 1.4 4 two-sided 0.23 [2.32, 9.08] 0.62 0.766 0.19
2. One sided paired T-test.
>>> pre = [5.5, 2.4, 6.8, 9.6, 4.2]
>>> post = [6.4, 3.4, 6.4, 11., 4.8]
>>> ttest(pre, post, paired=True, alternative='less').round(2)
T dof alternative p-val CI95% cohen-d BF10 power
T-test -2.31 4 less 0.04 [-inf, -0.05] 0.25 3.122 0.12
Now testing the opposite alternative hypothesis
>>> ttest(pre, post, paired=True, alternative='greater').round(2)
T dof alternative p-val CI95% cohen-d BF10 power
T-test -2.31 4 greater 0.96 [-1.35, inf] 0.25 0.32 0.02
3. Paired T-test with missing values.
>>> import numpy as np
>>> pre = [5.5, 2.4, np.nan, 9.6, 4.2]
>>> post = [6.4, 3.4, 6.4, 11., 4.8]
>>> ttest(pre, post, paired=True).round(3)
T dof alternative p-val CI95% cohen-d BF10 power
T-test -5.902 3 two-sided 0.01 [-1.5, -0.45] 0.306 7.169 0.073
Compare with SciPy
>>> from scipy.stats import ttest_rel
>>> np.round(ttest_rel(pre, post, nan_policy="omit"), 3)
array([-5.902, 0.01 ])
4. Independent two-sample T-test with equal sample size.
>>> np.random.seed(123)
>>> x = np.random.normal(loc=7, size=20)
>>> y = np.random.normal(loc=4, size=20)
>>> ttest(x, y)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 9.106452 38 two-sided 4.306971e-11 [2.64, 4.15] 2.879713 1.366e+08 1.0
5. Independent two-sample T-test with unequal sample size. A Welch's T-test is used.
>>> np.random.seed(123)
>>> y = np.random.normal(loc=6.5, size=15)
>>> ttest(x, y)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 1.996537 31.567592 two-sided 0.054561 [-0.02, 1.65] 0.673518 1.469 0.481867
6. However, the Welch's correction can be disabled:
>>> ttest(x, y, correction=False)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 1.971859 33 two-sided 0.057056 [-0.03, 1.66] 0.673518 1.418 0.481867
Compare with SciPy
>>> from scipy.stats import ttest_ind
>>> np.round(ttest_ind(x, y, equal_var=True), 6) # T value and p-value
array([1.971859, 0.057056])
| def ttest(x, y, paired=False, alternative="two-sided", correction="auto", r=0.707, confidence=0.95):
"""T-test.
Parameters
----------
x : array_like
First set of observations.
y : array_like or float
Second set of observations. If ``y`` is a single value, a one-sample
T-test is computed against that value (= "mu" in the t.test R
function).
paired : boolean
Specify whether the two observations are related (i.e. repeated
measures) or independent.
alternative : string
Defines the alternative hypothesis, or tail of the test. Must be one of
"two-sided" (default), "greater" or "less". Both "greater" and "less" return one-sided
p-values. "greater" tests against the alternative hypothesis that the mean of ``x``
is greater than the mean of ``y``.
correction : string or boolean
For unpaired two sample T-tests, specify whether or not to correct for
unequal variances using Welch separate variances T-test. If 'auto', it
will automatically uses Welch T-test when the sample sizes are unequal,
as recommended by Zimmerman 2004.
r : float
Cauchy scale factor for computing the Bayes Factor.
Smaller values of r (e.g. 0.5), may be appropriate when small effect
sizes are expected a priori; larger values of r are appropriate when
large effect sizes are expected (Rouder et al 2009).
The default is 0.707 (= :math:`\\sqrt{2} / 2`).
confidence : float
Confidence level for the confidence intervals (0.95 = 95%)
.. versionadded:: 0.3.9
Returns
-------
stats : :py:class:`pandas.DataFrame`
* ``'T'``: T-value
* ``'dof'``: degrees of freedom
* ``'alternative'``: alternative of the test
* ``'p-val'``: p-value
* ``'CI95%'``: confidence intervals of the difference in means
* ``'cohen-d'``: Cohen d effect size
* ``'BF10'``: Bayes Factor of the alternative hypothesis
* ``'power'``: achieved power of the test ( = 1 - type II error)
See also
--------
mwu, wilcoxon, anova, rm_anova, pairwise_tests, compute_effsize
Notes
-----
Missing values are automatically removed from the data. If ``x`` and
``y`` are paired, the entire row is removed (= listwise deletion).
The **T-value for unpaired samples** is defined as:
.. math::
t = \\frac{\\overline{x} - \\overline{y}}
{\\sqrt{\\frac{s^{2}_{x}}{n_{x}} + \\frac{s^{2}_{y}}{n_{y}}}}
where :math:`\\overline{x}` and :math:`\\overline{y}` are the sample means,
:math:`n_{x}` and :math:`n_{y}` are the sample sizes, and
:math:`s^{2}_{x}` and :math:`s^{2}_{y}` are the sample variances.
The degrees of freedom :math:`v` are :math:`n_x + n_y - 2` when the sample
sizes are equal. When the sample sizes are unequal or when
:code:`correction=True`, the Welch–Satterthwaite equation is used to
approximate the adjusted degrees of freedom:
.. math::
v = \\frac{(\\frac{s^{2}_{x}}{n_{x}} + \\frac{s^{2}_{y}}{n_{y}})^{2}}
{\\frac{(\\frac{s^{2}_{x}}{n_{x}})^{2}}{(n_{x}-1)} +
\\frac{(\\frac{s^{2}_{y}}{n_{y}})^{2}}{(n_{y}-1)}}
The p-value is then calculated using a T distribution with :math:`v`
degrees of freedom.
The T-value for **paired samples** is defined by:
.. math:: t = \\frac{\\overline{x}_d}{s_{\\overline{x}}}
where
.. math:: s_{\\overline{x}} = \\frac{s_d}{\\sqrt n}
where :math:`\\overline{x}_d` is the sample mean of the differences
between the two paired samples, :math:`n` is the number of observations
(sample size), :math:`s_d` is the sample standard deviation of the
differences and :math:`s_{\\overline{x}}` is the estimated standard error
of the mean of the differences. The p-value is then calculated using a
T-distribution with :math:`n-1` degrees of freedom.
The scaled Jeffrey-Zellner-Siow (JZS) Bayes Factor is approximated
using the :py:func:`pingouin.bayesfactor_ttest` function.
Results have been tested against JASP and the `t.test` R function.
References
----------
* https://www.itl.nist.gov/div898/handbook/eda/section3/eda353.htm
* Delacre, M., Lakens, D., & Leys, C. (2017). Why psychologists should
by default use Welch’s t-test instead of Student’s t-test.
International Review of Social Psychology, 30(1).
* Zimmerman, D. W. (2004). A note on preliminary tests of equality of
variances. British Journal of Mathematical and Statistical
Psychology, 57(1), 173-181.
* Rouder, J.N., Speckman, P.L., Sun, D., Morey, R.D., Iverson, G.,
2009. Bayesian t tests for accepting and rejecting the null
hypothesis. Psychon. Bull. Rev. 16, 225–237.
https://doi.org/10.3758/PBR.16.2.225
Examples
--------
1. One-sample T-test.
>>> from pingouin import ttest
>>> x = [5.5, 2.4, 6.8, 9.6, 4.2]
>>> ttest(x, 4).round(2)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 1.4 4 two-sided 0.23 [2.32, 9.08] 0.62 0.766 0.19
2. One sided paired T-test.
>>> pre = [5.5, 2.4, 6.8, 9.6, 4.2]
>>> post = [6.4, 3.4, 6.4, 11., 4.8]
>>> ttest(pre, post, paired=True, alternative='less').round(2)
T dof alternative p-val CI95% cohen-d BF10 power
T-test -2.31 4 less 0.04 [-inf, -0.05] 0.25 3.122 0.12
Now testing the opposite alternative hypothesis
>>> ttest(pre, post, paired=True, alternative='greater').round(2)
T dof alternative p-val CI95% cohen-d BF10 power
T-test -2.31 4 greater 0.96 [-1.35, inf] 0.25 0.32 0.02
3. Paired T-test with missing values.
>>> import numpy as np
>>> pre = [5.5, 2.4, np.nan, 9.6, 4.2]
>>> post = [6.4, 3.4, 6.4, 11., 4.8]
>>> ttest(pre, post, paired=True).round(3)
T dof alternative p-val CI95% cohen-d BF10 power
T-test -5.902 3 two-sided 0.01 [-1.5, -0.45] 0.306 7.169 0.073
Compare with SciPy
>>> from scipy.stats import ttest_rel
>>> np.round(ttest_rel(pre, post, nan_policy="omit"), 3)
array([-5.902, 0.01 ])
4. Independent two-sample T-test with equal sample size.
>>> np.random.seed(123)
>>> x = np.random.normal(loc=7, size=20)
>>> y = np.random.normal(loc=4, size=20)
>>> ttest(x, y)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 9.106452 38 two-sided 4.306971e-11 [2.64, 4.15] 2.879713 1.366e+08 1.0
5. Independent two-sample T-test with unequal sample size. A Welch's T-test is used.
>>> np.random.seed(123)
>>> y = np.random.normal(loc=6.5, size=15)
>>> ttest(x, y)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 1.996537 31.567592 two-sided 0.054561 [-0.02, 1.65] 0.673518 1.469 0.481867
6. However, the Welch's correction can be disabled:
>>> ttest(x, y, correction=False)
T dof alternative p-val CI95% cohen-d BF10 power
T-test 1.971859 33 two-sided 0.057056 [-0.03, 1.66] 0.673518 1.418 0.481867
Compare with SciPy
>>> from scipy.stats import ttest_ind
>>> np.round(ttest_ind(x, y, equal_var=True), 6) # T value and p-value
array([1.971859, 0.057056])
"""
from scipy.stats import t, ttest_rel, ttest_ind, ttest_1samp
try: # pragma: no cover
from scipy.stats._stats_py import _unequal_var_ttest_denom, _equal_var_ttest_denom
except ImportError: # pragma: no cover
# Fallback for scipy<1.8.0
from scipy.stats.stats import _unequal_var_ttest_denom, _equal_var_ttest_denom
from pingouin import power_ttest, power_ttest2n, compute_effsize
# Check arguments
assert alternative in [
"two-sided",
"greater",
"less",
], "Alternative must be one of 'two-sided' (default), 'greater' or 'less'."
assert 0 < confidence < 1, "confidence must be between 0 and 1."
x = np.asarray(x)
y = np.asarray(y)
if x.size != y.size and paired:
warnings.warn("x and y have unequal sizes. Switching to paired == False. Check your data.")
paired = False
# Remove rows with missing values
x, y = remove_na(x, y, paired=paired)
nx, ny = x.size, y.size
if ny == 1:
# Case one sample T-test
tval, pval = ttest_1samp(x, y, alternative=alternative)
# Some versions of scipy return an array for the t-value
if isinstance(tval, Iterable):
tval = tval[0]
dof = nx - 1
se = np.sqrt(x.var(ddof=1) / nx)
if ny > 1 and paired is True:
# Case paired two samples T-test
# Do not compute if two arrays are identical (avoid SciPy warning)
if np.array_equal(x, y):
warnings.warn("x and y are equals. Cannot compute T or p-value.")
tval, pval = np.nan, np.nan
else:
tval, pval = ttest_rel(x, y, alternative=alternative)
dof = nx - 1
se = np.sqrt(np.var(x - y, ddof=1) / nx)
bf = bayesfactor_ttest(tval, nx, ny, paired=True, r=r)
elif ny > 1 and paired is False:
dof = nx + ny - 2
vx, vy = x.var(ddof=1), y.var(ddof=1)
# Case unpaired two samples T-test
if correction is True or (correction == "auto" and nx != ny):
# Use the Welch separate variance T-test
tval, pval = ttest_ind(x, y, equal_var=False, alternative=alternative)
# Compute sample standard deviation
# dof are approximated using Welch–Satterthwaite equation
dof, se = _unequal_var_ttest_denom(vx, nx, vy, ny)
else:
tval, pval = ttest_ind(x, y, equal_var=True, alternative=alternative)
_, se = _equal_var_ttest_denom(vx, nx, vy, ny)
# Effect size
d = compute_effsize(x, y, paired=paired, eftype="cohen")
# Confidence interval for the (difference in) means
# Compare to the t.test r function
if alternative == "two-sided":
alpha = 1 - confidence
conf = 1 - alpha / 2 # 0.975
else:
conf = confidence
tcrit = t.ppf(conf, dof)
ci = np.array([tval - tcrit, tval + tcrit]) * se
if ny == 1:
ci += y
if alternative == "greater":
ci[1] = np.inf
elif alternative == "less":
ci[0] = -np.inf
# Rename CI
ci_name = "CI%.0f%%" % (100 * confidence)
# Achieved power
if ny == 1:
# One-sample
power = power_ttest(
d=d, n=nx, power=None, alpha=0.05, contrast="one-sample", alternative=alternative
)
if ny > 1 and paired is True:
# Paired two-sample
power = power_ttest(
d=d, n=nx, power=None, alpha=0.05, contrast="paired", alternative=alternative
)
elif ny > 1 and paired is False:
# Independent two-samples
if nx == ny:
# Equal sample sizes
power = power_ttest(
d=d, n=nx, power=None, alpha=0.05, contrast="two-samples", alternative=alternative
)
else:
# Unequal sample sizes
power = power_ttest2n(nx, ny, d=d, power=None, alpha=0.05, alternative=alternative)
# Bayes factor
bf = bayesfactor_ttest(tval, nx, ny, paired=paired, alternative=alternative, r=r)
# Create output dictionnary
stats = {
"dof": dof,
"T": tval,
"p-val": pval,
"alternative": alternative,
"cohen-d": abs(d),
ci_name: [ci],
"power": power,
"BF10": bf,
}
# Convert to dataframe
col_order = ["T", "dof", "alternative", "p-val", ci_name, "cohen-d", "BF10", "power"]
stats = pd.DataFrame(stats, columns=col_order, index=["T-test"])
return _postprocess_dataframe(stats)
| (x, y, paired=False, alternative='two-sided', correction='auto', r=0.707, confidence=0.95) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.