diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__init__.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea73ff0cde75e1d54f359f427ee09f6b6cefa287 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d52ff6fb10c7a6042e2e9601c567350f72178cc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/_async_client.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/_async_client.py new file mode 100644 index 0000000000000000000000000000000000000000..47a3146d2ccbe7d670eb67206655f746ecdfc5be --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/_async_client.py @@ -0,0 +1,2607 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# WARNING +# This entire file has been adapted from the sync-client code in `src/huggingface_hub/inference/_client.py`. +# Any change in InferenceClient will be automatically reflected in AsyncInferenceClient. +# To re-generate the code, run `make style` or `python ./utils/generate_async_inference_client.py --update`. +# WARNING +import asyncio +import base64 +import logging +import re +import time +import warnings +from typing import ( + TYPE_CHECKING, + Any, + AsyncIterable, + Dict, + List, + Literal, + Optional, + Union, + overload, +) + +from requests.structures import CaseInsensitiveDict + +from huggingface_hub.constants import ALL_INFERENCE_API_FRAMEWORKS, INFERENCE_ENDPOINT, MAIN_INFERENCE_API_FRAMEWORKS +from huggingface_hub.errors import InferenceTimeoutError +from huggingface_hub.inference._common import ( + TASKS_EXPECTING_IMAGES, + ContentT, + ModelStatus, + _async_stream_chat_completion_response_from_bytes, + _async_stream_text_generation_response, + _b64_encode, + _b64_to_image, + _bytes_to_dict, + _bytes_to_image, + _bytes_to_list, + _fetch_recommended_models, + _get_unsupported_text_generation_kwargs, + _import_numpy, + _is_chat_completion_server, + _open_as_binary, + _set_as_non_chat_completion_server, + _set_unsupported_text_generation_kwargs, + raise_text_generation_error, +) +from huggingface_hub.inference._generated.types import ( + AudioClassificationOutputElement, + AudioToAudioOutputElement, + AutomaticSpeechRecognitionOutput, + ChatCompletionInputTool, + ChatCompletionInputToolTypeClass, + ChatCompletionOutput, + 
ChatCompletionOutputComplete, + ChatCompletionOutputMessage, + ChatCompletionStreamOutput, + DocumentQuestionAnsweringOutputElement, + FillMaskOutputElement, + ImageClassificationOutputElement, + ImageSegmentationOutputElement, + ImageToTextOutput, + ObjectDetectionOutputElement, + QuestionAnsweringOutputElement, + SummarizationOutput, + TableQuestionAnsweringOutputElement, + TextClassificationOutputElement, + TextGenerationInputGrammarType, + TextGenerationOutput, + TextGenerationStreamOutput, + TokenClassificationOutputElement, + TranslationOutput, + VisualQuestionAnsweringOutputElement, + ZeroShotClassificationOutputElement, + ZeroShotImageClassificationOutputElement, +) +from huggingface_hub.inference._generated.types.chat_completion import ChatCompletionInputToolTypeEnum +from huggingface_hub.inference._types import ( + ConversationalOutput, # soon to be removed +) +from huggingface_hub.utils import ( + build_hf_headers, +) + +from .._common import _async_yield_from, _import_aiohttp + + +if TYPE_CHECKING: + import numpy as np + from PIL.Image import Image + +logger = logging.getLogger(__name__) + + +MODEL_KWARGS_NOT_USED_REGEX = re.compile(r"The following `model_kwargs` are not used by the model: \[(.*?)\]") + + +class AsyncInferenceClient: + """ + Initialize a new Inference Client. + + [`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used + seamlessly with either the (free) Inference API or self-hosted Inference Endpoints. + + Args: + model (`str`, `optional`): + The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `bigcode/starcoder` + or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is + automatically selected for the task. + token (`str` or `bool`, *optional*): + Hugging Face token. Will default to the locally saved token if not provided. + Pass `token=False` if you don't want to send your token to the server. 
+ timeout (`float`, `optional`): + The maximum number of seconds to wait for a response from the server. Loading a new model in Inference + API can take up to several minutes. Defaults to None, meaning it will loop until the server is available. + headers (`Dict[str, str]`, `optional`): + Additional headers to send to the server. By default only the authorization and user-agent headers are sent. + Values in this dictionary will override the default values. + cookies (`Dict[str, str]`, `optional`): + Additional cookies to send to the server. + """ + + def __init__( + self, + model: Optional[str] = None, + token: Union[str, bool, None] = None, + timeout: Optional[float] = None, + headers: Optional[Dict[str, str]] = None, + cookies: Optional[Dict[str, str]] = None, + ) -> None: + self.model: Optional[str] = model + self.token: Union[str, bool, None] = token + self.headers = CaseInsensitiveDict(build_hf_headers(token=token)) # contains 'authorization' + 'user-agent' + if headers is not None: + self.headers.update(headers) + self.cookies = cookies + self.timeout = timeout + + def __repr__(self): + return f"" + + @overload + async def post( # type: ignore[misc] + self, + *, + json: Optional[Union[str, Dict, List]] = None, + data: Optional[ContentT] = None, + model: Optional[str] = None, + task: Optional[str] = None, + stream: Literal[False] = ..., + ) -> bytes: ... + + @overload + async def post( # type: ignore[misc] + self, + *, + json: Optional[Union[str, Dict, List]] = None, + data: Optional[ContentT] = None, + model: Optional[str] = None, + task: Optional[str] = None, + stream: Literal[True] = ..., + ) -> AsyncIterable[bytes]: ... + + @overload + async def post( + self, + *, + json: Optional[Union[str, Dict, List]] = None, + data: Optional[ContentT] = None, + model: Optional[str] = None, + task: Optional[str] = None, + stream: bool = False, + ) -> Union[bytes, AsyncIterable[bytes]]: ... 
+ + async def post( + self, + *, + json: Optional[Union[str, Dict, List]] = None, + data: Optional[ContentT] = None, + model: Optional[str] = None, + task: Optional[str] = None, + stream: bool = False, + ) -> Union[bytes, AsyncIterable[bytes]]: + """ + Make a POST request to the inference server. + + Args: + json (`Union[str, Dict, List]`, *optional*): + The JSON data to send in the request body, specific to each task. Defaults to None. + data (`Union[str, Path, bytes, BinaryIO]`, *optional*): + The content to send in the request body, specific to each task. + It can be raw bytes, a pointer to an opened file, a local file path, + or a URL to an online resource (image, audio file,...). If both `json` and `data` are passed, + `data` will take precedence. At least `json` or `data` must be provided. Defaults to None. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. Will override the model defined at the instance level. Defaults to None. + task (`str`, *optional*): + The task to perform on the inference. All available tasks can be found + [here](https://huggingface.co/tasks). Used only to default to a recommended model if `model` is not + provided. At least `model` or `task` must be provided. Defaults to None. + stream (`bool`, *optional*): + Whether to iterate over streaming APIs. + + Returns: + bytes: The raw bytes returned by the server. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ """ + + aiohttp = _import_aiohttp() + + url = self._resolve_url(model, task) + + if data is not None and json is not None: + warnings.warn("Ignoring `json` as `data` is passed as binary.") + + # Set Accept header if relevant + headers = self.headers.copy() + if task in TASKS_EXPECTING_IMAGES and "Accept" not in headers: + headers["Accept"] = "image/png" + + t0 = time.time() + timeout = self.timeout + while True: + with _open_as_binary(data) as data_as_binary: + # Do not use context manager as we don't want to close the connection immediately when returning + # a stream + client = aiohttp.ClientSession( + headers=headers, cookies=self.cookies, timeout=aiohttp.ClientTimeout(self.timeout) + ) + + try: + response = await client.post(url, json=json, data=data_as_binary) + response_error_payload = None + if response.status != 200: + try: + response_error_payload = await response.json() # get payload before connection closed + except Exception: + pass + response.raise_for_status() + if stream: + return _async_yield_from(client, response) + else: + content = await response.read() + await client.close() + return content + except asyncio.TimeoutError as error: + await client.close() + # Convert any `TimeoutError` to a `InferenceTimeoutError` + raise InferenceTimeoutError(f"Inference call timed out: {url}") from error # type: ignore + except aiohttp.ClientResponseError as error: + error.response_error_payload = response_error_payload + await client.close() + if response.status == 422 and task is not None: + error.message += f". Make sure '{task}' task is supported by the model." + if response.status == 503: + # If Model is unavailable, either raise a TimeoutError... + if timeout is not None and time.time() - t0 > timeout: + raise InferenceTimeoutError( + f"Model not loaded on the server: {url}. 
Please retry with a higher timeout" + f" (current: {self.timeout}).", + request=error.request, + response=error.response, + ) from error + # ...or wait 1s and retry + logger.info(f"Waiting for model to be loaded on the server: {error}") + time.sleep(1) + if timeout is not None: + timeout = max(self.timeout - (time.time() - t0), 1) # type: ignore + continue + raise error + + async def audio_classification( + self, + audio: ContentT, + *, + model: Optional[str] = None, + ) -> List[AudioClassificationOutputElement]: + """ + Perform audio classification on the provided audio content. + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an + audio file. + model (`str`, *optional*): + The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub + or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for + audio classification will be used. + + Returns: + `List[AudioClassificationOutputElement]`: List of [`AudioClassificationOutputElement`] items containing the predicted labels and their confidence. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.audio_classification("audio.flac") + [ + AudioClassificationOutputElement(score=0.4976358711719513, label='hap'), + AudioClassificationOutputElement(score=0.3677836060523987, label='neu'), + ... 
+ ] + ``` + """ + response = await self.post(data=audio, model=model, task="audio-classification") + return AudioClassificationOutputElement.parse_obj_as_list(response) + + async def audio_to_audio( + self, + audio: ContentT, + *, + model: Optional[str] = None, + ) -> List[AudioToAudioOutputElement]: + """ + Performs multiple tasks related to audio-to-audio depending on the model (eg: speech enhancement, source separation). + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The audio content for the model. It can be raw audio bytes, a local audio file, or a URL pointing to an + audio file. + model (`str`, *optional*): + The model can be any model which takes an audio file and returns another audio file. Can be a model ID hosted on the Hugging Face Hub + or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for + audio_to_audio will be used. + + Returns: + `List[AudioToAudioOutputElement]`: A list of [`AudioToAudioOutputElement`] items containing audios label, content-type, and audio content in blob. + + Raises: + `InferenceTimeoutError`: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> audio_output = await client.audio_to_audio("audio.flac") + >>> async for i, item in enumerate(audio_output): + >>> with open(f"output_{i}.flac", "wb") as f: + f.write(item.blob) + ``` + """ + response = await self.post(data=audio, model=model, task="audio-to-audio") + audio_output = AudioToAudioOutputElement.parse_obj_as_list(response) + for item in audio_output: + item.blob = base64.b64decode(item.blob) + return audio_output + + async def automatic_speech_recognition( + self, + audio: ContentT, + *, + model: Optional[str] = None, + ) -> AutomaticSpeechRecognitionOutput: + """ + Perform automatic speech recognition (ASR or audio-to-text) on the given audio content. + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The content to transcribe. It can be raw audio bytes, local audio file, or a URL to an audio file. + model (`str`, *optional*): + The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended model for ASR will be used. + + Returns: + [`AutomaticSpeechRecognitionOutput`]: An item containing the transcribed text and optionally the timestamp chunks. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.automatic_speech_recognition("hello_world.flac").text + "hello world" + ``` + """ + response = await self.post(data=audio, model=model, task="automatic-speech-recognition") + return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response) + + @overload + async def chat_completion( # type: ignore + self, + messages: List[Dict[str, str]], + *, + model: Optional[str] = None, + stream: Literal[False] = False, + frequency_penalty: Optional[float] = None, + logit_bias: Optional[List[float]] = None, + logprobs: Optional[bool] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[float] = None, + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + temperature: Optional[float] = None, + tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, ChatCompletionInputToolTypeEnum]] = None, + tool_prompt: Optional[str] = None, + tools: Optional[List[ChatCompletionInputTool]] = None, + top_logprobs: Optional[int] = None, + top_p: Optional[float] = None, + ) -> ChatCompletionOutput: ... 
    # Overload: stream=True -> an async iterable of incremental ChatCompletionStreamOutput chunks.
    @overload
    async def chat_completion(  # type: ignore
        self,
        messages: List[Dict[str, str]],
        *,
        model: Optional[str] = None,
        stream: Literal[True] = True,
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[List[float]] = None,
        logprobs: Optional[bool] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        seed: Optional[int] = None,
        stop: Optional[List[str]] = None,
        temperature: Optional[float] = None,
        tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, ChatCompletionInputToolTypeEnum]] = None,
        tool_prompt: Optional[str] = None,
        tools: Optional[List[ChatCompletionInputTool]] = None,
        top_logprobs: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> AsyncIterable[ChatCompletionStreamOutput]: ...

    # Overload: stream typed as plain bool (value unknown statically) -> union of both return types.
    @overload
    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        *,
        model: Optional[str] = None,
        stream: bool = False,
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[List[float]] = None,
        logprobs: Optional[bool] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        seed: Optional[int] = None,
        stop: Optional[List[str]] = None,
        temperature: Optional[float] = None,
        tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, ChatCompletionInputToolTypeEnum]] = None,
        tool_prompt: Optional[str] = None,
        tools: Optional[List[ChatCompletionInputTool]] = None,
        top_logprobs: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]: ...
    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        *,
        model: Optional[str] = None,
        stream: bool = False,
        # Parameters from ChatCompletionInput (handled manually)
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[List[float]] = None,
        logprobs: Optional[bool] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        seed: Optional[int] = None,
        stop: Optional[List[str]] = None,
        temperature: Optional[float] = None,
        tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, ChatCompletionInputToolTypeEnum]] = None,
        tool_prompt: Optional[str] = None,
        tools: Optional[List[ChatCompletionInputTool]] = None,
        top_logprobs: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]:
        """
        A method for completing conversations using a specified language model.

        <Tip>

        If the model is served by a server supporting chat-completion, the method will directly call the server's
        `/v1/chat/completions` endpoint. If the server does not support chat-completion, the method will render the
        chat template client-side based on the information fetched from the Hub API. In this case, you will need to
        have `minijinja` template engine installed. Run `pip install "huggingface_hub[inference]"` or `pip install minijinja`
        to install it.

        </Tip>

        Args:
            messages (List[Union[`SystemMessage`, `UserMessage`, `AssistantMessage`]]):
                Conversation history consisting of roles and content pairs.
            model (`str`, *optional*):
                The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
                Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used.
                See https://huggingface.co/tasks/text-generation for more details.
            frequency_penalty (`float`, *optional*):
                Penalizes new tokens based on their existing frequency
                in the text so far. Range: [-2.0, 2.0]. Defaults to 0.0.
            logit_bias (`List[float]`, *optional*):
                Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
                (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
                Defaults to None.
            logprobs (`bool`, *optional*):
                Whether to return log probabilities of the output tokens or not. If true, returns the log
                probabilities of each output token returned in the content of message.
            max_tokens (`int`, *optional*):
                Maximum number of tokens allowed in the response. Defaults to 20.
            n (`int`, *optional*):
                UNUSED.
            presence_penalty (`float`, *optional*):
                Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the
                text so far, increasing the model's likelihood to talk about new topics.
            seed (Optional[`int`], *optional*):
                Seed for reproducible control flow. Defaults to None.
            stop (Optional[`str`], *optional*):
                Up to four strings which trigger the end of the response.
                Defaults to None.
            stream (`bool`, *optional*):
                Enable realtime streaming of responses. Defaults to False.
            temperature (`float`, *optional*):
                Controls randomness of the generations. Lower values ensure
                less random completions. Range: [0, 2]. Defaults to 1.0.
            top_logprobs (`int`, *optional*):
                An integer between 0 and 5 specifying the number of most likely tokens to return at each token
                position, each with an associated log probability. logprobs must be set to true if this parameter is
                used.
            top_p (`float`, *optional*):
                Fraction of the most likely next words to sample from.
                Must be between 0 and 1. Defaults to 1.0.
            tool_choice ([`ChatCompletionInputToolTypeClass`] or [`ChatCompletionInputToolTypeEnum`], *optional*):
                The tool to use for the completion. Defaults to "auto".
            tool_prompt (`str`, *optional*):
                A prompt to be appended before the tools.
            tools (List of [`ChatCompletionInputTool`], *optional*):
                A list of tools the model may call. Currently, only functions are supported as a tool. Use this to
                provide a list of functions the model may generate JSON inputs for.

        Returns:
            [`ChatCompletionOutput`] or Iterable of [`ChatCompletionStreamOutput`]:
            Generated text returned from the server:
            - if `stream=False`, the generated text is returned as a [`ChatCompletionOutput`] (default).
            - if `stream=True`, the generated text is returned token by token as a sequence of [`ChatCompletionStreamOutput`].

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.

        Example:
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> messages = [{"role": "user", "content": "What is the capital of France?"}]
        >>> client = AsyncInferenceClient("HuggingFaceH4/zephyr-7b-beta")
        >>> await client.chat_completion(messages, max_tokens=100)
        ChatCompletionOutput(choices=[...], created=1710498360)

        >>> async for token in await client.chat_completion(messages, max_tokens=10, stream=True):
        ...     print(token)
        ChatCompletionStreamOutput(choices=[...], created=1710498504)
        (...)

        # Tool calling: pass `tools=[...]` (OpenAI-style function schemas) and
        # `tool_choice="auto"`; the generated calls are available on
        # `response.choices[0].message.tool_calls`.
        ```
        """
        # determine model
        model = model or self.model or self.get_recommended_model("text-generation")

        if _is_chat_completion_server(model):
            # First, let's consider the server has a `/v1/chat/completions` endpoint.
            # If that's the case, we don't have to render the chat template client-side.
            model_url = self._resolve_url(model)
            if not model_url.endswith("/chat/completions"):
                model_url += "/v1/chat/completions"

            try:
                data = await self.post(
                    model=model_url,
                    json=dict(
                        model="tgi",  # random string
                        messages=messages,
                        frequency_penalty=frequency_penalty,
                        logit_bias=logit_bias,
                        logprobs=logprobs,
                        max_tokens=max_tokens,
                        n=n,
                        presence_penalty=presence_penalty,
                        seed=seed,
                        stop=stop,
                        temperature=temperature,
                        tool_choice=tool_choice,
                        tool_prompt=tool_prompt,
                        tools=tools,
                        top_logprobs=top_logprobs,
                        top_p=top_p,
                        stream=stream,
                    ),
                    stream=stream,
                )
            except _import_aiohttp().ClientResponseError as e:
                if e.status in (400, 404, 500):
                    # Let's consider the server is not a chat completion server.
                    # Then we call again `chat_completion` which will render the chat template client side.
                    # (can be HTTP 500, HTTP 400, HTTP 404 depending on the server)
                    # NOTE: the model is remembered as non-chat-completion so the next
                    # call skips the failing endpoint entirely.
                    _set_as_non_chat_completion_server(model)
                    logger.warning(
                        f"Server {model_url} does not seem to support chat completion. Falling back to text generation. Error: {e}"
                    )
                    return await self.chat_completion(
                        messages=messages,
                        model=model,
                        stream=stream,
                        max_tokens=max_tokens,
                        seed=seed,
                        stop=stop,
                        temperature=temperature,
                        top_p=top_p,
                    )
                raise

            if stream:
                return _async_stream_chat_completion_response_from_bytes(data)  # type: ignore[arg-type]

            return ChatCompletionOutput.parse_obj_as_instance(data)  # type: ignore[arg-type]

        # At this point, we know the server is not a chat completion server.
        # It means it's a transformers-backed server for which we can send a list of messages directly to the
        # `text-generation` pipeline. We won't receive a detailed response but only the generated text.
        if stream:
            raise ValueError(
                "Streaming token is not supported by the model. This is due to the model not been served by a "
                "Text-Generation-Inference server. Please pass `stream=False` as input."
            )
        if tool_choice is not None or tool_prompt is not None or tools is not None:
            warnings.warn(
                "Tools are not supported by the model. This is due to the model not been served by a "
                "Text-Generation-Inference server. The provided tool parameters will be ignored."
            )

        # generate response
        text_generation_output = await self.text_generation(
            prompt=messages,  # type: ignore # Not correct type but works implicitly
            model=model,
            stream=False,
            details=False,
            max_new_tokens=max_tokens,
            seed=seed,
            stop_sequences=stop,
            temperature=temperature,
            top_p=top_p,
        )

        # Format as a ChatCompletionOutput with dummy values for fields we can't provide
        return ChatCompletionOutput(
            id="dummy",
            model="dummy",
            object="dummy",
            system_fingerprint="dummy",
            usage=None,  # type: ignore # set to `None` as we don't want to provide false information
            created=int(time.time()),
            choices=[
                ChatCompletionOutputComplete(
                    finish_reason="unk",  # type: ignore # set to `unk` as we don't want to provide false information
                    index=0,
                    message=ChatCompletionOutputMessage(
                        content=text_generation_output,
                        role="assistant",
                    ),
                )
            ],
        )

    async def conversational(
        self,
        text: str,
        generated_responses: Optional[List[str]] = None,
        past_user_inputs: Optional[List[str]] = None,
        *,
        parameters: Optional[Dict[str, Any]] = None,
        model: Optional[str] = None,
    ) -> ConversationalOutput:
        """
        Generate conversational responses based on the given input text (i.e. chat with the API).

        <Tip warning={true}>

        [`InferenceClient.conversational`] API is deprecated and will be removed in a future release. Please use
        [`InferenceClient.chat_completion`] instead.

        </Tip>

        Args:
            text (`str`):
                The last input from the user in the conversation.
            generated_responses (`List[str]`, *optional*):
                A list of strings corresponding to the earlier replies from the model. Defaults to None.
            past_user_inputs (`List[str]`, *optional*):
                A list of strings corresponding to the earlier replies from the user. Should be the same length as
                `generated_responses`. Defaults to None.
            parameters (`Dict[str, Any]`, *optional*):
                Additional parameters for the conversational task. Defaults to None. For more details about the available
                parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#conversational-task)
            model (`str`, *optional*):
                The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to
                a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used.
                Defaults to None.

        Returns:
            `Dict`: The generated conversational output.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.

        Example:
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()
        >>> output = await client.conversational("Hi, who are you?")
        >>> output
        {'generated_text': 'I am the one who knocks.', 'conversation': {'generated_responses': ['I am the one who knocks.'], 'past_user_inputs': ['Hi, who are you?']}, 'warnings': ['Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.']}
        >>> await client.conversational(
        ...     "Wow, that's scary!",
        ...     generated_responses=output["conversation"]["generated_responses"],
        ...     past_user_inputs=output["conversation"]["past_user_inputs"],
        ... )
        ```
        """
        warnings.warn(
            "'InferenceClient.conversational' is deprecated and will be removed starting from huggingface_hub>=0.25. "
            "Please use the more appropriate 'InferenceClient.chat_completion' API instead.",
            FutureWarning,
        )
        # Build the nested payload expected by the legacy conversational task;
        # optional history/parameters are only included when explicitly provided.
        payload: Dict[str, Any] = {"inputs": {"text": text}}
        if generated_responses is not None:
            payload["inputs"]["generated_responses"] = generated_responses
        if past_user_inputs is not None:
            payload["inputs"]["past_user_inputs"] = past_user_inputs
        if parameters is not None:
            payload["parameters"] = parameters
        response = await self.post(json=payload, model=model, task="conversational")
        return _bytes_to_dict(response)  # type: ignore

    async def document_question_answering(
        self,
        image: ContentT,
        question: str,
        *,
        model: Optional[str] = None,
    ) -> List[DocumentQuestionAnsweringOutputElement]:
        """
        Answer questions on document images.

        Args:
            image (`Union[str, Path, bytes, BinaryIO]`):
                The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
            question (`str`):
                Question to be answered.
            model (`str`, *optional*):
                The model to use for the document question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
                a deployed Inference Endpoint. If not provided, the default recommended document question answering model will be used.
                Defaults to None.

        Returns:
            `List[DocumentQuestionAnsweringOutputElement]`: a list of [`DocumentQuestionAnsweringOutputElement`] items containing the predicted label, associated probability, word ids, and page number.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.

        Example:
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()
        >>> await client.document_question_answering(image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", question="What is the invoice number?")
        [DocumentQuestionAnsweringOutputElement(score=0.42515629529953003, answer='us-001', start=16, end=16)]
        ```
        """
        # The image is base64-encoded inline in the JSON payload (not sent as raw binary).
        payload: Dict[str, Any] = {"question": question, "image": _b64_encode(image)}
        response = await self.post(json=payload, model=model, task="document-question-answering")
        return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response)

    async def feature_extraction(self, text: str, *, model: Optional[str] = None) -> "np.ndarray":
        """
        Generate embeddings for a given text.

        Args:
            text (`str`):
                The text to embed.
            model (`str`, *optional*):
                The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to
                a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used.
                Defaults to None.

        Returns:
            `np.ndarray`: The embedding representing the input text as a float32 numpy array.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.feature_extraction("Hi, who are you?") + array([[ 2.424802 , 2.93384 , 1.1750331 , ..., 1.240499, -0.13776633, -0.7889173 ], + [-0.42943227, -0.6364878 , -1.693462 , ..., 0.41978157, -2.4336355 , 0.6162071 ], + ..., + [ 0.28552425, -0.928395 , -1.2077185 , ..., 0.76810825, -2.1069427 , 0.6236161 ]], dtype=float32) + ``` + """ + response = await self.post(json={"inputs": text}, model=model, task="feature-extraction") + np = _import_numpy() + return np.array(_bytes_to_dict(response), dtype="float32") + + async def fill_mask(self, text: str, *, model: Optional[str] = None) -> List[FillMaskOutputElement]: + """ + Fill in a hole with a missing word (token to be precise). + + Args: + text (`str`): + a string to be filled from, must contain the [MASK] token (check model card for exact name of the mask). + model (`str`, *optional*): + The model to use for the fill mask task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended fill mask model will be used. + Defaults to None. + + Returns: + `List[FillMaskOutputElement]`: a list of [`FillMaskOutputElement`] items containing the predicted label, associated + probability, token reference, and completed text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.fill_mask("The goal of life is .") + [ + FillMaskOutputElement(score=0.06897063553333282, token=11098, token_str=' happiness', sequence='The goal of life is happiness.'), + FillMaskOutputElement(score=0.06554922461509705, token=45075, token_str=' immortality', sequence='The goal of life is immortality.') + ] + ``` + """ + response = await self.post(json={"inputs": text}, model=model, task="fill-mask") + return FillMaskOutputElement.parse_obj_as_list(response) + + async def image_classification( + self, + image: ContentT, + *, + model: Optional[str] = None, + ) -> List[ImageClassificationOutputElement]: + """ + Perform image classification on the given image using the specified model. + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The image to classify. It can be raw bytes, an image file, or a URL to an online image. + model (`str`, *optional*): + The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a + deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used. + + Returns: + `List[ImageClassificationOutputElement]`: a list of [`ImageClassificationOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg") + [ImageClassificationOutputElement(score=0.9779096841812134, label='Blenheim spaniel'), ...] + ``` + """ + response = await self.post(data=image, model=model, task="image-classification") + return ImageClassificationOutputElement.parse_obj_as_list(response) + + async def image_segmentation( + self, + image: ContentT, + *, + model: Optional[str] = None, + ) -> List[ImageSegmentationOutputElement]: + """ + Perform image segmentation on the given image using the specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`). + + + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The image to segment. It can be raw bytes, an image file, or a URL to an online image. + model (`str`, *optional*): + The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a + deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used. + + Returns: + `List[ImageSegmentationOutputElement]`: A list of [`ImageSegmentationOutputElement`] items containing the segmented masks and associated attributes. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.image_segmentation("cat.jpg"): + [ImageSegmentationOutputElement(score=0.989008, label='LABEL_184', mask=), ...] 
+ ``` + """ + response = await self.post(data=image, model=model, task="image-segmentation") + output = ImageSegmentationOutputElement.parse_obj_as_list(response) + for item in output: + item.mask = _b64_to_image(item.mask) + return output + + async def image_to_image( + self, + image: ContentT, + prompt: Optional[str] = None, + *, + negative_prompt: Optional[str] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = None, + guidance_scale: Optional[float] = None, + model: Optional[str] = None, + **kwargs, + ) -> "Image": + """ + Perform image-to-image translation using a specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`). + + + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The input image for translation. It can be raw bytes, an image file, or a URL to an online image. + prompt (`str`, *optional*): + The text prompt to guide the image generation. + negative_prompt (`str`, *optional*): + A negative prompt to guide the translation process. + height (`int`, *optional*): + The height in pixels of the generated image. + width (`int`, *optional*): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*): + Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `Image`: The translated image. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. 
+ `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> image = await client.image_to_image("cat.jpg", prompt="turn the cat into a tiger") + >>> image.save("tiger.jpg") + ``` + """ + parameters = { + "prompt": prompt, + "negative_prompt": negative_prompt, + "height": height, + "width": width, + "num_inference_steps": num_inference_steps, + "guidance_scale": guidance_scale, + **kwargs, + } + if all(parameter is None for parameter in parameters.values()): + # Either only an image to send => send as raw bytes + data = image + payload: Optional[Dict[str, Any]] = None + else: + # Or an image + some parameters => use base64 encoding + data = None + payload = {"inputs": _b64_encode(image)} + for key, value in parameters.items(): + if value is not None: + payload.setdefault("parameters", {})[key] = value + + response = await self.post(json=payload, data=data, model=model, task="image-to-image") + return _bytes_to_image(response) + + async def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput: + """ + Takes an input image and return text. + + Models can have very different outputs depending on your use case (image captioning, optical character recognition + (OCR), Pix2Struct, etc). Please have a look to the model card to learn more about a model's specificities. + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The input image to caption. It can be raw bytes, an image file, or a URL to an online image.. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + [`ImageToTextOutput`]: The generated text. 
+ + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.image_to_text("cat.jpg") + 'a cat standing in a grassy field ' + >>> await client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg") + 'a dog laying on the grass next to a flower pot ' + ``` + """ + response = await self.post(data=image, model=model, task="image-to-text") + return ImageToTextOutput.parse_obj_as_instance(response) + + async def list_deployed_models( + self, frameworks: Union[None, str, Literal["all"], List[str]] = None + ) -> Dict[str, List[str]]: + """ + List models deployed on the Serverless Inference API service. + + This helper checks deployed models framework by framework. By default, it will check the 4 main frameworks that + are supported and account for 95% of the hosted models. However, if you want a complete list of models you can + specify `frameworks="all"` as input. Alternatively, if you know before-hand which framework you are interested + in, you can also restrict to search to this one (e.g. `frameworks="text-generation-inference"`). The more + frameworks are checked, the more time it will take. + + + + This endpoint method does not return a live list of all models available for the Serverless Inference API service. + It searches over a cached list of models that were recently available and the list may not be up to date. + If you want to know the live status of a specific model, use [`~InferenceClient.get_model_status`]. + + + + + + This endpoint method is mostly useful for discoverability. 
        If you already know which model you want to use and want to
        check its availability, you can directly use [`~InferenceClient.get_model_status`].



        Args:
            frameworks (`Literal["all"]` or `List[str]` or `str`, *optional*):
                The frameworks to filter on. By default only a subset of the available frameworks are tested. If set to
                "all", all available frameworks will be tested. It is also possible to provide a single framework or a
                custom set of frameworks to check.

        Returns:
            `Dict[str, List[str]]`: A dictionary mapping task names to a sorted list of model IDs.

        Example:
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()

        # Discover zero-shot-classification models currently deployed
        >>> models = await client.list_deployed_models()
        >>> models["zero-shot-classification"]
        ['Narsil/deberta-large-mnli-zero-cls', 'facebook/bart-large-mnli', ...]

        # List from only 1 framework
        >>> await client.list_deployed_models("text-generation-inference")
        {'text-generation': ['bigcode/starcoder', 'meta-llama/Llama-2-70b-chat-hf', ...], ...}
        ```
        """
        # Resolve which frameworks to check
        if frameworks is None:
            frameworks = MAIN_INFERENCE_API_FRAMEWORKS
        elif frameworks == "all":
            frameworks = ALL_INFERENCE_API_FRAMEWORKS
        elif isinstance(frameworks, str):
            frameworks = [frameworks]
        frameworks = list(set(frameworks))  # de-duplicate before fanning out one request per framework

        # Fetch deployed models framework by framework (requests are issued concurrently below)
        models_by_task: Dict[str, List[str]] = {}

        def _unpack_response(framework: str, items: List[Dict]) -> None:
            # Merge one framework's API response into the shared `models_by_task` accumulator.
            for model in items:
                if framework == "sentence-transformers":
                    # Model running with the `sentence-transformers` framework can work with both tasks even if not
                    # branded as such in the API response
                    models_by_task.setdefault("feature-extraction", []).append(model["model_id"])
                    models_by_task.setdefault("sentence-similarity", []).append(model["model_id"])
                else:
                    models_by_task.setdefault(model["task"], []).append(model["model_id"])

        async def _fetch_framework(framework: str) -> None:
            # One GET per framework; raise_for_status surfaces any non-2xx response.
            async with _import_aiohttp().ClientSession(headers=self.headers) as client:
                response = await client.get(f"{INFERENCE_ENDPOINT}/framework/{framework}")
                response.raise_for_status()
                _unpack_response(framework, await response.json())

        import asyncio  # NOTE(review): redundant — `asyncio` is already imported at module level

        await asyncio.gather(*[_fetch_framework(framework) for framework in frameworks])

        # Sort alphabetically for discoverability and return
        for task, models in models_by_task.items():
            models_by_task[task] = sorted(set(models), key=lambda x: x.lower())
        return models_by_task

    async def object_detection(
        self,
        image: ContentT,
        *,
        model: Optional[str] = None,
    ) -> List[ObjectDetectionOutputElement]:
        """
        Perform object detection on the given image using the specified model.



        You must have `PIL` installed if you want to work with images (`pip install Pillow`).



        Args:
            image (`Union[str, Path, bytes, BinaryIO]`):
                The image to detect objects on. It can be raw bytes, an image file, or a URL to an online image.
            model (`str`, *optional*):
                The model to use for object detection. Can be a model ID hosted on the Hugging Face Hub or a URL to a
                deployed Inference Endpoint. If not provided, the default recommended model for object detection (DETR) will be used.

        Returns:
            `List[ObjectDetectionOutputElement]`: A list of [`ObjectDetectionOutputElement`] items containing the bounding boxes and associated attributes.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
            `ValueError`:
                If the request output is not a List.

        Example:
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()
        >>> await client.object_detection("people.jpg")
        [ObjectDetectionOutputElement(score=0.9486683011054993, label='person', box=ObjectDetectionBoundingBox(xmin=59, ymin=39, xmax=420, ymax=510)), ...]
        ```
        """
        # Detect objects: the raw image (bytes / file path / URL) is posted directly as the request body.
        response = await self.post(data=image, model=model, task="object-detection")
        return ObjectDetectionOutputElement.parse_obj_as_list(response)

    async def question_answering(
        self, question: str, context: str, *, model: Optional[str] = None
    ) -> QuestionAnsweringOutputElement:
        """
        Retrieve the answer to a question from a given text.

        Args:
            question (`str`):
                Question to be answered.
            context (`str`):
                The context of the question.
            model (`str`):
                The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
                a deployed Inference Endpoint.

        Returns:
            [`QuestionAnsweringOutputElement`]: a question answering output containing the score, start index, end index, and answer.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") + QuestionAnsweringOutputElement(score=0.9326562285423279, start=11, end=16, answer='Clara') + ``` + """ + + payload: Dict[str, Any] = {"question": question, "context": context} + response = await self.post( + json=payload, + model=model, + task="question-answering", + ) + return QuestionAnsweringOutputElement.parse_obj_as_instance(response) + + async def sentence_similarity( + self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None + ) -> List[float]: + """ + Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings. + + Args: + sentence (`str`): + The main sentence to compare to others. + other_sentences (`List[str]`): + The list of sentences to compare to. + model (`str`, *optional*): + The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used. + Defaults to None. + + Returns: + `List[float]`: The embedding representing the input text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.sentence_similarity( + ... "Machine learning is so easy.", + ... other_sentences=[ + ... "Deep learning is so straightforward.", + ... "This is so difficult, like rocket science.", + ... "I can't believe how much I struggled with this.", + ... ], + ... 
) + [0.7785726189613342, 0.45876261591911316, 0.2906220555305481] + ``` + """ + response = await self.post( + json={"inputs": {"source_sentence": sentence, "sentences": other_sentences}}, + model=model, + task="sentence-similarity", + ) + return _bytes_to_list(response) + + async def summarization( + self, + text: str, + *, + parameters: Optional[Dict[str, Any]] = None, + model: Optional[str] = None, + ) -> SummarizationOutput: + """ + Generate a summary of a given text using a specified model. + + Args: + text (`str`): + The input text to summarize. + parameters (`Dict[str, Any]`, *optional*): + Additional parameters for summarization. Check out this [page](https://huggingface.co/docs/api-inference/detailed_parameters#summarization-task) + for more details. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + [`SummarizationOutput`]: The generated summary text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.summarization("The Eiffel tower...") + SummarizationOutput(generated_text="The Eiffel tower is one of the most famous landmarks in the world....") + ``` + """ + payload: Dict[str, Any] = {"inputs": text} + if parameters is not None: + payload["parameters"] = parameters + response = await self.post(json=payload, model=model, task="summarization") + return SummarizationOutput.parse_obj_as_list(response)[0] + + async def table_question_answering( + self, table: Dict[str, Any], query: str, *, model: Optional[str] = None + ) -> TableQuestionAnsweringOutputElement: + """ + Retrieve the answer to a question from information given in a table. + + Args: + table (`str`): + A table of data represented as a dict of lists where entries are headers and the lists are all the + values, all lists must have the same size. + query (`str`): + The query in plain text that you want to ask the table. + model (`str`): + The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face + Hub or a URL to a deployed Inference Endpoint. + + Returns: + [`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> query = "How many stars does the transformers repository have?" 
+ >>> table = {"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"]} + >>> await client.table_question_answering(table, query, model="google/tapas-base-finetuned-wtq") + TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE') + ``` + """ + response = await self.post( + json={ + "query": query, + "table": table, + }, + model=model, + task="table-question-answering", + ) + return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response) + + async def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[str]: + """ + Classifying a target category (a group) based on a set of attributes. + + Args: + table (`Dict[str, Any]`): + Set of attributes to classify. + model (`str`, *optional*): + The model to use for the tabular classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular classification model will be used. + Defaults to None. + + Returns: + `List`: a list of labels, one per row in the initial table. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> table = { + ... "fixed_acidity": ["7.4", "7.8", "10.3"], + ... "volatile_acidity": ["0.7", "0.88", "0.32"], + ... "citric_acid": ["0", "0", "0.45"], + ... "residual_sugar": ["1.9", "2.6", "6.4"], + ... "chlorides": ["0.076", "0.098", "0.073"], + ... "free_sulfur_dioxide": ["11", "25", "5"], + ... "total_sulfur_dioxide": ["34", "67", "13"], + ... "density": ["0.9978", "0.9968", "0.9976"], + ... "pH": ["3.51", "3.2", "3.23"], + ... 
"sulphates": ["0.56", "0.68", "0.82"], + ... "alcohol": ["9.4", "9.8", "12.6"], + ... } + >>> await client.tabular_classification(table=table, model="julien-c/wine-quality") + ["5", "5", "5"] + ``` + """ + response = await self.post(json={"table": table}, model=model, task="tabular-classification") + return _bytes_to_list(response) + + async def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[float]: + """ + Predicting a numerical target value given a set of attributes/features in a table. + + Args: + table (`Dict[str, Any]`): + Set of attributes stored in a table. The attributes used to predict the target can be both numerical and categorical. + model (`str`, *optional*): + The model to use for the tabular regression task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular regression model will be used. + Defaults to None. + + Returns: + `List`: a list of predicted numerical target values. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> table = { + ... "Height": ["11.52", "12.48", "12.3778"], + ... "Length1": ["23.2", "24", "23.9"], + ... "Length2": ["25.4", "26.3", "26.5"], + ... "Length3": ["30", "31.2", "31.1"], + ... "Species": ["Bream", "Bream", "Bream"], + ... "Width": ["4.02", "4.3056", "4.6961"], + ... 
} + >>> await client.tabular_regression(table, model="scikit-learn/Fish-Weight") + [110, 120, 130] + ``` + """ + response = await self.post(json={"table": table}, model=model, task="tabular-regression") + return _bytes_to_list(response) + + async def text_classification( + self, text: str, *, model: Optional[str] = None + ) -> List[TextClassificationOutputElement]: + """ + Perform text classification (e.g. sentiment-analysis) on the given text. + + Args: + text (`str`): + A string to be classified. + model (`str`, *optional*): + The model to use for the text classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended text classification model will be used. + Defaults to None. + + Returns: + `List[TextClassificationOutputElement]`: a list of [`TextClassificationOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.text_classification("I like you") + [ + TextClassificationOutputElement(label='POSITIVE', score=0.9998695850372314), + TextClassificationOutputElement(label='NEGATIVE', score=0.0001304351753788069), + ] + ``` + """ + response = await self.post(json={"inputs": text}, model=model, task="text-classification") + return TextClassificationOutputElement.parse_obj_as_list(response)[0] # type: ignore [return-value] + + @overload + async def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[False] = ..., + stream: Literal[False] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop_sequences: Optional[List[str]] = None, # Same as `stop` + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> str: ... 

    # Overload: details=True, stream=False — returns a single `TextGenerationOutput`
    # carrying the generated text plus generation details (tokens, logprobs, ...).
    @overload
    async def text_generation(  # type: ignore
        self,
        prompt: str,
        *,
        details: Literal[True] = ...,
        stream: Literal[False] = ...,
        model: Optional[str] = None,
        # Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
        best_of: Optional[int] = None,
        decoder_input_details: Optional[bool] = None,
        do_sample: Optional[bool] = False,  # Manual default value
        frequency_penalty: Optional[float] = None,
        grammar: Optional[TextGenerationInputGrammarType] = None,
        max_new_tokens: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        return_full_text: Optional[bool] = False,  # Manual default value
        seed: Optional[int] = None,
        stop_sequences: Optional[List[str]] = None,  # Same as `stop`
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_n_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        truncate: Optional[int] = None,
        typical_p: Optional[float] = None,
        watermark: Optional[bool] = None,
    ) -> TextGenerationOutput: ...

    # Overload: details=False, stream=True — yields generated tokens as plain strings.
    @overload
    async def text_generation(  # type: ignore
        self,
        prompt: str,
        *,
        details: Literal[False] = ...,
        stream: Literal[True] = ...,
        model: Optional[str] = None,
        # Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
        best_of: Optional[int] = None,
        decoder_input_details: Optional[bool] = None,
        do_sample: Optional[bool] = False,  # Manual default value
        frequency_penalty: Optional[float] = None,
        grammar: Optional[TextGenerationInputGrammarType] = None,
        max_new_tokens: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        return_full_text: Optional[bool] = False,  # Manual default value
        seed: Optional[int] = None,
        stop_sequences: Optional[List[str]] = None,  # Same as `stop`
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_n_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        truncate: Optional[int] = None,
        typical_p: Optional[float] = None,
        watermark: Optional[bool] = None,
    ) -> AsyncIterable[str]: ...
    # Overload: details=True, stream=True -> an async iterable of
    # TextGenerationStreamOutput items (token-level details per streamed chunk).
    @overload
    async def text_generation(  # type: ignore
        self,
        prompt: str,
        *,
        details: Literal[True] = ...,
        stream: Literal[True] = ...,
        model: Optional[str] = None,
        # Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
        best_of: Optional[int] = None,
        decoder_input_details: Optional[bool] = None,
        do_sample: Optional[bool] = False,  # Manual default value
        frequency_penalty: Optional[float] = None,
        grammar: Optional[TextGenerationInputGrammarType] = None,
        max_new_tokens: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        return_full_text: Optional[bool] = False,  # Manual default value
        seed: Optional[int] = None,
        stop_sequences: Optional[List[str]] = None,  # Same as `stop`
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_n_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        truncate: Optional[int] = None,
        typical_p: Optional[float] = None,
        watermark: Optional[bool] = None,
    ) -> AsyncIterable[TextGenerationStreamOutput]: ...
    # Overload: details=True with `stream` not statically known (plain bool) ->
    # the union of the two detailed return shapes.
    @overload
    async def text_generation(
        self,
        prompt: str,
        *,
        details: Literal[True] = ...,
        stream: bool = ...,
        model: Optional[str] = None,
        # Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
        best_of: Optional[int] = None,
        decoder_input_details: Optional[bool] = None,
        do_sample: Optional[bool] = False,  # Manual default value
        frequency_penalty: Optional[float] = None,
        grammar: Optional[TextGenerationInputGrammarType] = None,
        max_new_tokens: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        return_full_text: Optional[bool] = False,  # Manual default value
        seed: Optional[int] = None,
        stop_sequences: Optional[List[str]] = None,  # Same as `stop`
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_n_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        truncate: Optional[int] = None,
        typical_p: Optional[float] = None,
        watermark: Optional[bool] = None,
    ) -> Union[TextGenerationOutput, AsyncIterable[TextGenerationStreamOutput]]: ...
    async def text_generation(
        self,
        prompt: str,
        *,
        details: bool = False,
        stream: bool = False,
        model: Optional[str] = None,
        # Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
        best_of: Optional[int] = None,
        decoder_input_details: Optional[bool] = None,
        do_sample: Optional[bool] = False,  # Manual default value
        frequency_penalty: Optional[float] = None,
        grammar: Optional[TextGenerationInputGrammarType] = None,
        max_new_tokens: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        return_full_text: Optional[bool] = False,  # Manual default value
        seed: Optional[int] = None,
        stop_sequences: Optional[List[str]] = None,  # Same as `stop`
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_n_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        truncate: Optional[int] = None,
        typical_p: Optional[float] = None,
        watermark: Optional[bool] = None,
    ) -> Union[str, TextGenerationOutput, AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
        """
        Given a prompt, generate the following text.

        The endpoint is expected to run the `text-generation-inference` (TGI) backend; smaller models may
        still be served by the plain `transformers` + `api-inference` stack. Both APIs are similar but not
        identical: this method works with both, and parameters only supported by TGI are silently dropped
        (with a warning) when the server turns out not to be TGI.

        Args:
            prompt (`str`):
                Input text.
            details (`bool`, *optional*):
                Pass `details=True` for a detailed output (tokens, probabilities, seed, finish reason, ...).
                Only available on TGI-backed endpoints.
            stream (`bool`, *optional*):
                Pass `stream=True` to receive tokens as they are generated. Only available on TGI-backed
                endpoints.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. Overrides the
                instance-level model. Defaults to None.
            best_of (`int`, *optional*):
                Generate `best_of` sequences and return the one with the highest token logprobs.
            decoder_input_details (`bool`, *optional*):
                Return decoder input token logprobs and ids. Requires `details=True`. Defaults to `False`.
            do_sample (`bool`, *optional*):
                Activate logits sampling.
            frequency_penalty (`float`, *optional*):
                Number between -2.0 and 2.0; positive values penalize tokens by their existing frequency.
            grammar ([`TextGenerationInputGrammarType`], *optional*):
                Grammar constraints (JSONSchema or regex).
            max_new_tokens (`int`, *optional*):
                Maximum number of generated tokens.
            repetition_penalty (`float`, *optional*):
                Repetition penalty; 1.0 means no penalty.
            return_full_text (`bool`, *optional*):
                Whether to prepend the prompt to the generated text.
            seed (`int`, *optional*):
                Random sampling seed.
            stop_sequences (`List[str]`, *optional*):
                Stop generating when one of these sequences is produced (sent to the server as `stop`).
            temperature (`float`, *optional*):
                Value used to modulate the logits distribution.
            top_n_tokens (`int`, *optional*):
                Return info about the `top_n_tokens` most likely tokens at each step.
            top_k (`int`, *optional*):
                Number of highest-probability tokens kept for top-k filtering.
            top_p (`float`, *optional*):
                Nucleus-sampling probability mass.
            truncate (`int`, *optional*):
                Truncate input tokens to the given size.
            typical_p (`float`, *optional*):
                Typical decoding mass.
            watermark (`bool`, *optional*):
                Enable watermarking.

        Returns:
            `Union[str, TextGenerationOutput, AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]`:
            `str` (default), `AsyncIterable[str]` if `stream=True`, [`TextGenerationOutput`] if
            `details=True`, or `AsyncIterable[TextGenerationStreamOutput]` if both are True.

        Raises:
            `ValidationError`:
                If input values are not valid (no HTTP call is made).
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.

        Example:
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()
        >>> await client.text_generation("The huggingface_hub library is ", max_new_tokens=12)
        '100% open source and built to be easy to use.'
        ```
        """
        # `decoder_input_details` only makes it into the response when `details=True`;
        # warn and drop it rather than silently returning a truncated answer.
        if decoder_input_details and not details:
            warnings.warn(
                "`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that"
                " the output from the server will be truncated."
            )
            decoder_input_details = False

        # Build payload
        parameters = {
            "best_of": best_of,
            "decoder_input_details": decoder_input_details,
            "do_sample": do_sample,
            "frequency_penalty": frequency_penalty,
            "grammar": grammar,
            "max_new_tokens": max_new_tokens,
            "repetition_penalty": repetition_penalty,
            "return_full_text": return_full_text,
            "seed": seed,
            "stop": stop_sequences if stop_sequences is not None else [],
            "temperature": temperature,
            "top_k": top_k,
            "top_n_tokens": top_n_tokens,
            "top_p": top_p,
            "truncate": truncate,
            "typical_p": typical_p,
            "watermark": watermark,
        }
        # Drop unset values so non-TGI servers don't reject unknown keys.
        parameters = {k: v for k, v in parameters.items() if v is not None}
        payload = {
            "inputs": prompt,
            "parameters": parameters,
            "stream": stream,
        }

        # Remove some parameters if not a TGI server
        # (the per-model unsupported-kwargs cache is populated on a previous failed call below).
        unsupported_kwargs = _get_unsupported_text_generation_kwargs(model)
        if len(unsupported_kwargs) > 0:
            # The server does not support some parameters
            # => means it is not a TGI server
            # => remove unsupported parameters and warn the user

            ignored_parameters = []
            for key in unsupported_kwargs:
                if parameters.get(key):
                    ignored_parameters.append(key)
                parameters.pop(key, None)
            if len(ignored_parameters) > 0:
                warnings.warn(
                    "API endpoint/model for text-generation is not served via TGI. Ignoring following parameters:"
                    f" {', '.join(ignored_parameters)}.",
                    UserWarning,
                )
            if details:
                warnings.warn(
                    "API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will"
                    " be ignored meaning only the generated text will be returned.",
                    UserWarning,
                )
                details = False
            if stream:
                raise ValueError(
                    "API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream."
                    " Please pass `stream=False` as input."
                )

        # Handle errors separately for more precise error messages
        try:
            bytes_output = await self.post(json=payload, model=model, task="text-generation", stream=stream)  # type: ignore
        except _import_aiohttp().ClientResponseError as e:
            # A 400 whose error message matches MODEL_KWARGS_NOT_USED_REGEX means the server is not
            # TGI: remember which kwargs it rejected, then retry once recursively (the retry takes
            # the `unsupported_kwargs` branch above and strips them).
            match = MODEL_KWARGS_NOT_USED_REGEX.search(e.response_error_payload["error"])
            if e.status == 400 and match:
                unused_params = [kwarg.strip("' ") for kwarg in match.group(1).split(",")]
                _set_unsupported_text_generation_kwargs(model, unused_params)
                return await self.text_generation(  # type: ignore
                    prompt=prompt,
                    details=details,
                    stream=stream,
                    model=model,
                    best_of=best_of,
                    decoder_input_details=decoder_input_details,
                    do_sample=do_sample,
                    frequency_penalty=frequency_penalty,
                    grammar=grammar,
                    max_new_tokens=max_new_tokens,
                    repetition_penalty=repetition_penalty,
                    return_full_text=return_full_text,
                    seed=seed,
                    stop_sequences=stop_sequences,
                    temperature=temperature,
                    top_k=top_k,
                    top_n_tokens=top_n_tokens,
                    top_p=top_p,
                    truncate=truncate,
                    typical_p=typical_p,
                    watermark=watermark,
                )
            raise_text_generation_error(e)

        # Parse output
        if stream:
            return _async_stream_text_generation_response(bytes_output, details)  # type: ignore

        data = _bytes_to_dict(bytes_output)[0]  # type: ignore[arg-type]
        return TextGenerationOutput.parse_obj_as_instance(data) if details else data["generated_text"]

    async def text_to_image(
        self,
        prompt: str,
        *,
        negative_prompt: Optional[str] = None,
        height: Optional[float] = None,
        width: Optional[float] = None,
        num_inference_steps: Optional[float] = None,
        guidance_scale: Optional[float] = None,
        model: Optional[str] = None,
        **kwargs,
    ) -> "Image":
        """
        Generate an image based on a given text using a specified model.

        <Tip>

        You must have `PIL` installed if you want to work with images (`pip install Pillow`).

        </Tip>

        Args:
            prompt (`str`):
                The prompt to generate an image from.
            negative_prompt (`str`, *optional*):
                An optional negative prompt for the image generation.
            height (`float`, *optional*):
                The height in pixels of the image to generate.
            width (`float`, *optional*):
                The width in pixels of the image to generate.
            num_inference_steps (`int`, *optional*):
                The number of denoising steps. More steps usually give higher quality at the cost of speed.
            guidance_scale (`float`, *optional*):
                Higher values tie the image more closely to the prompt, usually at the cost of image quality.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. Overrides the
                instance-level model. Defaults to None.

        Returns:
            `Image`: The generated image.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
        """
        payload = {"inputs": prompt}
        parameters = {
            "negative_prompt": negative_prompt,
            "height": height,
            "width": width,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
            **kwargs,
        }
        # Only create the "parameters" sub-dict if at least one parameter is set.
        for key, value in parameters.items():
            if value is not None:
                payload.setdefault("parameters", {})[key] = value  # type: ignore
        response = await self.post(json=payload, model=model, task="text-to-image")
        return _bytes_to_image(response)

    async def text_to_speech(self, text: str, *, model: Optional[str] = None) -> bytes:
        """
        Synthesize an audio of a voice pronouncing a given text.

        Args:
            text (`str`):
                The text to synthesize.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. Overrides the
                instance-level model. Defaults to None.

        Returns:
            `bytes`: The generated audio (raw bytes; format depends on the model's output).

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
        """
        return await self.post(json={"inputs": text}, model=model, task="text-to-speech")

    async def token_classification(
        self, text: str, *, model: Optional[str] = None
    ) -> List[TokenClassificationOutputElement]:
        """
        Perform token classification on the given text (e.g. Named Entity Recognition or
        grammatical parsing).

        Args:
            text (`str`):
                A string to be classified.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. If not provided,
                the default recommended token classification model will be used. Defaults to None.

        Returns:
            `List[TokenClassificationOutputElement]`: Items containing the entity group, confidence score,
            word, start and end index.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
        """
        payload: Dict[str, Any] = {"inputs": text}
        response = await self.post(
            json=payload,
            model=model,
            task="token-classification",
        )
        return TokenClassificationOutputElement.parse_obj_as_list(response)

    async def translation(
        self, text: str, *, model: Optional[str] = None, src_lang: Optional[str] = None, tgt_lang: Optional[str] = None
    ) -> TranslationOutput:
        """
        Convert text from one language to another.

        Source/target languages usually depend on the model; for models supporting it, pass `src_lang`
        and `tgt_lang` together (see the model card for valid codes).

        Args:
            text (`str`):
                A string to be translated.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. If not provided,
                the default recommended translation model will be used. Defaults to None.
            src_lang (`str`, *optional*):
                Source language of the translation task, i.e. input language. Cannot be passed without `tgt_lang`.
            tgt_lang (`str`, *optional*):
                Target language of the translation task, i.e. output language. Cannot be passed without `src_lang`.

        Returns:
            [`TranslationOutput`]: The generated translated text.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
            `ValueError`:
                If only one of the `src_lang` and `tgt_lang` arguments are provided.
        """
        # Throw error if only one of `src_lang` and `tgt_lang` was given
        if src_lang is not None and tgt_lang is None:
            raise ValueError("You cannot specify `src_lang` without specifying `tgt_lang`.")

        if src_lang is None and tgt_lang is not None:
            raise ValueError("You cannot specify `tgt_lang` without specifying `src_lang`.")

        # If both `src_lang` and `tgt_lang` are given, pass them to the request body
        payload: Dict = {"inputs": text}
        if src_lang and tgt_lang:
            payload["parameters"] = {"src_lang": src_lang, "tgt_lang": tgt_lang}
        response = await self.post(json=payload, model=model, task="translation")
        # Server returns a one-element list; unwrap it.
        return TranslationOutput.parse_obj_as_list(response)[0]

    async def visual_question_answering(
        self,
        image: ContentT,
        question: str,
        *,
        model: Optional[str] = None,
    ) -> List[VisualQuestionAnsweringOutputElement]:
        """
        Answering open-ended questions based on an image.

        Args:
            image (`Union[str, Path, bytes, BinaryIO]`):
                The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
            question (`str`):
                Question to be answered.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. If not provided,
                the default recommended visual question answering model will be used. Defaults to None.

        Returns:
            `List[VisualQuestionAnsweringOutputElement]`: Items containing the predicted label and
            associated probability.

        Raises:
            `InferenceTimeoutError`:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
        """
        # Image is sent base64-encoded inside the JSON payload (not as multipart).
        payload: Dict[str, Any] = {"question": question, "image": _b64_encode(image)}
        response = await self.post(json=payload, model=model, task="visual-question-answering")
        return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response)

    async def zero_shot_classification(
        self, text: str, labels: List[str], *, multi_label: bool = False, model: Optional[str] = None
    ) -> List[ZeroShotClassificationOutputElement]:
        """
        Provide as input a text and a set of candidate labels to classify the input text.

        Args:
            text (`str`):
                The input text to classify.
            labels (`List[str]`):
                List of string possible labels. There must be at least 2 labels.
            multi_label (`bool`):
                Boolean that is set to True if classes can overlap.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. Overrides the
                instance-level model. Defaults to None.

        Returns:
            `List[ZeroShotClassificationOutputElement]`: Items containing the predicted labels and their
            confidence.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
        """
        # Raise ValueError if input is less than 2 labels
        if len(labels) < 2:
            raise ValueError("You must specify at least 2 classes to compare.")

        # NOTE(review): labels are joined with "," — a label that itself contains a comma would be
        # split server-side. Confirm labels are comma-free before relying on this.
        response = await self.post(
            json={
                "inputs": text,
                "parameters": {
                    "candidate_labels": ",".join(labels),
                    "multi_label": multi_label,
                },
            },
            model=model,
            task="zero-shot-classification",
        )
        output = _bytes_to_dict(response)
        # Server returns parallel "labels"/"scores" arrays; zip them into output elements.
        return [
            ZeroShotClassificationOutputElement.parse_obj_as_instance({"label": label, "score": score})
            for label, score in zip(output["labels"], output["scores"])
        ]

    async def zero_shot_image_classification(
        self, image: ContentT, labels: List[str], *, model: Optional[str] = None
    ) -> List[ZeroShotImageClassificationOutputElement]:
        """
        Provide input image and text labels to predict text labels for the image.

        Args:
            image (`Union[str, Path, bytes, BinaryIO]`):
                The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
            labels (`List[str]`):
                List of string possible labels. There must be at least 2 labels.
            model (`str`, *optional*):
                Model ID on the Hugging Face Hub or URL of a deployed Inference Endpoint. Overrides the
                instance-level model. Defaults to None.

        Returns:
            `List[ZeroShotImageClassificationOutputElement]`: Items containing the predicted labels and
            their confidence.

        Raises:
            [`InferenceTimeoutError`]:
                If the model is unavailable or the request times out.
            `aiohttp.ClientResponseError`:
                If the request fails with an HTTP error status code other than HTTP 503.
        """
        # Raise ValueError if input is less than 2 labels
        if len(labels) < 2:
            raise ValueError("You must specify at least 2 classes to compare.")

        response = await self.post(
            json={"image": _b64_encode(image), "parameters": {"candidate_labels": ",".join(labels)}},
            model=model,
            task="zero-shot-image-classification",
        )
        return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)

    def _resolve_url(self, model: Optional[str] = None, task: Optional[str] = None) -> str:
        """Resolve the Inference API URL for a given model and/or task.

        Precedence: explicit URL > explicit model ID > recommended model for `task`.
        Raises `ValueError` when neither a model nor a task is available.
        """
        model = model or self.model

        # If model is already a URL, ignore `task` and return directly
        if model is not None and (model.startswith("http://") or model.startswith("https://")):
            return model

        # # If no model but task is set => fetch the recommended one for this task
        if model is None:
            if task is None:
                raise ValueError(
                    "You must specify at least a model (repo_id or URL) or a task, either when instantiating"
                    " `InferenceClient` or when making a request."
                )
            model = self.get_recommended_model(task)
            logger.info(
                f"Using recommended model {model} for task {task}. Note that it is"
                f" encouraged to explicitly set `model='{model}'` as the recommended"
                " models list might get updated without prior notice."
            )

        # Compute InferenceAPI url
        return (
            # Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.
            f"{INFERENCE_ENDPOINT}/pipeline/{task}/{model}"
            if task in ("feature-extraction", "sentence-similarity")
            # Otherwise, we use the default endpoint
            else f"{INFERENCE_ENDPOINT}/models/{model}"
        )

    @staticmethod
    def get_recommended_model(task: str) -> str:
        """
        Get the model Hugging Face recommends for the input task.

        Args:
            task (`str`):
                The Hugging Face task to get which model Hugging Face recommends.
                All available tasks can be found [here](https://huggingface.co/tasks).

        Returns:
            `str`: Name of the model recommended for the input task.

        Raises:
            `ValueError`: If Hugging Face has no recommendation for the input task.
        """
        model = _fetch_recommended_models().get(task)
        if model is None:
            raise ValueError(
                f"Task {task} has no recommended model. Please specify a model"
                " explicitly. Visit https://huggingface.co/tasks for more info."
            )
        return model

    async def get_model_status(self, model: Optional[str] = None) -> ModelStatus:
        """
        Get the status of a model hosted on the Inference API.

        <Tip>

        This endpoint is mostly useful when you already know which model you want to use and want to check its
        availability. If you want to discover already deployed models, you should rather use
        [`~InferenceClient.list_deployed_models`].

        </Tip>

        Args:
            model (`str`, *optional*):
                Identifier of the model whose status will be checked. If not provided, the model associated
                with this instance of [`InferenceClient`] is used. Only Inference API models can be checked,
                so the identifier cannot be a URL.

        Returns:
            [`ModelStatus`]: Dataclass with the model's load state, compute type and framework.

        Raises:
            `ValueError`: If no model id is available, or the server reports an error for this model.
            `NotImplementedError`: If `model` is a URL (only Inference API ids are supported).

        Example:
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()
        >>> await client.get_model_status("bigcode/starcoder")
        ModelStatus(loaded=True, state='Loaded', compute_type='gpu', framework='text-generation-inference')
        ```
        """
        model = model or self.model
        if model is None:
            raise ValueError("Model id not provided.")
        if model.startswith("https://"):
            raise NotImplementedError("Model status is only available for Inference API endpoints.")
        url = f"{INFERENCE_ENDPOINT}/status/{model}"

        # Plain GET on the status endpoint; a fresh session is created per call.
        async with _import_aiohttp().ClientSession(headers=self.headers) as client:
            response = await client.get(url)
            response.raise_for_status()
            response_data = await response.json()

        # The endpoint may answer 200 with an "error" field; surface it explicitly.
        if "error" in response_data:
            raise ValueError(response_data["error"])

        return ModelStatus(
            loaded=response_data["loaded"],
            state=response_data["state"],
            compute_type=response_data["compute_type"],
            framework=response_data["framework"],
        )
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e79930374b57b209a78869a5fe8515ad469ca4a7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py
@@ -0,0 +1,132 @@
+# This file is auto-generated by `utils/generate_inference_types.py`.
+# Do not modify it manually.
+# +# ruff: noqa: F401 + +from .audio_classification import ( + AudioClassificationInput, + AudioClassificationOutputElement, + AudioClassificationParameters, +) +from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement +from .automatic_speech_recognition import ( + AutomaticSpeechRecognitionGenerationParameters, + AutomaticSpeechRecognitionInput, + AutomaticSpeechRecognitionOutput, + AutomaticSpeechRecognitionOutputChunk, + AutomaticSpeechRecognitionParameters, +) +from .base import BaseInferenceType +from .chat_completion import ( + ChatCompletionInput, + ChatCompletionInputFunctionDefinition, + ChatCompletionInputMessage, + ChatCompletionInputTool, + ChatCompletionInputToolCall, + ChatCompletionInputToolTypeClass, + ChatCompletionOutput, + ChatCompletionOutputComplete, + ChatCompletionOutputFunctionDefinition, + ChatCompletionOutputLogprob, + ChatCompletionOutputLogprobs, + ChatCompletionOutputMessage, + ChatCompletionOutputToolCall, + ChatCompletionOutputTopLogprob, + ChatCompletionOutputUsage, + ChatCompletionStreamOutput, + ChatCompletionStreamOutputChoice, + ChatCompletionStreamOutputDelta, + ChatCompletionStreamOutputDeltaToolCall, + ChatCompletionStreamOutputFunction, + ChatCompletionStreamOutputLogprob, + ChatCompletionStreamOutputLogprobs, + ChatCompletionStreamOutputTopLogprob, +) +from .depth_estimation import DepthEstimationInput, DepthEstimationOutput +from .document_question_answering import ( + DocumentQuestionAnsweringInput, + DocumentQuestionAnsweringInputData, + DocumentQuestionAnsweringOutputElement, + DocumentQuestionAnsweringParameters, +) +from .feature_extraction import FeatureExtractionInput +from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters +from .image_classification import ( + ImageClassificationInput, + ImageClassificationOutputElement, + ImageClassificationParameters, +) +from .image_segmentation import ImageSegmentationInput, ImageSegmentationOutputElement, ImageSegmentationParameters 
+from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize +from .image_to_text import ImageToTextGenerationParameters, ImageToTextInput, ImageToTextOutput, ImageToTextParameters +from .object_detection import ( + ObjectDetectionBoundingBox, + ObjectDetectionInput, + ObjectDetectionOutputElement, + ObjectDetectionParameters, +) +from .question_answering import ( + QuestionAnsweringInput, + QuestionAnsweringInputData, + QuestionAnsweringOutputElement, + QuestionAnsweringParameters, +) +from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData +from .summarization import SummarizationGenerationParameters, SummarizationInput, SummarizationOutput +from .table_question_answering import ( + TableQuestionAnsweringInput, + TableQuestionAnsweringInputData, + TableQuestionAnsweringOutputElement, +) +from .text2text_generation import Text2TextGenerationInput, Text2TextGenerationOutput, Text2TextGenerationParameters +from .text_classification import TextClassificationInput, TextClassificationOutputElement, TextClassificationParameters +from .text_generation import ( + TextGenerationInput, + TextGenerationInputGenerateParameters, + TextGenerationInputGrammarType, + TextGenerationOutput, + TextGenerationOutputBestOfSequence, + TextGenerationOutputDetails, + TextGenerationOutputPrefillToken, + TextGenerationOutputToken, + TextGenerationStreamOutput, + TextGenerationStreamOutputStreamDetails, + TextGenerationStreamOutputToken, +) +from .text_to_audio import TextToAudioGenerationParameters, TextToAudioInput, TextToAudioOutput, TextToAudioParameters +from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters, TextToImageTargetSize +from .token_classification import ( + TokenClassificationInput, + TokenClassificationOutputElement, + TokenClassificationParameters, +) +from .translation import TranslationGenerationParameters, TranslationInput, TranslationOutput +from 
.video_classification import ( + VideoClassificationInput, + VideoClassificationOutputElement, + VideoClassificationParameters, +) +from .visual_question_answering import ( + VisualQuestionAnsweringInput, + VisualQuestionAnsweringInputData, + VisualQuestionAnsweringOutputElement, + VisualQuestionAnsweringParameters, +) +from .zero_shot_classification import ( + ZeroShotClassificationInput, + ZeroShotClassificationInputData, + ZeroShotClassificationOutputElement, + ZeroShotClassificationParameters, +) +from .zero_shot_image_classification import ( + ZeroShotImageClassificationInput, + ZeroShotImageClassificationInputData, + ZeroShotImageClassificationOutputElement, + ZeroShotImageClassificationParameters, +) +from .zero_shot_object_detection import ( + ZeroShotObjectDetectionBoundingBox, + ZeroShotObjectDetectionInput, + ZeroShotObjectDetectionInputData, + ZeroShotObjectDetectionOutputElement, +) diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py new file mode 100644 index 0000000000000000000000000000000000000000..8783484d2d466a74c1c634508c39a7e9cb2851a3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py @@ -0,0 +1,149 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Contains a base class for all inference types.""" + +import inspect +import json +import warnings +from dataclasses import asdict, dataclass +from typing import Any, Dict, List, Type, TypeVar, Union, get_args + + +T = TypeVar("T", bound="BaseInferenceType") + + +@dataclass +class BaseInferenceType(dict): + """Base class for all inference types. + + Object is a dataclass and a dict for backward compatibility but plan is to remove the dict part in the future. + + Handle parsing from dict, list and json strings in a permissive way to ensure future-compatibility (e.g. all fields + are made optional, and non-expected fields are added as dict attributes). + """ + + @classmethod + def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]: + """Alias to parse server response and return a single instance. + + See `parse_obj` for more details. + """ + output = cls.parse_obj(data) + if not isinstance(output, list): + raise ValueError(f"Invalid input data for {cls}. Expected a list, but got {type(output)}.") + return output + + @classmethod + def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T: + """Alias to parse server response and return a single instance. + + See `parse_obj` for more details. + """ + output = cls.parse_obj(data) + if isinstance(output, list): + raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.") + return output + + @classmethod + def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]: + """Parse server response as a dataclass or list of dataclasses. + + To enable future-compatibility, we want to handle cases where the server return more fields than expected. + In such cases, we don't want to raise an error but still create the dataclass object. Remaining fields are + added as dict attributes. 
+ """ + # Parse server response (from bytes) + if isinstance(data, bytes): + data = data.decode() + if isinstance(data, str): + data = json.loads(data) + + # If a list, parse each item individually + if isinstance(data, List): + return [cls.parse_obj(d) for d in data] # type: ignore [misc] + + # At this point, we expect a dict + if not isinstance(data, dict): + raise ValueError(f"Invalid data type: {type(data)}") + + init_values = {} + other_values = {} + for key, value in data.items(): + key = normalize_key(key) + if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init: + if isinstance(value, dict) or isinstance(value, list): + field_type = cls.__dataclass_fields__[key].type + + # if `field_type` is a `BaseInferenceType`, parse it + if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType): + value = field_type.parse_obj(value) + + # otherwise, recursively parse nested dataclasses (if possible) + # `get_args` returns handle Union and Optional for us + else: + expected_types = get_args(field_type) + for expected_type in expected_types: + if getattr(expected_type, "_name", None) == "List": + expected_type = get_args(expected_type)[ + 0 + ] # assume same type for all items in the list + if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType): + value = expected_type.parse_obj(value) + break + init_values[key] = value + else: + other_values[key] = value + + # Make all missing fields default to None + # => ensure that dataclass initialization will never fail even if the server does not return all fields. 
+ for key in cls.__dataclass_fields__: + if key not in init_values: + init_values[key] = None + + # Initialize dataclass with expected values + item = cls(**init_values) + + # Add remaining fields as dict attributes + item.update(other_values) + return item + + def __post_init__(self): + self.update(asdict(self)) + + def __setitem__(self, __key: Any, __value: Any) -> None: + # Hacky way to keep dataclass values in sync when dict is updated + super().__setitem__(__key, __value) + if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value: + self.__setattr__(__key, __value) + return + + def __setattr__(self, __name: str, __value: Any) -> None: + # Hacky way to keep dict values is sync when dataclass is updated + super().__setattr__(__name, __value) + if self.get(__name) != __value: + self[__name] = __value + return + + def __getitem__(self, __key: Any) -> Any: + warnings.warn( + f"Accessing '{self.__class__.__name__}' values through dict is deprecated and " + "will be removed from version '0.25'. Use dataclass attributes instead.", + FutureWarning, + ) + return super().__getitem__(__key) + + +def normalize_key(key: str) -> str: + # e.g "content-type" -> "content_type", "Accept" -> "accept" + return key.replace("-", "_").replace(" ", "_").lower() diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebb9a9bc667bdb0d2afd7bb8e482fc18f6634d7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py @@ -0,0 +1,105 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. 
#
# See:
#  - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#  - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Literal, Optional, Union

from .base import BaseInferenceType


EarlyStoppingEnum = Literal["never"]


# NOTE: auto-generated module — keep field names and declaration order stable, the
# dataclass __init__ signature is public API.
@dataclass
class ImageToTextGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process
    Ad-hoc parametrization of the text generation process
    """

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over maxLength."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over maxLength."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a target
    token next is to the expected conditional probability of predicting a random token next,
    given the partial text already generated. If set to float < 1, the smallest set of the
    most locally typical tokens with probabilities that add up to typical_p or higher are
    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""


@dataclass
class ImageToTextParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Image To Text
    """

    generate: Optional[ImageToTextGenerationParameters] = None
    """Parametrization of the text generation process"""
    max_new_tokens: Optional[int] = None
    """The amount of maximum tokens to generate."""


@dataclass
class ImageToTextInput(BaseInferenceType):
    """Inputs for Image To Text inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[ImageToTextParameters] = None
    """Additional inference parameters"""


@dataclass
class ImageToTextOutput(BaseInferenceType):
    """Outputs of inference for the Image To Text task"""

    # NOTE(review): the codegen emits both an untyped `generated_text` and a typed
    # `image_to_text_output_generated_text` — presumably a schema quirk; confirm against
    # the upstream spec before relying on either field.
    generated_text: Any
    image_to_text_output_generated_text: Optional[str] = None
    """The generated text."""

# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#  - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#  - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Dict, Literal, Optional

from .base import BaseInferenceType


SummarizationGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]


# NOTE: auto-generated module — keep field names and declaration order stable, the
# dataclass __init__ signature is public API.
@dataclass
class SummarizationGenerationParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Text2text Generation
    """

    clean_up_tokenization_spaces: Optional[bool] = None
    """Whether to clean up the potential extra spaces in the text output."""
    generate_parameters: Optional[Dict[str, Any]] = None
    """Additional parametrization of the text generation algorithm"""
    truncation: Optional["SummarizationGenerationTruncationStrategy"] = None
    """The truncation strategy to use"""


@dataclass
class SummarizationInput(BaseInferenceType):
    """Inputs for Summarization inference
    Inputs for Text2text Generation inference
    """

    inputs: str
    """The input text data"""
    parameters: Optional[SummarizationGenerationParameters] = None
    """Additional inference parameters"""


@dataclass
class SummarizationOutput(BaseInferenceType):
    """Outputs of inference for the Summarization task"""

    summary_text: str
    """The summarized text."""

# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#  - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#  - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, List, Literal, Optional

from .base import BaseInferenceType


TypeEnum = Literal["json", "regex"]


# NOTE: auto-generated module (from TGI specs) — keep field names and declaration order
# stable, the dataclass __init__ signatures are public API.
@dataclass
class TextGenerationInputGrammarType(BaseInferenceType):
    type: "TypeEnum"
    value: Any
    """A string that represents a [JSON Schema](https://json-schema.org/).
    JSON Schema is a declarative language that allows to annotate JSON documents
    with types and descriptions.
    """


@dataclass
class TextGenerationInputGenerateParameters(BaseInferenceType):
    # All fields optional: the server applies its own defaults for unset parameters.
    best_of: Optional[int] = None
    decoder_input_details: Optional[bool] = None
    details: Optional[bool] = None
    do_sample: Optional[bool] = None
    frequency_penalty: Optional[float] = None
    grammar: Optional[TextGenerationInputGrammarType] = None
    max_new_tokens: Optional[int] = None
    repetition_penalty: Optional[float] = None
    return_full_text: Optional[bool] = None
    seed: Optional[int] = None
    stop: Optional[List[str]] = None
    temperature: Optional[float] = None
    top_k: Optional[int] = None
    top_n_tokens: Optional[int] = None
    top_p: Optional[float] = None
    truncate: Optional[int] = None
    typical_p: Optional[float] = None
    watermark: Optional[bool] = None


@dataclass
class TextGenerationInput(BaseInferenceType):
    """Text Generation Input.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    inputs: str
    parameters: Optional[TextGenerationInputGenerateParameters] = None
    stream: Optional[bool] = None


TextGenerationOutputFinishReason = Literal["length", "eos_token", "stop_sequence"]


@dataclass
class TextGenerationOutputPrefillToken(BaseInferenceType):
    id: int
    logprob: float
    text: str


@dataclass
class TextGenerationOutputToken(BaseInferenceType):
    id: int
    logprob: float
    special: bool
    text: str


@dataclass
class TextGenerationOutputBestOfSequence(BaseInferenceType):
    finish_reason: "TextGenerationOutputFinishReason"
    generated_text: str
    generated_tokens: int
    prefill: List[TextGenerationOutputPrefillToken]
    tokens: List[TextGenerationOutputToken]
    seed: Optional[int] = None
    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None


@dataclass
class TextGenerationOutputDetails(BaseInferenceType):
    finish_reason: "TextGenerationOutputFinishReason"
    generated_tokens: int
    prefill: List[TextGenerationOutputPrefillToken]
    tokens: List[TextGenerationOutputToken]
    best_of_sequences: Optional[List[TextGenerationOutputBestOfSequence]] = None
    seed: Optional[int] = None
    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None


@dataclass
class TextGenerationOutput(BaseInferenceType):
    """Text Generation Output.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    generated_text: str
    details: Optional[TextGenerationOutputDetails] = None


@dataclass
class TextGenerationStreamOutputStreamDetails(BaseInferenceType):
    finish_reason: "TextGenerationOutputFinishReason"
    generated_tokens: int
    seed: Optional[int] = None


@dataclass
class TextGenerationStreamOutputToken(BaseInferenceType):
    id: int
    logprob: float
    special: bool
    text: str


@dataclass
class TextGenerationStreamOutput(BaseInferenceType):
    """Text Generation Stream Output.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    index: int
    token: TextGenerationStreamOutputToken
    details: Optional[TextGenerationStreamOutputStreamDetails] = None
    generated_text: Optional[str] = None
    top_tokens: Optional[List[TextGenerationStreamOutputToken]] = None

# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#  - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#  - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Optional

from .base import BaseInferenceType


# NOTE: auto-generated module — keep field names and declaration order stable, the
# dataclass __init__ signatures are public API.
@dataclass
class VisualQuestionAnsweringInputData(BaseInferenceType):
    """One (image, question) pair to answer"""

    image: Any
    """The image."""
    question: Any
    """The question to answer based on the image."""


@dataclass
class VisualQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Visual Question Answering
    """

    top_k: Optional[int] = None
    """The number of answers to return (will be chosen by order of likelihood). Note that we
    return less than topk answers if there are not enough options available within the
    context.
    """


@dataclass
class VisualQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Visual Question Answering inference"""

    inputs: VisualQuestionAnsweringInputData
    """One (image, question) pair to answer"""
    parameters: Optional[VisualQuestionAnsweringParameters] = None
    """Additional inference parameters"""


@dataclass
class VisualQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Visual Question Answering task"""

    # NOTE(review): both an untyped `label` and a typed `answer` are emitted by the codegen —
    # presumably a schema quirk; confirm which one servers actually populate.
    label: Any
    score: float
    """The associated score / probability"""
    answer: Optional[str] = None
    """The answer to the question"""

#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +# ruff: noqa: F401 + +from huggingface_hub.errors import ( + HFValidationError, + LocalTokenNotFoundError, + NotASafetensorsRepoError, + OfflineModeIsEnabled, + SafetensorsParsingError, +) + +from . import tqdm as _tqdm # _tqdm is the module +from ._cache_assets import cached_assets_path +from ._cache_manager import ( + CachedFileInfo, + CachedRepoInfo, + CachedRevisionInfo, + CacheNotFound, + CorruptedCacheException, + DeleteCacheStrategy, + HFCacheInfo, + scan_cache_dir, +) +from ._chunk_utils import chunk_iterable +from ._datetime import parse_datetime +from ._errors import ( + BadRequestError, + DisabledRepoError, + EntryNotFoundError, + FileMetadataError, + GatedRepoError, + HfHubHTTPError, + LocalEntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + hf_raise_for_status, +) +from ._experimental import experimental +from ._fixes import SoftTemporaryDirectory, WeakFileLock, yaml_dump +from ._git_credential import list_credential_helpers, set_git_credential, unset_git_credential +from ._headers import build_hf_headers, get_token_to_send +from ._hf_folder import HfFolder +from ._http import ( + configure_http_backend, + fix_hf_endpoint_in_url, + get_session, + http_backoff, + reset_sessions, +) +from ._pagination import paginate +from ._paths import DEFAULT_IGNORE_PATTERNS, FORBIDDEN_FOLDERS, filter_repo_objects +from ._runtime import ( + dump_environment_info, + get_aiohttp_version, + get_fastai_version, + get_fastapi_version, + get_fastcore_version, + get_gradio_version, + get_graphviz_version, + get_hf_hub_version, + 
get_hf_transfer_version, + get_jinja_version, + get_minijinja_version, + get_numpy_version, + get_pillow_version, + get_pydantic_version, + get_pydot_version, + get_python_version, + get_tensorboard_version, + get_tf_version, + get_torch_version, + is_aiohttp_available, + is_fastai_available, + is_fastapi_available, + is_fastcore_available, + is_google_colab, + is_gradio_available, + is_graphviz_available, + is_hf_transfer_available, + is_jinja_available, + is_minijinja_available, + is_notebook, + is_numpy_available, + is_package_available, + is_pillow_available, + is_pydantic_available, + is_pydot_available, + is_safetensors_available, + is_tensorboard_available, + is_tf_available, + is_torch_available, +) +from ._safetensors import ( + SafetensorsFileMetadata, + SafetensorsRepoMetadata, + TensorInfo, +) +from ._subprocess import capture_output, run_interactive_subprocess, run_subprocess +from ._telemetry import send_telemetry +from ._token import get_token +from ._typing import is_jsonable +from ._validators import ( + smoothly_deprecate_use_auth_token, + validate_hf_hub_args, + validate_repo_id, +) +from .tqdm import ( + are_progress_bars_disabled, + disable_progress_bars, + enable_progress_bars, + tqdm, + tqdm_stream_file, +) diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..70e97f0c24101243bce003f0ca55d852c02a48cd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py @@ -0,0 +1,397 @@ +import re +from typing import Optional + +from requests import HTTPError, Response + +from ._fixes import JSONDecodeError + + +REPO_API_REGEX = re.compile( + r""" + # staging or production endpoint + ^https://[^/]+ + ( + # on /api/repo_type/repo_id + /api/(models|datasets|spaces)/(.+) + | + # or /repo_id/resolve/revision/... 
+ /(.+)/resolve/(.+) + ) + """, + flags=re.VERBOSE, +) + + +class FileMetadataError(OSError): + """Error triggered when the metadata of a file on the Hub cannot be retrieved (missing ETag or commit_hash). + + Inherits from `OSError` for backward compatibility. + """ + + +class HfHubHTTPError(HTTPError): + """ + HTTPError to inherit from for any custom HTTP Error raised in HF Hub. + + Any HTTPError is converted at least into a `HfHubHTTPError`. If some information is + sent back by the server, it will be added to the error message. + + Added details: + - Request id from "X-Request-Id" header if exists. + - Server error message from the header "X-Error-Message". + - Server error message if we can found one in the response body. + + Example: + ```py + import requests + from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError + + response = get_session().post(...) + try: + hf_raise_for_status(response) + except HfHubHTTPError as e: + print(str(e)) # formatted message + e.request_id, e.server_message # details returned by server + + # Complete the error message with additional information once it's raised + e.append_to_message("\n`create_commit` expects the repository to exist.") + raise + ``` + """ + + request_id: Optional[str] = None + server_message: Optional[str] = None + + def __init__(self, message: str, response: Optional[Response] = None): + # Parse server information if any. 
+ if response is not None: + self.request_id = response.headers.get("X-Request-Id") + try: + server_data = response.json() + except JSONDecodeError: + server_data = {} + + # Retrieve server error message from multiple sources + server_message_from_headers = response.headers.get("X-Error-Message") + server_message_from_body = server_data.get("error") + server_multiple_messages_from_body = "\n".join( + error["message"] for error in server_data.get("errors", []) if "message" in error + ) + + # Concatenate error messages + _server_message = "" + if server_message_from_headers is not None: # from headers + _server_message += server_message_from_headers + "\n" + if server_message_from_body is not None: # from body "error" + if isinstance(server_message_from_body, list): + server_message_from_body = "\n".join(server_message_from_body) + if server_message_from_body not in _server_message: + _server_message += server_message_from_body + "\n" + if server_multiple_messages_from_body is not None: # from body "errors" + if server_multiple_messages_from_body not in _server_message: + _server_message += server_multiple_messages_from_body + "\n" + _server_message = _server_message.strip() + + # Set message to `HfHubHTTPError` (if any) + if _server_message != "": + self.server_message = _server_message + + super().__init__( + _format_error_message( + message, + request_id=self.request_id, + server_message=self.server_message, + ), + response=response, # type: ignore + request=response.request if response is not None else None, # type: ignore + ) + + def append_to_message(self, additional_message: str) -> None: + """Append additional information to the `HfHubHTTPError` initial message.""" + self.args = (self.args[0] + additional_message,) + self.args[1:] + + +class RepositoryNotFoundError(HfHubHTTPError): + """ + Raised when trying to access a hf.co URL with an invalid repository name, or + with a private repo name the user does not have access to. 
+ + Example: + + ```py + >>> from huggingface_hub import model_info + >>> model_info("") + (...) + huggingface_hub.utils._errors.RepositoryNotFoundError: 401 Client Error. (Request ID: PvMw_VjBMjVdMz53WKIzP) + + Repository Not Found for url: https://huggingface.co/api/models/%3Cnon_existent_repository%3E. + Please make sure you specified the correct `repo_id` and `repo_type`. + If the repo is private, make sure you are authenticated. + Invalid username or password. + ``` + """ + + +class GatedRepoError(RepositoryNotFoundError): + """ + Raised when trying to access a gated repository for which the user is not on the + authorized list. + + Note: derives from `RepositoryNotFoundError` to ensure backward compatibility. + + Example: + + ```py + >>> from huggingface_hub import model_info + >>> model_info("") + (...) + huggingface_hub.utils._errors.GatedRepoError: 403 Client Error. (Request ID: ViT1Bf7O_026LGSQuVqfa) + + Cannot access gated repo for url https://huggingface.co/api/models/ardent-figment/gated-model. + Access to model ardent-figment/gated-model is restricted and you are not in the authorized list. + Visit https://huggingface.co/ardent-figment/gated-model to ask for access. + ``` + """ + + +class DisabledRepoError(HfHubHTTPError): + """ + Raised when trying to access a repository that has been disabled by its author. + + Example: + + ```py + >>> from huggingface_hub import dataset_info + >>> dataset_info("laion/laion-art") + (...) + huggingface_hub.utils._errors.DisabledRepoError: 403 Client Error. (Request ID: Root=1-659fc3fa-3031673e0f92c71a2260dbe2;bc6f4dfb-b30a-4862-af0a-5cfe827610d8) + + Cannot access repository for url https://huggingface.co/api/datasets/laion/laion-art. + Access to this resource is disabled. + ``` + """ + + +class RevisionNotFoundError(HfHubHTTPError): + """ + Raised when trying to access a hf.co URL with a valid repository but an invalid + revision. 
+ + Example: + + ```py + >>> from huggingface_hub import hf_hub_download + >>> hf_hub_download('bert-base-cased', 'config.json', revision='') + (...) + huggingface_hub.utils._errors.RevisionNotFoundError: 404 Client Error. (Request ID: Mwhe_c3Kt650GcdKEFomX) + + Revision Not Found for url: https://huggingface.co/bert-base-cased/resolve/%3Cnon-existent-revision%3E/config.json. + ``` + """ + + +class EntryNotFoundError(HfHubHTTPError): + """ + Raised when trying to access a hf.co URL with a valid repository and revision + but an invalid filename. + + Example: + + ```py + >>> from huggingface_hub import hf_hub_download + >>> hf_hub_download('bert-base-cased', '') + (...) + huggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. (Request ID: 53pNl6M0MxsnG5Sw8JA6x) + + Entry Not Found for url: https://huggingface.co/bert-base-cased/resolve/main/%3Cnon-existent-file%3E. + ``` + """ + + +class LocalEntryNotFoundError(EntryNotFoundError, FileNotFoundError, ValueError): + """ + Raised when trying to access a file or snapshot that is not on the disk when network is + disabled or unavailable (connection issue). The entry may exist on the Hub. + + Note: `ValueError` type is to ensure backward compatibility. + Note: `LocalEntryNotFoundError` derives from `HTTPError` because of `EntryNotFoundError` + even when it is not a network issue. + + Example: + + ```py + >>> from huggingface_hub import hf_hub_download + >>> hf_hub_download('bert-base-cased', '', local_files_only=True) + (...) + huggingface_hub.utils._errors.LocalEntryNotFoundError: Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co look-ups and downloads online, set 'local_files_only' to False. + ``` + """ + + def __init__(self, message: str): + super().__init__(message, response=None) + + +class BadRequestError(HfHubHTTPError, ValueError): + """ + Raised by `hf_raise_for_status` when the server returns a HTTP 400 error. 
+ + Example: + + ```py + >>> resp = requests.post("hf.co/api/check", ...) + >>> hf_raise_for_status(resp, endpoint_name="check") + huggingface_hub.utils._errors.BadRequestError: Bad request for check endpoint: {details} (Request ID: XXX) + ``` + """ + + +def hf_raise_for_status(response: Response, endpoint_name: Optional[str] = None) -> None: + """ + Internal version of `response.raise_for_status()` that will refine a + potential HTTPError. Raised exception will be an instance of `HfHubHTTPError`. + + This helper is meant to be the unique method to raise_for_status when making a call + to the Hugging Face Hub. + + Example: + ```py + import requests + from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError + + response = get_session().post(...) + try: + hf_raise_for_status(response) + except HfHubHTTPError as e: + print(str(e)) # formatted message + e.request_id, e.server_message # details returned by server + + # Complete the error message with additional information once it's raised + e.append_to_message("\n`create_commit` expects the repository to exist.") + raise + ``` + + Args: + response (`Response`): + Response from the server. + endpoint_name (`str`, *optional*): + Name of the endpoint that has been called. If provided, the error message + will be more complete. + + + + Raises when the request has failed: + + - [`~utils.RepositoryNotFoundError`] + If the repository to download from cannot be found. This may be because it + doesn't exist, because `repo_type` is not set correctly, or because the repo + is `private` and you do not have access. + - [`~utils.GatedRepoError`] + If the repository exists but is gated and the user is not on the authorized + list. + - [`~utils.RevisionNotFoundError`] + If the repository exists but the revision couldn't be find. + - [`~utils.EntryNotFoundError`] + If the repository exists but the entry (e.g. the requested file) couldn't be + find. 
+ - [`~utils.BadRequestError`] + If request failed with a HTTP 400 BadRequest error. + - [`~utils.HfHubHTTPError`] + If request failed for a reason not listed above. + + + """ + try: + response.raise_for_status() + except HTTPError as e: + error_code = response.headers.get("X-Error-Code") + error_message = response.headers.get("X-Error-Message") + + if error_code == "RevisionNotFound": + message = f"{response.status_code} Client Error." + "\n\n" + f"Revision Not Found for url: {response.url}." + raise RevisionNotFoundError(message, response) from e + + elif error_code == "EntryNotFound": + message = f"{response.status_code} Client Error." + "\n\n" + f"Entry Not Found for url: {response.url}." + raise EntryNotFoundError(message, response) from e + + elif error_code == "GatedRepo": + message = ( + f"{response.status_code} Client Error." + "\n\n" + f"Cannot access gated repo for url {response.url}." + ) + raise GatedRepoError(message, response) from e + + elif error_message == "Access to this resource is disabled.": + message = ( + f"{response.status_code} Client Error." + + "\n\n" + + f"Cannot access repository for url {response.url}." + + "\n" + + "Access to this resource is disabled." + ) + raise DisabledRepoError(message, response) from e + + elif error_code == "RepoNotFound" or ( + response.status_code == 401 + and response.request is not None + and response.request.url is not None + and REPO_API_REGEX.search(response.request.url) is not None + ): + # 401 is misleading as it is returned for: + # - private and gated repos if user is not authenticated + # - missing repos + # => for now, we process them as `RepoNotFound` anyway. + # See https://gist.github.com/Wauplin/46c27ad266b15998ce56a6603796f0b9 + message = ( + f"{response.status_code} Client Error." + + "\n\n" + + f"Repository Not Found for url: {response.url}." 
+ + "\nPlease make sure you specified the correct `repo_id` and" + " `repo_type`.\nIf you are trying to access a private or gated repo," + " make sure you are authenticated." + ) + raise RepositoryNotFoundError(message, response) from e + + elif response.status_code == 400: + message = ( + f"\n\nBad request for {endpoint_name} endpoint:" if endpoint_name is not None else "\n\nBad request:" + ) + raise BadRequestError(message, response=response) from e + + elif response.status_code == 403: + message = ( + f"\n\n{response.status_code} Forbidden: {error_message}." + + f"\nCannot access content at: {response.url}." + + "\nIf you are trying to create or update content," + + "make sure you have a token with the `write` role." + ) + raise HfHubHTTPError(message, response=response) from e + + # Convert `HTTPError` into a `HfHubHTTPError` to display request information + # as well (request id and/or server error message) + raise HfHubHTTPError(str(e), response=response) from e + + +def _format_error_message(message: str, request_id: Optional[str], server_message: Optional[str]) -> str: + """ + Format the `HfHubHTTPError` error message based on initial message and information + returned by the server. + + Used when initializing `HfHubHTTPError`. 
+ """ + # Add message from response body + if server_message is not None and len(server_message) > 0 and server_message.lower() not in message.lower(): + if "\n\n" in message: + message += "\n" + server_message + else: + message += "\n\n" + server_message + + # Add Request ID + if request_id is not None and str(request_id).lower() not in message.lower(): + request_id_message = f" (Request ID: {request_id})" + if "\n" in message: + newline_index = message.index("\n") + message = message[:newline_index] + request_id_message + message[newline_index:] + else: + message += request_id_message + + return message diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py new file mode 100644 index 0000000000000000000000000000000000000000..34141eba09123c06fbca55c929a19a0264e5788e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains utilities to flag a feature as "experimental" in Huggingface Hub.""" + +import warnings +from functools import wraps +from typing import Callable + +from .. import constants + + +def experimental(fn: Callable) -> Callable: + """Decorator to flag a feature as experimental. 
+ + An experimental feature trigger a warning when used as it might be subject to breaking changes in the future. + Warnings can be disabled by setting the environment variable `HF_EXPERIMENTAL_WARNING` to `0`. + + Args: + fn (`Callable`): + The function to flag as experimental. + + Returns: + `Callable`: The decorated function. + + Example: + + ```python + >>> from huggingface_hub.utils import experimental + + >>> @experimental + ... def my_function(): + ... print("Hello world!") + + >>> my_function() + UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future. You can disable + this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment variable. + Hello world! + ``` + """ + # For classes, put the "experimental" around the "__new__" method => __new__ will be removed in warning message + name = fn.__qualname__[: -len(".__new__")] if fn.__qualname__.endswith(".__new__") else fn.__qualname__ + + @wraps(fn) + def _inner_fn(*args, **kwargs): + if not constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING: + warnings.warn( + f"'{name}' is experimental and might be subject to breaking changes in the future." + " You can disable this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment" + " variable.", + UserWarning, + ) + return fn(*args, **kwargs) + + return _inner_fn diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py new file mode 100644 index 0000000000000000000000000000000000000000..28810dd7050883daa4204efb75b64900062206fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py @@ -0,0 +1,94 @@ +# JSONDecodeError was introduced in requests=2.27 released in 2022. 
# This allows us to support older requests for users
# More information: https://github.com/psf/requests/pull/5856
try:
    from requests import JSONDecodeError  # type: ignore  # noqa: F401
except ImportError:
    try:
        from simplejson import JSONDecodeError  # type: ignore  # noqa: F401
    except ImportError:
        from json import JSONDecodeError  # type: ignore  # noqa: F401
import contextlib
import os
import shutil
import stat
import tempfile
from functools import partial
from pathlib import Path
from typing import Callable, Generator, Optional, Union

import yaml
from filelock import BaseFileLock, FileLock


# Wrap `yaml.dump` to set `allow_unicode=True` by default.
#
# Example:
# ```py
# >>> yaml.dump({"emoji": "👀", "some unicode": "日本か"})
# 'emoji: "\\U0001F440"\nsome unicode: "\\u65E5\\u672C\\u304B"\n'
#
# >>> yaml_dump({"emoji": "👀", "some unicode": "日本か"})
# 'emoji: "👀"\nsome unicode: "日本か"\n'
# ```
yaml_dump: Callable[..., str] = partial(yaml.dump, stream=None, allow_unicode=True)  # type: ignore


@contextlib.contextmanager
def SoftTemporaryDirectory(
    suffix: Optional[str] = None,
    prefix: Optional[str] = None,
    dir: Optional[Union[Path, str]] = None,
    **kwargs,
) -> Generator[Path, None, None]:
    """
    Context manager to create a temporary directory and safely delete it.

    If tmp directory cannot be deleted normally, we set the WRITE permission and retry.
    If cleanup still fails, we give up but don't raise an exception. This is equivalent
    to `tempfile.TemporaryDirectory(..., ignore_cleanup_errors=True)` introduced in
    Python 3.10.

    See https://www.scivision.dev/python-tempfile-permission-error-windows/.
    """
    tmpdir = tempfile.TemporaryDirectory(prefix=prefix, suffix=suffix, dir=dir, **kwargs)
    # FIX: the cleanup code below originally ran *after* a bare `yield`, so an
    # exception raised inside the `with` body propagated through the generator and
    # skipped cleanup entirely, leaking the temporary directory. In a
    # `@contextmanager` generator, cleanup must sit in a `finally` clause.
    try:
        yield Path(tmpdir.name).resolve()
    finally:
        try:
            # First once with normal cleanup
            shutil.rmtree(tmpdir.name)
        except Exception:
            # If failed, try to set write permission and retry
            try:
                shutil.rmtree(tmpdir.name, onerror=_set_write_permission_and_retry)
            except Exception:
                pass

        # And finally, cleanup the tmpdir.
        # If it fails again, give up but do not throw error
        try:
            tmpdir.cleanup()
        except Exception:
            pass


def _set_write_permission_and_retry(func, path, excinfo):
    # On Windows, read-only entries cannot be removed: grant write permission, then
    # re-invoke the failing function (`os.unlink`/`os.rmdir`) on the same path.
    os.chmod(path, stat.S_IWRITE)
    func(path)


@contextlib.contextmanager
def WeakFileLock(lock_file: Union[str, Path]) -> Generator[BaseFileLock, None, None]:
    """A filelock that won't raise an exception if release fails."""
    lock = FileLock(lock_file)
    lock.acquire()

    # FIX: release originally ran after a bare `yield`, so an exception inside the
    # `with` body skipped it and kept the lock held. Release in `finally` instead.
    try:
        yield lock
    finally:
        try:
            lock.release()
        except OSError:
            # Best-effort fallback: remove the lock file so other processes are not
            # blocked by a lock we failed to release.
            try:
                Path(lock_file).unlink()
            except OSError:
                pass
# diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..a8ed77f4e49ca88ff4fa9aba48cbf00195036013
# --- /dev/null
# +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py
# @@ -0,0 +1,121 @@
# coding=utf-8
# Copyright 2022-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities to manage Git credentials."""

import re
import subprocess
from typing import List, Optional

from ..constants import ENDPOINT
from ._subprocess import run_interactive_subprocess, run_subprocess


GIT_CREDENTIAL_REGEX = re.compile(
    r"""
        ^\s* # start of line
        credential\.helper # credential.helper value
        \s*=\s* # separator
        (\w+) # the helper name (group 1)
        (\s|$) # whitespace or end of line
    """,
    flags=re.MULTILINE | re.IGNORECASE | re.VERBOSE,
)


def list_credential_helpers(folder: Optional[str] = None) -> List[str]:
    """Return the list of git credential helpers configured.

    See https://git-scm.com/docs/gitcredentials.

    Runs "`git config --list`" and extracts the configured `credential.helper`
    entries (store, cache, macOS keychain,...). (The original docstring incorrectly
    claimed this calls "`git credential approve`".)

    Args:
        folder (`str`, *optional*):
            The folder in which to check the configured helpers.

    Raises:
        `EnvironmentError`: if the `git config` subprocess fails.
    """
    try:
        output = run_subprocess("git config --list", folder=folder).stdout
        parsed = _parse_credential_output(output)
        return parsed
    except subprocess.CalledProcessError as exc:
        raise EnvironmentError(exc.stderr)


def set_git_credential(token: str, username: str = "hf_user", folder: Optional[str] = None) -> None:
    """Save a username/token pair in git credential for HF Hub registry.

    Credentials are saved in all configured helpers (store, cache, macOS keychain,...).
    Calls "`git credential approve`" internally. See https://git-scm.com/docs/git-credential.

    Args:
        token (`str`):
            A git password. In practice, the User Access Token for the Hub.
            See https://huggingface.co/settings/tokens.
        username (`str`, defaults to `"hf_user"`):
            A git username. Defaults to `"hf_user"`, the default user used in the Hub.
        folder (`str`, *optional*):
            The folder in which to check the configured helpers.
    """
    with run_interactive_subprocess("git credential approve", folder=folder) as (
        stdin,
        _,
    ):
        stdin.write(f"url={ENDPOINT}\nusername={username.lower()}\npassword={token}\n\n")
        stdin.flush()


def unset_git_credential(username: str = "hf_user", folder: Optional[str] = None) -> None:
    """Erase credentials from git credential for HF Hub registry.

    Credentials are erased from the configured helpers (store, cache, macOS
    keychain,...), if any. If `username` is explicitly set to `None`, any credential
    configured for the HF Hub endpoint is erased regardless of the username.
    Calls "`git credential reject`" internally. See https://git-scm.com/docs/git-credential.
    (The original docstring incorrectly said "`git credential erase`".)

    Args:
        username (`str`, defaults to `"hf_user"`):
            A git username. Defaults to `"hf_user"`, the default user used in the Hub.
        folder (`str`, *optional*):
            The folder in which to check the configured helpers.
    """
    with run_interactive_subprocess("git credential reject", folder=folder) as (
        stdin,
        _,
    ):
        standard_input = f"url={ENDPOINT}\n"
        if username is not None:
            standard_input += f"username={username.lower()}\n"
        standard_input += "\n"

        stdin.write(standard_input)
        stdin.flush()


def _parse_credential_output(output: str) -> List[str]:
    """Parse the output of `git config --list` to extract the credential helper names.

    (The original docstring incorrectly claimed this parses `git credential fill`
    output to extract a password.)

    Args:
        output (`str`):
            The output of `git config --list`.
    """
    # NOTE: If user has set an helper for a custom URL, it will not be caught here.
    #       Example: `credential.https://huggingface.co.helper=store`
    #       See: https://github.com/huggingface/huggingface_hub/pull/1138#discussion_r1013324508
    return sorted(  # Sort for nice printing
        set(  # Might have some duplicates
            match[0] for match in GIT_CREDENTIAL_REGEX.findall(output)
        )
    )
# diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_http.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_http.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..495152ca1596afff353b7d0dd60ba92b729c87c8
# --- /dev/null
# +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_http.py
# @@ -0,0 +1,319 @@
# coding=utf-8
# Copyright 2022-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities to handle HTTP requests in Huggingface Hub."""

import io
import os
import threading
import time
import uuid
from functools import lru_cache
from http import HTTPStatus
from typing import Callable, Optional, Tuple, Type, Union

import requests
from requests import Response
from requests.adapters import HTTPAdapter
from requests.models import PreparedRequest

from huggingface_hub.errors import OfflineModeIsEnabled

from .. import constants
from . import logging
from ._typing import HTTP_METHOD_T


logger = logging.get_logger(__name__)

# Both headers are used by the Hub to debug failed requests.
# `X_AMZN_TRACE_ID` is better as it also works to debug on Cloudfront and ALB.
# If `X_AMZN_TRACE_ID` is set, the Hub will use it as well.
X_AMZN_TRACE_ID = "X-Amzn-Trace-Id"
X_REQUEST_ID = "x-request-id"


class UniqueRequestIdAdapter(HTTPAdapter):
    """HTTP adapter that attaches a unique request id header to every outgoing request."""

    X_AMZN_TRACE_ID = "X-Amzn-Trace-Id"

    def add_headers(self, request, **kwargs):
        super().add_headers(request, **kwargs)

        # Add random request ID => easier for server-side debug
        if X_AMZN_TRACE_ID not in request.headers:
            request.headers[X_AMZN_TRACE_ID] = request.headers.get(X_REQUEST_ID) or str(uuid.uuid4())

        # Add debug log
        has_token = str(request.headers.get("authorization", "")).startswith("Bearer hf_")
        logger.debug(
            f"Request {request.headers[X_AMZN_TRACE_ID]}: {request.method} {request.url} (authenticated: {has_token})"
        )

    def send(self, request: PreparedRequest, *args, **kwargs) -> Response:
        """Catch any RequestException to append request id to the error message for debugging."""
        try:
            return super().send(request, *args, **kwargs)
        except requests.RequestException as e:
            request_id = request.headers.get(X_AMZN_TRACE_ID)
            if request_id is not None:
                # Taken from https://stackoverflow.com/a/58270258
                e.args = (*e.args, f"(Request ID: {request_id})")
            raise


class OfflineAdapter(HTTPAdapter):
    """HTTP adapter that rejects every request, used when `HF_HUB_OFFLINE` is set."""

    def send(self, request: PreparedRequest, *args, **kwargs) -> Response:
        raise OfflineModeIsEnabled(
            f"Cannot reach {request.url}: offline mode is enabled. To disable it, please unset the `HF_HUB_OFFLINE` environment variable."
        )


def _default_backend_factory() -> requests.Session:
    # Default session factory: offline adapter when HF_HUB_OFFLINE is set, otherwise
    # an adapter that tags each request with a unique id for server-side debugging.
    session = requests.Session()
    if constants.HF_HUB_OFFLINE:
        session.mount("http://", OfflineAdapter())
        session.mount("https://", OfflineAdapter())
    else:
        session.mount("http://", UniqueRequestIdAdapter())
        session.mount("https://", UniqueRequestIdAdapter())
    return session


BACKEND_FACTORY_T = Callable[[], requests.Session]
_GLOBAL_BACKEND_FACTORY: BACKEND_FACTORY_T = _default_backend_factory


def configure_http_backend(backend_factory: BACKEND_FACTORY_T = _default_backend_factory) -> None:
    """
    Configure the HTTP backend by providing a `backend_factory`. Any HTTP calls made by `huggingface_hub` will use a
    Session object instantiated by this factory. This can be useful if you are running your scripts in a specific
    environment requiring custom configuration (e.g. custom proxy or certifications).

    Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,
    `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`
    set in [`configure_http_backend`]. A LRU cache is used to cache the created sessions (and connections) between
    calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned.

    See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.

    Example:
    ```py
    import requests
    from huggingface_hub import configure_http_backend, get_session

    # Create a factory function that returns a Session with configured proxies
    def backend_factory() -> requests.Session:
        session = requests.Session()
        session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}
        return session

    # Set it as the default session factory
    configure_http_backend(backend_factory=backend_factory)

    # In practice, this is mostly done internally in `huggingface_hub`
    session = get_session()
    ```
    """
    global _GLOBAL_BACKEND_FACTORY
    _GLOBAL_BACKEND_FACTORY = backend_factory
    # Drop already-created sessions so the new factory takes effect everywhere.
    reset_sessions()


def get_session() -> requests.Session:
    """
    Get a `requests.Session` object, using the session factory from the user.

    Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,
    `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`
    set in [`configure_http_backend`]. A LRU cache is used to cache the created sessions (and connections) between
    calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned.

    See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.

    Example:
    ```py
    import requests
    from huggingface_hub import configure_http_backend, get_session

    # Create a factory function that returns a Session with configured proxies
    def backend_factory() -> requests.Session:
        session = requests.Session()
        session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}
        return session

    # Set it as the default session factory
    configure_http_backend(backend_factory=backend_factory)

    # In practice, this is mostly done internally in `huggingface_hub`
    session = get_session()
    ```
    """
    return _get_session_from_cache(process_id=os.getpid(), thread_id=threading.get_ident())


def reset_sessions() -> None:
    """Reset the cache of sessions.

    Mostly used internally when sessions are reconfigured or an SSLError is raised.
    See [`configure_http_backend`] for more details.
    """
    _get_session_from_cache.cache_clear()


@lru_cache
def _get_session_from_cache(process_id: int, thread_id: int) -> requests.Session:
    """
    Create a new session per thread using global factory. Using LRU cache (maxsize 128) to avoid memory leaks when
    using thousands of threads. Cache is cleared when `configure_http_backend` is called.
    """
    return _GLOBAL_BACKEND_FACTORY()


def http_backoff(
    method: HTTP_METHOD_T,
    url: str,
    *,
    max_retries: int = 5,
    base_wait_time: float = 1,
    max_wait_time: float = 8,
    retry_on_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = (
        requests.Timeout,
        requests.ConnectionError,
    ),
    retry_on_status_codes: Union[int, Tuple[int, ...]] = HTTPStatus.SERVICE_UNAVAILABLE,
    **kwargs,
) -> Response:
    """Wrapper around requests to retry calls on an endpoint, with exponential backoff.

    Endpoint call is retried on exceptions (ex: connection timeout, proxy error,...)
    and/or on specific status codes (ex: service unavailable). If the call failed more
    than `max_retries`, the exception is thrown or `raise_for_status` is called on the
    response object.

    Re-implement mechanisms from the `backoff` library to avoid adding an external
    dependencies to `hugging_face_hub`. See https://github.com/litl/backoff.

    Args:
        method (`Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"]`):
            HTTP method to perform.
        url (`str`):
            The URL of the resource to fetch.
        max_retries (`int`, *optional*, defaults to `5`):
            Maximum number of retries (set to `0` to disable retries). The original
            docstring read "defaults to 5 (no retries)", which was self-contradictory.
        base_wait_time (`float`, *optional*, defaults to `1`):
            Duration (in seconds) to wait before retrying the first time.
            Wait time between retries then grows exponentially, capped by
            `max_wait_time`.
        max_wait_time (`float`, *optional*, defaults to `8`):
            Maximum duration (in seconds) to wait before retrying.
        retry_on_exceptions (`Type[Exception]` or `Tuple[Type[Exception]]`, *optional*):
            Define which exceptions must be caught to retry the request. Can be a single type or a tuple of types.
            By default, retry on `requests.Timeout` and `requests.ConnectionError`.
        retry_on_status_codes (`int` or `Tuple[int]`, *optional*, defaults to `503`):
            Define on which status codes the request must be retried. By default, only
            HTTP 503 Service Unavailable is retried.
        **kwargs (`dict`, *optional*):
            kwargs to pass to `requests.request`.

    Example:
    ```
    >>> from huggingface_hub.utils import http_backoff

    # Same usage as "requests.request".
    >>> response = http_backoff("GET", "https://www.google.com")
    >>> response.raise_for_status()

    # If you expect a Gateway Timeout from time to time
    >>> http_backoff("PUT", upload_url, data=data, retry_on_status_codes=504)
    >>> response.raise_for_status()
    ```

    <Tip warning={true}>

    When using `requests` it is possible to stream data by passing an iterator to the
    `data` argument. On http backoff this is a problem as the iterator is not reset
    after a failed call. This issue is mitigated for file objects or any IO streams
    by saving the initial position of the cursor (with `data.tell()`) and resetting the
    cursor between each call (with `data.seek()`). For arbitrary iterators, http backoff
    will fail. If this is a hard constraint for you, please let us know by opening an
    issue on [Github](https://github.com/huggingface/huggingface_hub).

    </Tip>
    """
    if isinstance(retry_on_exceptions, type):  # Tuple from single exception type
        retry_on_exceptions = (retry_on_exceptions,)

    if isinstance(retry_on_status_codes, int):  # Tuple from single status code
        retry_on_status_codes = (retry_on_status_codes,)

    nb_tries = 0
    sleep_time = base_wait_time

    # If `data` is used and is a file object (or any IO), it will be consumed on the
    # first HTTP request. We need to save the initial position so that the full content
    # of the file is re-sent on http backoff. See warning tip in docstring.
    io_obj_initial_pos = None
    if "data" in kwargs and isinstance(kwargs["data"], io.IOBase):
        io_obj_initial_pos = kwargs["data"].tell()

    session = get_session()
    while True:
        nb_tries += 1
        try:
            # If `data` is used and is a file object (or any IO), set back cursor to
            # initial position.
            if io_obj_initial_pos is not None:
                kwargs["data"].seek(io_obj_initial_pos)

            # Perform request and return if status_code is not in the retry list.
            response = session.request(method=method, url=url, **kwargs)
            if response.status_code not in retry_on_status_codes:
                return response

            # Wrong status code returned (HTTP 503 for instance)
            logger.warning(f"HTTP Error {response.status_code} thrown while requesting {method} {url}")
            if nb_tries > max_retries:
                response.raise_for_status()  # Will raise uncaught exception
                # We return response to avoid infinite loop in the corner case where the
                # user ask for retry on a status code that doesn't raise_for_status.
                return response

        except retry_on_exceptions as err:
            logger.warning(f"'{err}' thrown while requesting {method} {url}")

            if isinstance(err, requests.ConnectionError):
                reset_sessions()  # In case of SSLError it's best to reset the shared requests.Session objects

            if nb_tries > max_retries:
                # Bare `raise` (instead of `raise err`) preserves the original traceback.
                raise

        # Sleep for X seconds
        logger.warning(f"Retrying in {sleep_time}s [Retry {nb_tries}/{max_retries}].")
        time.sleep(sleep_time)

        # Update sleep time for next retry
        sleep_time = min(max_wait_time, sleep_time * 2)  # Exponential backoff


def fix_hf_endpoint_in_url(url: str, endpoint: Optional[str]) -> str:
    """Replace the default endpoint in a URL by a custom one.

    This is useful when using a proxy and the Hugging Face Hub returns a URL with the default endpoint.
    """
    endpoint = endpoint or constants.ENDPOINT
    # check if a proxy has been set => if yes, update the returned URL to use the proxy
    if endpoint not in (None, constants._HF_DEFAULT_ENDPOINT, constants._HF_DEFAULT_STAGING_ENDPOINT):
        url = url.replace(constants._HF_DEFAULT_ENDPOINT, endpoint)
        url = url.replace(constants._HF_DEFAULT_STAGING_ENDPOINT, endpoint)
    return url
# diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..f7ab4fe7cba9bd13f01d9c81854a00fd30b7f0d9
# --- /dev/null
# +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py
# @@ -0,0 +1,52 @@
# coding=utf-8
# Copyright 2022-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains utilities to handle pagination on Huggingface Hub.""" + +from typing import Dict, Iterable, Optional + +import requests + +from . import get_session, hf_raise_for_status, logging + + +logger = logging.get_logger(__name__) + + +def paginate(path: str, params: Dict, headers: Dict) -> Iterable: + """Fetch a list of models/datasets/spaces and paginate through results. + + This is using the same "Link" header format as GitHub. + See: + - https://requests.readthedocs.io/en/latest/api/#requests.Response.links + - https://docs.github.com/en/rest/guides/traversing-with-pagination#link-header + """ + session = get_session() + r = session.get(path, params=params, headers=headers) + hf_raise_for_status(r) + yield from r.json() + + # Follow pages + # Next link already contains query params + next_page = _get_next_page(r) + while next_page is not None: + logger.debug(f"Pagination detected. 
Requesting next page: {next_page}") + r = session.get(next_page, headers=headers) + hf_raise_for_status(r) + yield from r.json() + next_page = _get_next_page(r) + + +def _get_next_page(response: requests.Response) -> Optional[str]: + return response.links.get("next", {}).get("url") diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_paths.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..2361db6d0e2bbe233bcb7456f3c760079ae317f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_paths.py @@ -0,0 +1,130 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Contains utilities to handle paths in Huggingface Hub.""" + +from fnmatch import fnmatch +from pathlib import Path +from typing import Callable, Generator, Iterable, List, Optional, TypeVar, Union + + +T = TypeVar("T") + +# Always ignore `.git` and `.huggingface` folders in commits +DEFAULT_IGNORE_PATTERNS = [ + ".git", + ".git/*", + "*/.git", + "**/.git/**", + ".huggingface", + ".huggingface/*", + "*/.huggingface", + "**/.huggingface/**", +] +# Forbidden to commit these folders +FORBIDDEN_FOLDERS = [".git", ".huggingface"] + + +def filter_repo_objects( + items: Iterable[T], + *, + allow_patterns: Optional[Union[List[str], str]] = None, + ignore_patterns: Optional[Union[List[str], str]] = None, + key: Optional[Callable[[T], str]] = None, +) -> Generator[T, None, None]: + """Filter repo objects based on an allowlist and a denylist. + + Input must be a list of paths (`str` or `Path`) or a list of arbitrary objects. + In the later case, `key` must be provided and specifies a function of one argument + that is used to extract a path from each element in iterable. + + Patterns are Unix shell-style wildcards which are NOT regular expressions. See + https://docs.python.org/3/library/fnmatch.html for more details. + + Args: + items (`Iterable`): + List of items to filter. + allow_patterns (`str` or `List[str]`, *optional*): + Patterns constituting the allowlist. If provided, item paths must match at + least one pattern from the allowlist. + ignore_patterns (`str` or `List[str]`, *optional*): + Patterns constituting the denylist. If provided, item paths must not match + any patterns from the denylist. + key (`Callable[[T], str]`, *optional*): + Single-argument function to extract a path from each item. If not provided, + the `items` must already be `str` or `Path`. + + Returns: + Filtered list of objects, as a generator. + + Raises: + :class:`ValueError`: + If `key` is not provided and items are not `str` or `Path`. 
+ + Example usage with paths: + ```python + >>> # Filter only PDFs that are not hidden. + >>> list(filter_repo_objects( + ... ["aaa.PDF", "bbb.jpg", ".ccc.pdf", ".ddd.png"], + ... allow_patterns=["*.pdf"], + ... ignore_patterns=[".*"], + ... )) + ["aaa.pdf"] + ``` + + Example usage with objects: + ```python + >>> list(filter_repo_objects( + ... [ + ... CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf") + ... CommitOperationAdd(path_or_fileobj="/tmp/bbb.jpg", path_in_repo="bbb.jpg") + ... CommitOperationAdd(path_or_fileobj="/tmp/.ccc.pdf", path_in_repo=".ccc.pdf") + ... CommitOperationAdd(path_or_fileobj="/tmp/.ddd.png", path_in_repo=".ddd.png") + ... ], + ... allow_patterns=["*.pdf"], + ... ignore_patterns=[".*"], + ... key=lambda x: x.repo_in_path + ... )) + [CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf")] + ``` + """ + if isinstance(allow_patterns, str): + allow_patterns = [allow_patterns] + + if isinstance(ignore_patterns, str): + ignore_patterns = [ignore_patterns] + + if key is None: + + def _identity(item: T) -> str: + if isinstance(item, str): + return item + if isinstance(item, Path): + return str(item) + raise ValueError(f"Please provide `key` argument in `filter_repo_objects`: `{item}` is not a string.") + + key = _identity # Items must be `str` or `Path`, otherwise raise ValueError + + for item in items: + path = key(item) + + # Skip if there's an allowlist and path doesn't match any + if allow_patterns is not None and not any(fnmatch(path, r) for r in allow_patterns): + continue + + # Skip if there's a denylist and path matches any + if ignore_patterns is not None and any(fnmatch(path, r) for r in ignore_patterns): + continue + + yield item diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_subprocess.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_subprocess.py new file mode 100644 index 
0000000000000000000000000000000000000000..a09e0d58868ecd699ea3a3e503a8a702d25c8ea5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_subprocess.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License +"""Contains utilities to easily handle subprocesses in `huggingface_hub`.""" + +import os +import subprocess +import sys +from contextlib import contextmanager +from io import StringIO +from pathlib import Path +from typing import IO, Generator, List, Optional, Tuple, Union + +from .logging import get_logger + + +logger = get_logger(__name__) + + +@contextmanager +def capture_output() -> Generator[StringIO, None, None]: + """Capture output that is printed to terminal. + + Taken from https://stackoverflow.com/a/34738440 + + Example: + ```py + >>> with capture_output() as output: + ... print("hello world") + >>> assert output.getvalue() == "hello world\n" + ``` + """ + output = StringIO() + previous_output = sys.stdout + sys.stdout = output + yield output + sys.stdout = previous_output + + +def run_subprocess( + command: Union[str, List[str]], + folder: Optional[Union[str, Path]] = None, + check=True, + **kwargs, +) -> subprocess.CompletedProcess: + """ + Method to run subprocesses. 
Calling this will capture the `stderr` and `stdout`, + please call `subprocess.run` manually in case you would like for them not to + be captured. + + Args: + command (`str` or `List[str]`): + The command to execute as a string or list of strings. + folder (`str`, *optional*): + The folder in which to run the command. Defaults to current working + directory (from `os.getcwd()`). + check (`bool`, *optional*, defaults to `True`): + Setting `check` to `True` will raise a `subprocess.CalledProcessError` + when the subprocess has a non-zero exit code. + kwargs (`Dict[str]`): + Keyword arguments to be passed to the `subprocess.run` underlying command. + + Returns: + `subprocess.CompletedProcess`: The completed process. + """ + if isinstance(command, str): + command = command.split() + + if isinstance(folder, Path): + folder = str(folder) + + return subprocess.run( + command, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + check=check, + encoding="utf-8", + errors="replace", # if not utf-8, replace char by � + cwd=folder or os.getcwd(), + **kwargs, + ) + + +@contextmanager +def run_interactive_subprocess( + command: Union[str, List[str]], + folder: Optional[Union[str, Path]] = None, + **kwargs, +) -> Generator[Tuple[IO[str], IO[str]], None, None]: + """Run a subprocess in an interactive mode in a context manager. + + Args: + command (`str` or `List[str]`): + The command to execute as a string or list of strings. + folder (`str`, *optional*): + The folder in which to run the command. Defaults to current working + directory (from `os.getcwd()`). + kwargs (`Dict[str]`): + Keyword arguments to be passed to the `subprocess.run` underlying command. + + Returns: + `Tuple[IO[str], IO[str]]`: A tuple with `stdin` and `stdout` to interact + with the process (input and output are utf-8 encoded). 
+ + Example: + ```python + with _interactive_subprocess("git credential-store get") as (stdin, stdout): + # Write to stdin + stdin.write("url=hf.co\nusername=obama\n".encode("utf-8")) + stdin.flush() + + # Read from stdout + output = stdout.read().decode("utf-8") + ``` + """ + if isinstance(command, str): + command = command.split() + + with subprocess.Popen( + command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + errors="replace", # if not utf-8, replace char by � + cwd=folder or os.getcwd(), + **kwargs, + ) as process: + assert process.stdin is not None, "subprocess is opened as subprocess.PIPE" + assert process.stdout is not None, "subprocess is opened as subprocess.PIPE" + yield process.stdin, process.stdout diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_telemetry.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_telemetry.py new file mode 100644 index 0000000000000000000000000000000000000000..5de988e2795188324f69232d1beb68191591715d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_telemetry.py @@ -0,0 +1,118 @@ +from queue import Queue +from threading import Lock, Thread +from typing import Dict, Optional, Union +from urllib.parse import quote + +from .. import constants, logging +from . import build_hf_headers, get_session, hf_raise_for_status + + +logger = logging.get_logger(__name__) + +# Telemetry is sent by a separate thread to avoid blocking the main thread. +# A daemon thread is started once and consume tasks from the _TELEMETRY_QUEUE. +# If the thread stops for some reason -shouldn't happen-, we restart a new one. 
+_TELEMETRY_THREAD: Optional[Thread] = None +_TELEMETRY_THREAD_LOCK = Lock() # Lock to avoid starting multiple threads in parallel +_TELEMETRY_QUEUE: Queue = Queue() + + +def send_telemetry( + topic: str, + *, + library_name: Optional[str] = None, + library_version: Optional[str] = None, + user_agent: Union[Dict, str, None] = None, +) -> None: + """ + Sends telemetry that helps tracking usage of different HF libraries. + + This usage data helps us debug issues and prioritize new features. However, we understand that not everyone wants + to share additional information, and we respect your privacy. You can disable telemetry collection by setting the + `HF_HUB_DISABLE_TELEMETRY=1` as environment variable. Telemetry is also disabled in offline mode (i.e. when setting + `HF_HUB_OFFLINE=1`). + + Telemetry collection is run in a separate thread to minimize impact for the user. + + Args: + topic (`str`): + Name of the topic that is monitored. The topic is directly used to build the URL. If you want to monitor + subtopics, just use "/" separation. Examples: "gradio", "transformers/examples",... + library_name (`str`, *optional*): + The name of the library that is making the HTTP request. Will be added to the user-agent header. + library_version (`str`, *optional*): + The version of the library that is making the HTTP request. Will be added to the user-agent header. + user_agent (`str`, `dict`, *optional*): + The user agent info in the form of a dictionary or a single string. It will be completed with information about the installed packages. + + Example: + ```py + >>> from huggingface_hub.utils import send_telemetry + + # Send telemetry without library information + >>> send_telemetry("ping") + + # Send telemetry to subtopic with library information + >>> send_telemetry("gradio/local_link", library_name="gradio", library_version="3.22.1") + + # Send telemetry with additional data + >>> send_telemetry( + ... topic="examples", + ... library_name="transformers", + ... 
library_version="4.26.0", + ... user_agent={"pipeline": "text_classification", "framework": "flax"}, + ... ) + ``` + """ + if constants.HF_HUB_OFFLINE or constants.HF_HUB_DISABLE_TELEMETRY: + return + + _start_telemetry_thread() # starts thread only if doesn't exist yet + _TELEMETRY_QUEUE.put( + {"topic": topic, "library_name": library_name, "library_version": library_version, "user_agent": user_agent} + ) + + +def _start_telemetry_thread(): + """Start a daemon thread to consume tasks from the telemetry queue. + + If the thread is interrupted, start a new one. + """ + with _TELEMETRY_THREAD_LOCK: # avoid to start multiple threads if called concurrently + global _TELEMETRY_THREAD + if _TELEMETRY_THREAD is None or not _TELEMETRY_THREAD.is_alive(): + _TELEMETRY_THREAD = Thread(target=_telemetry_worker, daemon=True) + _TELEMETRY_THREAD.start() + + +def _telemetry_worker(): + """Wait for a task and consume it.""" + while True: + kwargs = _TELEMETRY_QUEUE.get() + _send_telemetry_in_thread(**kwargs) + _TELEMETRY_QUEUE.task_done() + + +def _send_telemetry_in_thread( + topic: str, + *, + library_name: Optional[str] = None, + library_version: Optional[str] = None, + user_agent: Union[Dict, str, None] = None, +) -> None: + """Contains the actual data sending data to the Hub.""" + path = "/".join(quote(part) for part in topic.split("/") if len(part) > 0) + try: + r = get_session().head( + f"{constants.ENDPOINT}/api/telemetry/{path}", + headers=build_hf_headers( + token=False, # no need to send a token for telemetry + library_name=library_name, + library_version=library_version, + user_agent=user_agent, + ), + ) + hf_raise_for_status(r) + except Exception as e: + # We don't want to error in case of connection errors of any kind. 
+ logger.debug(f"Error while sending telemetry: {e}") diff --git a/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/LICENSE.txt b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..de9a582603ca6aa895136e2b118443e9397897da --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2007,2008 David M. Cooke +Copyright (c) 2009,2010 Francesc Alted +Copyright (c) 2011- See AUTHORS.txt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..916848776a2ce7238db352bcda5082ebb64d1300 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b478a39d42cc72de19a88b2887c7b19a9bfbcc15 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e4d9e659789931d02ce1d8ba3c8237363171479 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e9b8fc4a809e160648f5f76cc11316e33034b34 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..08a722b09caa4e27d4f1d2c54a48864937ed2fd0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..08e2e13478365fb7b227b461418d2f31e3cb76d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__init__.py @@ -0,0 +1,90 @@ +""" +================================ +Datasets (:mod:`scipy.datasets`) +================================ + +.. currentmodule:: scipy.datasets + +Dataset Methods +=============== + +.. autosummary:: + :toctree: generated/ + + ascent + face + electrocardiogram + +Utility Methods +=============== + +.. autosummary:: + :toctree: generated/ + + download_all -- Download all the dataset files to specified path. + clear_cache -- Clear cached dataset directory. + + +Usage of Datasets +================= + +SciPy dataset methods can be simply called as follows: ``'()'`` +This downloads the dataset files over the network once, and saves the cache, +before returning a `numpy.ndarray` object representing the dataset. + +Note that the return data structure and data type might be different for +different dataset methods. For a more detailed example on usage, please look +into the particular dataset method documentation above. + + +How dataset retrieval and storage works +======================================= + +SciPy dataset files are stored within individual github repositories under the +SciPy GitHub organization, following a naming convention as +``'dataset-'``, for example `scipy.datasets.face` files live at +https://github.com/scipy/dataset-face. The `scipy.datasets` submodule utilizes +and depends on `Pooch `_, a Python +package built to simplify fetching data files. 
Pooch uses these repos to +retrieve the respective dataset files when calling the dataset function. + +A registry of all the datasets, essentially a mapping of filenames with their +SHA256 hash and repo urls are maintained, which Pooch uses to handle and verify +the downloads on function call. After downloading the dataset once, the files +are saved in the system cache directory under ``'scipy-data'``. + +Dataset cache locations may vary on different platforms. + +For macOS:: + + '~/Library/Caches/scipy-data' + +For Linux and other Unix-like platforms:: + + '~/.cache/scipy-data' # or the value of the XDG_CACHE_HOME env var, if defined + +For Windows:: + + 'C:\\Users\\\\AppData\\Local\\\\scipy-data\\Cache' + + +In environments with constrained network connectivity for various security +reasons or on systems without continuous internet connections, one may manually +load the cache of the datasets by placing the contents of the dataset repo in +the above mentioned cache directory to avoid fetching dataset errors without +the internet connectivity. + +""" + + +from ._fetchers import face, ascent, electrocardiogram +from ._download_all import download_all +from ._utils import clear_cache + +__all__ = ['ascent', 'electrocardiogram', 'face', + 'download_all', 'clear_cache'] + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_download_all.py b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_download_all.py new file mode 100644 index 0000000000000000000000000000000000000000..255fdcaf22950848f458a7ed9ada183e0a2e630e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_download_all.py @@ -0,0 +1,57 @@ +""" +Platform independent script to download all the +`scipy.datasets` module data files. +This doesn't require a full scipy build. 
+ +Run: python _download_all.py +""" + +import argparse +try: + import pooch +except ImportError: + pooch = None + + +if __package__ is None or __package__ == '': + # Running as python script, use absolute import + import _registry # type: ignore +else: + # Running as python module, use relative import + from . import _registry + + +def download_all(path=None): + """ + Utility method to download all the dataset files + for `scipy.datasets` module. + + Parameters + ---------- + path : str, optional + Directory path to download all the dataset files. + If None, default to the system cache_dir detected by pooch. + """ + if pooch is None: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + if path is None: + path = pooch.os_cache('scipy-data') + for dataset_name, dataset_hash in _registry.registry.items(): + pooch.retrieve(url=_registry.registry_urls[dataset_name], + known_hash=dataset_hash, + fname=dataset_name, path=path) + + +def main(): + parser = argparse.ArgumentParser(description='Download SciPy data files.') + parser.add_argument("path", nargs='?', type=str, + default=pooch.os_cache('scipy-data'), + help="Directory path to download all the data files.") + args = parser.parse_args() + download_all(args.path) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_fetchers.py b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_fetchers.py new file mode 100644 index 0000000000000000000000000000000000000000..51dfbc4498f9d55c61a8d22b9d7db8c21cfa5a68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_fetchers.py @@ -0,0 +1,220 @@ +from numpy import array, frombuffer, load +from ._registry import registry, registry_urls + +try: + import pooch +except ImportError: + pooch = None + data_fetcher = None +else: + data_fetcher = pooch.create( + # Use the default cache folder for the 
operating system + # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to + # select an appropriate directory for the cache on each platform. + path=pooch.os_cache("scipy-data"), + + # The remote data is on Github + # base_url is a required param, even though we override this + # using individual urls in the registry. + base_url="https://github.com/scipy/", + registry=registry, + urls=registry_urls + ) + + +def fetch_data(dataset_name, data_fetcher=data_fetcher): + if data_fetcher is None: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + # The "fetch" method returns the full path to the downloaded data file. + return data_fetcher.fetch(dataset_name) + + +def ascent(): + """ + Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy + use in demos. + + The image is derived from accent-to-the-top.jpg at + http://www.public-domain-image.com/people-public-domain-images-pictures/ + + Parameters + ---------- + None + + Returns + ------- + ascent : ndarray + convenient image to use for testing and demonstration + + Examples + -------- + >>> import scipy.datasets + >>> ascent = scipy.datasets.ascent() + >>> ascent.shape + (512, 512) + >>> ascent.max() + 255 + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(ascent) + >>> plt.show() + + """ + import pickle + + # The file will be downloaded automatically the first time this is run, + # returning the path to the downloaded file. Afterwards, Pooch finds + # it in the local cache and doesn't repeat the download. + fname = fetch_data("ascent.dat") + # Now we just need to load it with our standard Python tools. + with open(fname, 'rb') as f: + ascent = array(pickle.load(f)) + return ascent + + +def electrocardiogram(): + """ + Load an electrocardiogram as an example for a 1-D signal. 
+ + The returned signal is a 5 minute long electrocardiogram (ECG), a medical + recording of the heart's electrical activity, sampled at 360 Hz. + + Returns + ------- + ecg : ndarray + The electrocardiogram in millivolt (mV) sampled at 360 Hz. + + Notes + ----- + The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_ + (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on + PhysioNet [2]_. The excerpt includes noise induced artifacts, typical + heartbeats as well as pathological changes. + + .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208 + + .. versionadded:: 1.1.0 + + References + ---------- + .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database. + IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001). + (PMID: 11446209); :doi:`10.13026/C2F305` + .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, + Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank, + PhysioToolkit, and PhysioNet: Components of a New Research Resource + for Complex Physiologic Signals. Circulation 101(23):e215-e220; + :doi:`10.1161/01.CIR.101.23.e215` + + Examples + -------- + >>> from scipy.datasets import electrocardiogram + >>> ecg = electrocardiogram() + >>> ecg + array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385]) + >>> ecg.shape, ecg.mean(), ecg.std() + ((108000,), -0.16510875, 0.5992473991177294) + + As stated the signal features several areas with a different morphology. + E.g., the first few seconds show the electrical activity of a heart in + normal sinus rhythm as seen below. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> fs = 360 + >>> time = np.arange(ecg.size) / fs + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(9, 10.2) + >>> plt.ylim(-1, 1.5) + >>> plt.show() + + After second 16, however, the first premature ventricular contractions, + also called extrasystoles, appear. 
These have a different morphology + compared to typical heartbeats. The difference can easily be observed + in the following plot. + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(46.5, 50) + >>> plt.ylim(-2, 1.5) + >>> plt.show() + + At several points large artifacts disturb the recording, e.g.: + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(207, 215) + >>> plt.ylim(-2, 3.5) + >>> plt.show() + + Finally, examining the power spectrum reveals that most of the biosignal is + made up of lower frequencies. At 60 Hz the noise induced by the mains + electricity can be clearly observed. + + >>> from scipy.signal import welch + >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum") + >>> plt.semilogy(f, Pxx) + >>> plt.xlabel("Frequency in Hz") + >>> plt.ylabel("Power spectrum of the ECG in mV**2") + >>> plt.xlim(f[[0, -1]]) + >>> plt.show() + """ + fname = fetch_data("ecg.dat") + with load(fname) as file: + ecg = file["ecg"].astype(int) # np.uint16 -> int + # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain + ecg = (ecg - 1024) / 200.0 + return ecg + + +def face(gray=False): + """ + Get a 1024 x 768, color image of a raccoon face. 
+ + raccoon-procyon-lotor.jpg at http://www.public-domain-image.com + + Parameters + ---------- + gray : bool, optional + If True return 8-bit grey-scale image, otherwise return a color image + + Returns + ------- + face : ndarray + image of a raccoon face + + Examples + -------- + >>> import scipy.datasets + >>> face = scipy.datasets.face() + >>> face.shape + (768, 1024, 3) + >>> face.max() + 255 + >>> face.dtype + dtype('uint8') + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(face) + >>> plt.show() + + """ + import bz2 + fname = fetch_data("face.dat") + with open(fname, 'rb') as f: + rawdata = f.read() + face_data = bz2.decompress(rawdata) + face = frombuffer(face_data, dtype='uint8') + face.shape = (768, 1024, 3) + if gray is True: + face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] + + 0.07 * face[:, :, 2]).astype('uint8') + return face diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_registry.py b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..969384ad9843159e766100bfa9755aed8102dd09 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_registry.py @@ -0,0 +1,26 @@ +########################################################################## +# This file serves as the dataset registry for SciPy Datasets SubModule. 
+########################################################################## + + +# To generate the SHA256 hash, use the command +# openssl sha256 +registry = { + "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2", + "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf", + "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886" +} + +registry_urls = { + "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat", + "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat", + "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat" +} + +# dataset method mapping with their associated filenames +# : ["filename1", "filename2", ...] +method_files_map = { + "ascent": ["ascent.dat"], + "electrocardiogram": ["ecg.dat"], + "face": ["face.dat"] +} diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_utils.py b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f644f8797d6e3256a16ec2c509eec725c726300 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/_utils.py @@ -0,0 +1,81 @@ +import os +import shutil +from ._registry import method_files_map + +try: + import platformdirs +except ImportError: + platformdirs = None # type: ignore[assignment] + + +def _clear_cache(datasets, cache_dir=None, method_map=None): + if method_map is None: + # Use SciPy Datasets method map + method_map = method_files_map + if cache_dir is None: + # Use default cache_dir path + if platformdirs is None: + # platformdirs is pooch dependency + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + cache_dir = platformdirs.user_cache_dir("scipy-data") + + if not os.path.exists(cache_dir): + print(f"Cache Directory {cache_dir} doesn't exist. 
Nothing to clear.") + return + + if datasets is None: + print(f"Cleaning the cache directory {cache_dir}!") + shutil.rmtree(cache_dir) + else: + if not isinstance(datasets, (list, tuple)): + # single dataset method passed should be converted to list + datasets = [datasets, ] + for dataset in datasets: + assert callable(dataset) + dataset_name = dataset.__name__ # Name of the dataset method + if dataset_name not in method_map: + raise ValueError(f"Dataset method {dataset_name} doesn't " + "exist. Please check if the passed dataset " + "is a subset of the following dataset " + f"methods: {list(method_map.keys())}") + + data_files = method_map[dataset_name] + data_filepaths = [os.path.join(cache_dir, file) + for file in data_files] + for data_filepath in data_filepaths: + if os.path.exists(data_filepath): + print("Cleaning the file " + f"{os.path.split(data_filepath)[1]} " + f"for dataset {dataset_name}") + os.remove(data_filepath) + else: + print(f"Path {data_filepath} doesn't exist. " + "Nothing to clear.") + + +def clear_cache(datasets=None): + """ + Cleans the scipy datasets cache directory. + + If a scipy.datasets method or a list/tuple of the same is + provided, then clear_cache removes all the data files + associated to the passed dataset method callable(s). + + By default, it removes all the cached data files. 
+ + Parameters + ---------- + datasets : callable or list/tuple of callable or None + + Examples + -------- + >>> from scipy import datasets + >>> ascent_array = datasets.ascent() + >>> ascent_array.shape + (512, 512) + >>> datasets.clear_cache([datasets.ascent]) + Cleaning the file ascent.dat for dataset ascent + """ + _clear_cache(datasets) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_arraytools.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_arraytools.py new file mode 100644 index 0000000000000000000000000000000000000000..87ce75d8d892a64021da7abc5d149556c22cf983 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_arraytools.py @@ -0,0 +1,264 @@ +""" +Functions for acting on a axis of an array. +""" +import numpy as np + + +def axis_slice(a, start=None, stop=None, step=None, axis=-1): + """Take a slice along axis 'axis' from 'a'. + + Parameters + ---------- + a : numpy.ndarray + The array to be sliced. + start, stop, step : int or None + The slice parameters. + axis : int, optional + The axis of `a` to be sliced. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import axis_slice + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> axis_slice(a, start=0, stop=1, axis=1) + array([[1], + [4], + [7]]) + >>> axis_slice(a, start=1, axis=0) + array([[4, 5, 6], + [7, 8, 9]]) + + Notes + ----- + The keyword arguments start, stop and step are used by calling + slice(start, stop, step). This implies axis_slice() does not + handle its arguments the exactly the same as indexing. To select + a single index k, for example, use + axis_slice(a, start=k, stop=k+1) + In this case, the length of the axis 'axis' in the result will + be 1; the trivial dimension is not removed. (Use numpy.squeeze() + to remove trivial axes.) 
+ """ + a_slice = [slice(None)] * a.ndim + a_slice[axis] = slice(start, stop, step) + b = a[tuple(a_slice)] + return b + + +def axis_reverse(a, axis=-1): + """Reverse the 1-D slices of `a` along axis `axis`. + + Returns axis_slice(a, step=-1, axis=axis). + """ + return axis_slice(a, step=-1, axis=axis) + + +def odd_ext(x, n, axis=-1): + """ + Odd extension at the boundaries of an array + + Generate a new ndarray by making an odd extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import odd_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> odd_ext(a, 2) + array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [-4, -1, 0, 1, 4, 9, 16, 23, 28]]) + + Odd extension is a "180 degree rotation" at the endpoints of the original + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = odd_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. 
" + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_end = axis_slice(x, start=0, stop=1, axis=axis) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((2 * left_end - left_ext, + x, + 2 * right_end - right_ext), + axis=axis) + return ext + + +def even_ext(x, n, axis=-1): + """ + Even extension at the boundaries of an array + + Generate a new ndarray by making an even extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import even_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> even_ext(a, 2) + array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3], + [ 4, 1, 0, 1, 4, 9, 16, 9, 4]]) + + Even extension is a "mirror image" at the boundaries of the original array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = even_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. 
" + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def const_ext(x, n, axis=-1): + """ + Constant extension at the boundaries of an array + + Generate a new ndarray that is a constant extension of `x` along an axis. + + The extension repeats the values at the first and last element of + the axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import const_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> const_ext(a, 2) + array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5], + [ 0, 0, 0, 1, 4, 9, 16, 16, 16]]) + + Constant extension continues with the same values as the endpoints of the + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = const_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + left_end = axis_slice(x, start=0, stop=1, axis=axis) + ones_shape = [1] * x.ndim + ones_shape[axis] = n + ones = np.ones(ones_shape, dtype=x.dtype) + left_ext = ones * left_end + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = ones * right_end + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def zero_ext(x, n, axis=-1): + """ + Zero padding at the boundaries of an array + + Generate a new ndarray that is a zero-padded extension of `x` along + an axis. 
+ + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the + axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import zero_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> zero_ext(a, 2) + array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0], + [ 0, 0, 0, 1, 4, 9, 16, 0, 0]]) + """ + if n < 1: + return x + zeros_shape = list(x.shape) + zeros_shape[axis] = n + zeros = np.zeros(zeros_shape, dtype=x.dtype) + ext = np.concatenate((zeros, x, zeros), axis=axis) + return ext + + +def _validate_fs(fs, allow_none=True): + """ + Check if the given sampling frequency is a scalar and raises an exception + otherwise. If allow_none is False, also raises an exception for none + sampling rates. Returns the sampling frequency as float or none if the + input is none. + """ + if fs is None: + if not allow_none: + raise ValueError("Sampling frequency can not be none.") + else: # should be float + if not np.isscalar(fs): + raise ValueError("Sampling frequency fs must be a single scalar.") + fs = float(fs) + return fs diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_czt.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_czt.py new file mode 100644 index 0000000000000000000000000000000000000000..c5e5715b460fb2719b68d4694474bc1efc0a9fa0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_czt.py @@ -0,0 +1,575 @@ +# This program is public domain +# Authors: Paul Kienzle, Nadav Horesh +""" +Chirp z-transform. + +We provide two interfaces to the chirp z-transform: an object interface +which precalculates part of the transform and can be applied efficiently +to many different data sets, and a functional interface which is applied +only to the given data set. 
+ +Transforms +---------- + +CZT : callable (x, axis=-1) -> array + Define a chirp z-transform that can be applied to different signals. +ZoomFFT : callable (x, axis=-1) -> array + Define a Fourier transform on a range of frequencies. + +Functions +--------- + +czt : array + Compute the chirp z-transform for a signal. +zoom_fft : array + Compute the Fourier transform on a range of frequencies. +""" + +import cmath +import numbers +import numpy as np +from numpy import pi, arange +from scipy.fft import fft, ifft, next_fast_len + +__all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points'] + + +def _validate_sizes(n, m): + if n < 1 or not isinstance(n, numbers.Integral): + raise ValueError('Invalid number of CZT data ' + f'points ({n}) specified. ' + 'n must be positive and integer type.') + + if m is None: + m = n + elif m < 1 or not isinstance(m, numbers.Integral): + raise ValueError('Invalid number of CZT output ' + f'points ({m}) specified. ' + 'm must be positive and integer type.') + + return m + + +def czt_points(m, w=None, a=1+0j): + """ + Return the points at which the chirp z-transform is computed. + + Parameters + ---------- + m : int + The number of points desired. + w : complex, optional + The ratio between points in each step. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + out : ndarray + The points in the Z plane at which `CZT` samples the z-transform, + when called with arguments `m`, `w`, and `a`, as complex numbers. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + czt : Convenience function for quickly calculating CZT. 
+ + Examples + -------- + Plot the points of a 16-point FFT: + + >>> import numpy as np + >>> from scipy.signal import czt_points + >>> points = czt_points(16) + >>> import matplotlib.pyplot as plt + >>> plt.plot(points.real, points.imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + + and a 91-point logarithmic spiral that crosses the unit circle: + + >>> m, w, a = 91, 0.995*np.exp(-1j*np.pi*.05), 0.8*np.exp(1j*np.pi/6) + >>> points = czt_points(m, w, a) + >>> plt.plot(points.real, points.imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + """ + m = _validate_sizes(1, m) + + k = arange(m) + + a = 1.0 * a # at least float + + if w is None: + # Nothing specified, default to FFT + return a * np.exp(2j * pi * k / m) + else: + # w specified + w = 1.0 * w # at least float + return a * w**-k + + +class CZT: + """ + Create a callable chirp z-transform function. + + Transform to compute the frequency response around a spiral. + Objects of this class are callables which can compute the + chirp z-transform on their inputs. This object precalculates the constant + chirps used in the given transform. + + Parameters + ---------- + n : int + The size of the signal. + m : int, optional + The number of output points desired. Default is `n`. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + f : CZT + Callable object ``f(x, axis=-1)`` for computing the chirp z-transform + on `x`. + + See Also + -------- + czt : Convenience function for quickly calculating CZT. + ZoomFFT : Class that creates a callable partial FFT function. 
+ + Notes + ----- + The defaults are chosen such that ``f(x)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to + ``fft.fft(x, m)``. + + If `w` does not lie on the unit circle, then the transform will be + around a spiral with exponentially-increasing radius. Regardless, + angle will increase linearly. + + For transforms that do lie on the unit circle, accuracy is better when + using `ZoomFFT`, since any numerical error in `w` is + accumulated for long data lengths, drifting away from the unit circle. + + The chirp z-transform can be faster than an equivalent FFT with + zero padding. Try it with your own array sizes to see. + + However, the chirp z-transform is considerably less precise than the + equivalent zero-padded FFT. + + As this CZT is implemented using the Bluestein algorithm, it can compute + large prime-length Fourier transforms in O(N log N) time, rather than the + O(N**2) time required by the direct DFT calculation. (`scipy.fft` also + uses Bluestein's algorithm'.) + + (The name "chirp z-transform" comes from the use of a chirp in the + Bluestein algorithm. It does not decompose signals into chirps, like + other transforms with "chirp" in the name.) + + References + ---------- + .. [1] Leo I. Bluestein, "A linear filtering approach to the computation + of the discrete Fourier transform," Northeast Electronics Research + and Engineering Meeting Record 10, 218-219 (1968). + .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and + its application," Bell Syst. Tech. J. 48, 1249-1292 (1969). 
+ + Examples + -------- + Compute multiple prime-length FFTs: + + >>> from scipy.signal import CZT + >>> import numpy as np + >>> a = np.random.rand(7) + >>> b = np.random.rand(7) + >>> c = np.random.rand(7) + >>> czt_7 = CZT(n=7) + >>> A = czt_7(a) + >>> B = czt_7(b) + >>> C = czt_7(c) + + Display the points at which the FFT is calculated: + + >>> czt_7.points() + array([ 1.00000000+0.j , 0.62348980+0.78183148j, + -0.22252093+0.97492791j, -0.90096887+0.43388374j, + -0.90096887-0.43388374j, -0.22252093-0.97492791j, + 0.62348980-0.78183148j]) + >>> import matplotlib.pyplot as plt + >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + """ + + def __init__(self, n, m=None, w=None, a=1+0j): + m = _validate_sizes(n, m) + + k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) + + if w is None: + # Nothing specified, default to FFT-like + w = cmath.exp(-2j*pi/m) + wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m) + else: + # w specified + wk2 = w**(k**2/2.) + + a = 1.0 * a # at least float + + self.w, self.a = w, a + self.m, self.n = m, n + + nfft = next_fast_len(n + m - 1) + self._Awk2 = a**-k[:n] * wk2[:n] + self._nfft = nfft + self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) + self._wk2 = wk2[:m] + self._yidx = slice(n-1, n+m-1) + + def __call__(self, x, *, axis=-1): + """ + Calculate the chirp z-transform of a signal. + + Parameters + ---------- + x : array + The signal to transform. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + An array of the same dimensions as `x`, but with the length of the + transformed axis set to `m`. 
+ """ + x = np.asarray(x) + if x.shape[axis] != self.n: + raise ValueError(f"CZT defined for length {self.n}, not " + f"{x.shape[axis]}") + # Calculate transpose coordinates, to allow operation on any given axis + trnsp = np.arange(x.ndim) + trnsp[[axis, -1]] = [-1, axis] + x = x.transpose(*trnsp) + y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft)) + y = y[..., self._yidx] * self._wk2 + return y.transpose(*trnsp) + + def points(self): + """ + Return the points at which the chirp z-transform is computed. + """ + return czt_points(self.m, self.w, self.a) + + +class ZoomFFT(CZT): + """ + Create a callable zoom FFT transform function. + + This is a specialization of the chirp z-transform (`CZT`) for a set of + equally-spaced frequencies around the unit circle, used to calculate a + section of the FFT more efficiently than calculating the entire FFT and + truncating. + + Parameters + ---------- + n : int + The size of the signal. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. Default is `n`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. Otherwise, it is not included. + Default is False. + + Returns + ------- + f : ZoomFFT + Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`. + + See Also + -------- + zoom_fft : Convenience function for calculating a zoom FFT. + + Notes + ----- + The defaults are chosen such that ``f(x, 2)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to + ``fft.fft(x, m)``. 
+ + Sampling frequency is 1/dt, the time step between samples in the + signal `x`. The unit circle corresponds to frequencies from 0 up + to the sampling frequency. The default sampling frequency of 2 + means that `f1`, `f2` values up to the Nyquist frequency are in the + range [0, 1). For `f1`, `f2` values expressed in radians, a sampling + frequency of 2*pi should be used. + + Remember that a zoom FFT can only interpolate the points of the existing + FFT. It cannot help to resolve two separate nearby frequencies. + Frequency resolution can only be increased by increasing acquisition + time. + + These functions are implemented using Bluestein's algorithm (as is + `scipy.fft`). [2]_ + + References + ---------- + .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 29 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + .. [2] Leo I. Bluestein, "A linear filtering approach to the computation + of the discrete Fourier transform," Northeast Electronics Research + and Engineering Meeting Record 10, 218-219 (1968). 
+ + Examples + -------- + To plot the transform results use something like the following: + + >>> import numpy as np + >>> from scipy.signal import ZoomFFT + >>> t = np.linspace(0, 1, 1021) + >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t) + >>> f1, f2 = 5, 27 + >>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021) + >>> X = transform(x) + >>> f = np.linspace(f1, f2, len(x)) + >>> import matplotlib.pyplot as plt + >>> plt.plot(f, 20*np.log10(np.abs(X))) + >>> plt.show() + """ + + def __init__(self, n, fn, m=None, *, fs=2, endpoint=False): + m = _validate_sizes(n, m) + + k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) + + if np.size(fn) == 2: + f1, f2 = fn + elif np.size(fn) == 1: + f1, f2 = 0.0, fn + else: + raise ValueError('fn must be a scalar or 2-length sequence') + + self.f1, self.f2, self.fs = f1, f2, fs + + if endpoint: + scale = ((f2 - f1) * m) / (fs * (m - 1)) + else: + scale = (f2 - f1) / fs + a = cmath.exp(2j * pi * f1/fs) + wk2 = np.exp(-(1j * pi * scale * k**2) / m) + + self.w = cmath.exp(-2j*pi/m * scale) + self.a = a + self.m, self.n = m, n + + ak = np.exp(-2j * pi * f1/fs * k[:n]) + self._Awk2 = ak * wk2[:n] + + nfft = next_fast_len(n + m - 1) + self._nfft = nfft + self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) + self._wk2 = wk2[:m] + self._yidx = slice(n-1, n+m-1) + + +def czt(x, m=None, w=None, a=1+0j, *, axis=-1): + """ + Compute the frequency response around a spiral in the Z plane. + + Parameters + ---------- + x : array + The signal to transform. + m : int, optional + The number of output points desired. Default is the length of the + input data. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. 
+ axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + An array of the same dimensions as `x`, but with the length of the + transformed axis set to `m`. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + zoom_fft : Convenience function for partial FFT calculations. + + Notes + ----- + The defaults are chosen such that ``signal.czt(x)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is + equivalent to ``fft.fft(x, m)``. + + If the transform needs to be repeated, use `CZT` to construct a + specialized transform function which can be reused without + recomputing constants. + + An example application is in system identification, repeatedly evaluating + small slices of the z-transform of a system, around where a pole is + expected to exist, to refine the estimate of the pole's true location. [1]_ + + References + ---------- + .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 20 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + + Examples + -------- + Generate a sinusoid: + + >>> import numpy as np + >>> f1, f2, fs = 8, 10, 200 # Hz + >>> t = np.linspace(0, 1, fs, endpoint=False) + >>> x = np.sin(2*np.pi*t*f2) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, x) + >>> plt.axis([0, 1, -1.1, 1.1]) + >>> plt.show() + + Its discrete Fourier transform has all of its energy in a single frequency + bin: + + >>> from scipy.fft import rfft, rfftfreq + >>> from scipy.signal import czt, czt_points + >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x))) + >>> plt.margins(0, 0.1) + >>> plt.show() + + However, if the sinusoid is logarithmically-decaying: + + >>> x = np.exp(-t*f1) * np.sin(2*np.pi*t*f2) + >>> plt.plot(t, x) + >>> plt.axis([0, 1, -1.1, 1.1]) + >>> plt.show() + + the DFT will have spectral leakage: + + >>> plt.plot(rfftfreq(fs, 
1/fs), abs(rfft(x))) + >>> plt.margins(0, 0.1) + >>> plt.show() + + While the DFT always samples the z-transform around the unit circle, the + chirp z-transform allows us to sample the Z-transform along any + logarithmic spiral, such as a circle with radius smaller than unity: + + >>> M = fs // 2 # Just positive frequencies, like rfft + >>> a = np.exp(-f1/fs) # Starting point of the circle, radius < 1 + >>> w = np.exp(-1j*np.pi/M) # "Step size" of circle + >>> points = czt_points(M + 1, w, a) # M + 1 to include Nyquist + >>> plt.plot(points.real, points.imag, '.') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal'); plt.axis([-1.05, 1.05, -0.05, 1.05]) + >>> plt.show() + + With the correct radius, this transforms the decaying sinusoid (and others + with the same decay rate) without spectral leakage: + + >>> z_vals = czt(x, M + 1, w, a) # Include Nyquist for comparison to rfft + >>> freqs = np.angle(points)*fs/(2*np.pi) # angle = omega, radius = sigma + >>> plt.plot(freqs, abs(z_vals)) + >>> plt.margins(0, 0.1) + >>> plt.show() + """ + x = np.asarray(x) + transform = CZT(x.shape[axis], m=m, w=w, a=a) + return transform(x, axis=axis) + + +def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1): + """ + Compute the DFT of `x` only for frequencies in range `fn`. + + Parameters + ---------- + x : array + The signal to transform. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. The default is the length of `x`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. 
Otherwise, it is not included. + Default is False. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + The transformed signal. The Fourier transform will be calculated + at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m. + + See Also + -------- + ZoomFFT : Class that creates a callable partial FFT function. + + Notes + ----- + The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent + to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)`` + is equivalent to ``fft.fft(x, m)``. + + To graph the magnitude of the resulting transform, use:: + + plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m))) + + If the transform needs to be repeated, use `ZoomFFT` to construct + a specialized transform function which can be reused without + recomputing constants. + + Examples + -------- + To plot the transform results use something like the following: + + >>> import numpy as np + >>> from scipy.signal import zoom_fft + >>> t = np.linspace(0, 1, 1021) + >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t) + >>> f1, f2 = 5, 27 + >>> X = zoom_fft(x, [f1, f2], len(x), fs=1021) + >>> f = np.linspace(f1, f2, len(x)) + >>> import matplotlib.pyplot as plt + >>> plt.plot(f, 20*np.log10(np.abs(X))) + >>> plt.show() + """ + x = np.asarray(x) + transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint) + return transform(x, axis=axis) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..418bca652022b2dbb09d35b5f1def383f2c238c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py @@ -0,0 +1,1301 @@ +"""Functions for FIR filter design.""" + +from math import ceil, log +import operator +import warnings + 
+import numpy as np +from numpy.fft import irfft, fft, ifft +from scipy.special import sinc +from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning, + lstsq) +from scipy._lib.deprecation import _NoValue, _deprecate_positional_args +from scipy.signal._arraytools import _validate_fs + +from . import _sigtools + +__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord', + 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase'] + + +def _get_fs(fs, nyq): + """ + Utility for replacing the argument 'nyq' (with default 1) with 'fs'. + """ + if nyq is _NoValue and fs is None: + fs = 2 + elif nyq is not _NoValue: + if fs is not None: + raise ValueError("Values cannot be given for both 'nyq' and 'fs'.") + msg = ("Keyword argument 'nyq' is deprecated in favour of 'fs' and " + "will be removed in SciPy 1.14.0.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + if nyq is None: + fs = 2 + else: + fs = 2*nyq + return fs + + +# Some notes on function parameters: +# +# `cutoff` and `width` are given as numbers between 0 and 1. These are +# relative frequencies, expressed as a fraction of the Nyquist frequency. +# For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width +# of 300 Hz. +# +# The `order` of a FIR filter is one less than the number of taps. +# This is a potential source of confusion, so in the following code, +# we will always use the number of taps as the parameterization of +# the 'size' of the filter. The "number of taps" means the number +# of coefficients, which is the same as the length of the impulse +# response of the filter. + + +def kaiser_beta(a): + """Compute the Kaiser parameter `beta`, given the attenuation `a`. + + Parameters + ---------- + a : float + The desired attenuation in the stopband and maximum ripple in + the passband, in dB. This should be a *positive* number. + + Returns + ------- + beta : float + The `beta` parameter to be used in the formula for a Kaiser window. 
+ + References + ---------- + Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. + + Examples + -------- + Suppose we want to design a lowpass filter, with 65 dB attenuation + in the stop band. The Kaiser window parameter to be used in the + window method is computed by ``kaiser_beta(65)``: + + >>> from scipy.signal import kaiser_beta + >>> kaiser_beta(65) + 6.20426 + + """ + if a > 50: + beta = 0.1102 * (a - 8.7) + elif a > 21: + beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21) + else: + beta = 0.0 + return beta + + +def kaiser_atten(numtaps, width): + """Compute the attenuation of a Kaiser FIR filter. + + Given the number of taps `N` and the transition width `width`, compute the + attenuation `a` in dB, given by Kaiser's formula: + + a = 2.285 * (N - 1) * pi * width + 7.95 + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. + width : float + The desired width of the transition region between passband and + stopband (or, in general, at any discontinuity) for the filter, + expressed as a fraction of the Nyquist frequency. + + Returns + ------- + a : float + The attenuation of the ripple, in dB. + + See Also + -------- + kaiserord, kaiser_beta + + Examples + -------- + Suppose we want to design a FIR filter using the Kaiser window method + that will have 211 taps and a transition width of 9 Hz for a signal that + is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency, + the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB) + is computed as follows: + + >>> from scipy.signal import kaiser_atten + >>> kaiser_atten(211, 0.0375) + 64.48099630593983 + + """ + a = 2.285 * (numtaps - 1) * np.pi * width + 7.95 + return a + + +def kaiserord(ripple, width): + """ + Determine the filter window parameters for the Kaiser window method. 
+
+    The parameters returned by this function are generally used to create
+    a finite impulse response filter using the window method, with either
+    `firwin` or `firwin2`.
+
+    Parameters
+    ----------
+    ripple : float
+        Upper bound for the deviation (in dB) of the magnitude of the
+        filter's frequency response from that of the desired filter (not
+        including frequencies in any transition intervals). That is, if w
+        is the frequency expressed as a fraction of the Nyquist frequency,
+        A(w) is the actual frequency response of the filter and D(w) is the
+        desired frequency response, the design requirement is that::
+
+            abs(A(w) - D(w)) < 10**(-ripple/20)
+
+        for 0 <= w <= 1 and w not in a transition interval.
+    width : float
+        Width of transition region, normalized so that 1 corresponds to pi
+        radians / sample. That is, the frequency is expressed as a fraction
+        of the Nyquist frequency.
+
+    Returns
+    -------
+    numtaps : int
+        The length of the Kaiser window.
+    beta : float
+        The beta parameter for the Kaiser window.
+
+    See Also
+    --------
+    kaiser_beta, kaiser_atten
+
+    Notes
+    -----
+    There are several ways to obtain the Kaiser window:
+
+    - ``signal.windows.kaiser(numtaps, beta, sym=True)``
+    - ``signal.get_window(beta, numtaps)``
+    - ``signal.get_window(('kaiser', beta), numtaps)``
+
+    The empirical equations discovered by Kaiser are used.
+
+    References
+    ----------
+    Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476.
+
+    Examples
+    --------
+    We will use the Kaiser window method to design a lowpass FIR filter
+    for a signal that is sampled at 1000 Hz.
+
+    We want at least 65 dB rejection in the stop band, and in the pass
+    band the gain should vary no more than 0.5%.
+
+    We want a cutoff frequency of 175 Hz, with a transition between the
+    pass band and the stop band of 24 Hz. That is, in the band [0, 163],
+    the gain varies no more than 0.5%, and in the band [187, 500], the
+    signal is attenuated by at least 65 dB.
+ + >>> import numpy as np + >>> from scipy.signal import kaiserord, firwin, freqz + >>> import matplotlib.pyplot as plt + >>> fs = 1000.0 + >>> cutoff = 175 + >>> width = 24 + + The Kaiser method accepts just a single parameter to control the pass + band ripple and the stop band rejection, so we use the more restrictive + of the two. In this case, the pass band ripple is 0.005, or 46.02 dB, + so we will use 65 dB as the design parameter. + + Use `kaiserord` to determine the length of the filter and the + parameter for the Kaiser window. + + >>> numtaps, beta = kaiserord(65, width/(0.5*fs)) + >>> numtaps + 167 + >>> beta + 6.20426 + + Use `firwin` to create the FIR filter. + + >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta), + ... scale=False, fs=fs) + + Compute the frequency response of the filter. ``w`` is the array of + frequencies, and ``h`` is the corresponding complex array of frequency + responses. + + >>> w, h = freqz(taps, worN=8000) + >>> w *= 0.5*fs/np.pi # Convert w to Hz. + + Compute the deviation of the magnitude of the filter's response from + that of the ideal lowpass filter. Values in the transition region are + set to ``nan``, so they won't appear in the plot. + + >>> ideal = w < cutoff # The "ideal" frequency response. + >>> deviation = np.abs(np.abs(h) - ideal) + >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan + + Plot the deviation. A close look at the left end of the stop band shows + that the requirement for 65 dB attenuation is violated in the first lobe + by about 0.125 dB. This is not unusual for the Kaiser window method. 
+ + >>> plt.plot(w, 20*np.log10(np.abs(deviation))) + >>> plt.xlim(0, 0.5*fs) + >>> plt.ylim(-90, -60) + >>> plt.grid(alpha=0.25) + >>> plt.axhline(-65, color='r', ls='--', alpha=0.3) + >>> plt.xlabel('Frequency (Hz)') + >>> plt.ylabel('Deviation from ideal (dB)') + >>> plt.title('Lowpass Filter Frequency Response') + >>> plt.show() + + """ + A = abs(ripple) # in case somebody is confused as to what's meant + if A < 8: + # Formula for N is not valid in this range. + raise ValueError("Requested maximum ripple attenuation %f is too " + "small for the Kaiser formula." % A) + beta = kaiser_beta(A) + + # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter + # order, so we have to add 1 to get the number of taps. + numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1 + + return int(ceil(numtaps)), beta + + +@_deprecate_positional_args(version="1.14") +def firwin(numtaps, cutoff, *, width=None, window='hamming', pass_zero=True, + scale=True, nyq=_NoValue, fs=None): + """ + FIR filter design using the window method. + + This function computes the coefficients of a finite impulse response + filter. The filter will have linear phase; it will be Type I if + `numtaps` is odd and Type II if `numtaps` is even. + + Type II filters always have zero response at the Nyquist frequency, so a + ValueError exception is raised if firwin is called with `numtaps` even and + having a passband whose right end is at the Nyquist frequency. + + Parameters + ---------- + numtaps : int + Length of the filter (number of coefficients, i.e. the filter + order + 1). `numtaps` must be odd if a passband includes the + Nyquist frequency. + cutoff : float or 1-D array_like + Cutoff frequency of filter (expressed in the same units as `fs`) + OR an array of cutoff frequencies (that is, band edges). In the + latter case, the frequencies in `cutoff` should be positive and + monotonically increasing between 0 and `fs/2`. The values 0 and + `fs/2` must not be included in `cutoff`. 
+ width : float or None, optional + If `width` is not None, then assume it is the approximate width + of the transition region (expressed in the same units as `fs`) + for use in Kaiser FIR filter design. In this case, the `window` + argument is ignored. + window : string or tuple of string and parameter values, optional + Desired window to use. See `scipy.signal.get_window` for a list + of windows and required parameters. + pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional + If True, the gain at the frequency 0 (i.e., the "DC gain") is 1. + If False, the DC gain is 0. Can also be a string argument for the + desired filter type (equivalent to ``btype`` in IIR design functions). + + .. versionadded:: 1.3.0 + Support for string arguments. + scale : bool, optional + Set to True to scale the coefficients so that the frequency + response is exactly unity at a certain frequency. + That frequency is either: + + - 0 (DC) if the first passband starts at 0 (i.e. pass_zero + is True) + - `fs/2` (the Nyquist frequency) if the first passband ends at + `fs/2` (i.e the filter is a single band highpass filter); + center of first passband otherwise + + nyq : float, optional, deprecated + This is the Nyquist frequency. Each frequency in `cutoff` must be + between 0 and `nyq`. Default is 1. + + .. deprecated:: 1.0.0 + `firwin` keyword argument `nyq` is deprecated in favour of `fs` and + will be removed in SciPy 1.14.0. + fs : float, optional + The sampling frequency of the signal. Each frequency in `cutoff` + must be between 0 and ``fs/2``. Default is 2. + + Returns + ------- + h : (numtaps,) ndarray + Coefficients of length `numtaps` FIR filter. + + Raises + ------ + ValueError + If any value in `cutoff` is less than or equal to 0 or greater + than or equal to ``fs/2``, if the values in `cutoff` are not strictly + monotonically increasing, or if `numtaps` is even but a passband + includes the Nyquist frequency. 
+ + See Also + -------- + firwin2 + firls + minimum_phase + remez + + Examples + -------- + Low-pass from 0 to f: + + >>> from scipy import signal + >>> numtaps = 3 + >>> f = 0.1 + >>> signal.firwin(numtaps, f) + array([ 0.06799017, 0.86401967, 0.06799017]) + + Use a specific window function: + + >>> signal.firwin(numtaps, f, window='nuttall') + array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04]) + + High-pass ('stop' from 0 to f): + + >>> signal.firwin(numtaps, f, pass_zero=False) + array([-0.00859313, 0.98281375, -0.00859313]) + + Band-pass: + + >>> f1, f2 = 0.1, 0.2 + >>> signal.firwin(numtaps, [f1, f2], pass_zero=False) + array([ 0.06301614, 0.88770441, 0.06301614]) + + Band-stop: + + >>> signal.firwin(numtaps, [f1, f2]) + array([-0.00801395, 1.0160279 , -0.00801395]) + + Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]): + + >>> f3, f4 = 0.3, 0.4 + >>> signal.firwin(numtaps, [f1, f2, f3, f4]) + array([-0.01376344, 1.02752689, -0.01376344]) + + Multi-band (passbands are [f1, f2] and [f3,f4]): + + >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False) + array([ 0.04890915, 0.91284326, 0.04890915]) + + """ + # The major enhancements to this function added in November 2010 were + # developed by Tom Krauss (see ticket #902). + fs = _validate_fs(fs, allow_none=True) + + nyq = 0.5 * _get_fs(fs, nyq) + + cutoff = np.atleast_1d(cutoff) / float(nyq) + + # Check for invalid input. + if cutoff.ndim > 1: + raise ValueError("The cutoff argument must be at most " + "one-dimensional.") + if cutoff.size == 0: + raise ValueError("At least one cutoff frequency must be given.") + if cutoff.min() <= 0 or cutoff.max() >= 1: + raise ValueError("Invalid cutoff frequency: frequencies must be " + "greater than 0 and less than fs/2.") + if np.any(np.diff(cutoff) <= 0): + raise ValueError("Invalid cutoff frequencies: the frequencies " + "must be strictly increasing.") + + if width is not None: + # A width was given. 
Find the beta parameter of the Kaiser window + # and set `window`. This overrides the value of `window` passed in. + atten = kaiser_atten(numtaps, float(width) / nyq) + beta = kaiser_beta(atten) + window = ('kaiser', beta) + + if isinstance(pass_zero, str): + if pass_zero in ('bandstop', 'lowpass'): + if pass_zero == 'lowpass': + if cutoff.size != 1: + raise ValueError('cutoff must have one element if ' + f'pass_zero=="lowpass", got {cutoff.shape}') + elif cutoff.size <= 1: + raise ValueError('cutoff must have at least two elements if ' + f'pass_zero=="bandstop", got {cutoff.shape}') + pass_zero = True + elif pass_zero in ('bandpass', 'highpass'): + if pass_zero == 'highpass': + if cutoff.size != 1: + raise ValueError('cutoff must have one element if ' + f'pass_zero=="highpass", got {cutoff.shape}') + elif cutoff.size <= 1: + raise ValueError('cutoff must have at least two elements if ' + f'pass_zero=="bandpass", got {cutoff.shape}') + pass_zero = False + else: + raise ValueError('pass_zero must be True, False, "bandpass", ' + '"lowpass", "highpass", or "bandstop", got ' + f'{pass_zero}') + pass_zero = bool(operator.index(pass_zero)) # ensure bool-like + + pass_nyquist = bool(cutoff.size & 1) ^ pass_zero + if pass_nyquist and numtaps % 2 == 0: + raise ValueError("A filter with an even number of coefficients must " + "have zero response at the Nyquist frequency.") + + # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff + # is even, and each pair in cutoff corresponds to passband. + cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist)) + + # `bands` is a 2-D array; each row gives the left and right edges of + # a passband. + bands = cutoff.reshape(-1, 2) + + # Build up the coefficients. + alpha = 0.5 * (numtaps - 1) + m = np.arange(0, numtaps) - alpha + h = 0 + for left, right in bands: + h += right * sinc(right * m) + h -= left * sinc(left * m) + + # Get and apply the window function. 
+ from .windows import get_window + win = get_window(window, numtaps, fftbins=False) + h *= win + + # Now handle scaling if desired. + if scale: + # Get the first passband. + left, right = bands[0] + if left == 0: + scale_frequency = 0.0 + elif right == 1: + scale_frequency = 1.0 + else: + scale_frequency = 0.5 * (left + right) + c = np.cos(np.pi * m * scale_frequency) + s = np.sum(h * c) + h /= s + + return h + + +# Original version of firwin2 from scipy ticket #457, submitted by "tash". +# +# Rewritten by Warren Weckesser, 2010. +@_deprecate_positional_args(version="1.14") +def firwin2(numtaps, freq, gain, *, nfreqs=None, window='hamming', nyq=_NoValue, + antisymmetric=False, fs=None): + """ + FIR filter design using the window method. + + From the given frequencies `freq` and corresponding gains `gain`, + this function constructs an FIR filter with linear phase and + (approximately) the given frequency response. + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. `numtaps` must be less than + `nfreqs`. + freq : array_like, 1-D + The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being + Nyquist. The Nyquist frequency is half `fs`. + The values in `freq` must be nondecreasing. A value can be repeated + once to implement a discontinuity. The first value in `freq` must + be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must + not be repeated. + gain : array_like + The filter gains at the frequency sampling points. Certain + constraints to gain values, depending on the filter type, are applied, + see Notes for details. + nfreqs : int, optional + The size of the interpolation mesh used to construct the filter. + For most efficient behavior, this should be a power of 2 plus 1 + (e.g, 129, 257, etc). The default is one more than the smallest + power of 2 that is not less than `numtaps`. `nfreqs` must be greater + than `numtaps`. 
+    window : string or (string, float) or float, or None, optional
+        Window function to use. Default is "hamming". See
+        `scipy.signal.get_window` for the complete list of possible values.
+        If None, no window function is applied.
+    nyq : float, optional, deprecated
+        This is the Nyquist frequency. Each frequency in `freq` must be
+        between 0 and `nyq`. Default is 1.
+
+        .. deprecated:: 1.0.0
+           `firwin2` keyword argument `nyq` is deprecated in favour of `fs` and
+           will be removed in SciPy 1.14.0.
+    antisymmetric : bool, optional
+        Whether resulting impulse response is symmetric/antisymmetric.
+        See Notes for more details.
+    fs : float, optional
+        The sampling frequency of the signal. Each frequency in `freq`
+        must be between 0 and ``fs/2``. Default is 2.
+
+    Returns
+    -------
+    taps : ndarray
+        The filter coefficients of the FIR filter, as a 1-D array of length
+        `numtaps`.
+
+    See Also
+    --------
+    firls
+    firwin
+    minimum_phase
+    remez
+
+    Notes
+    -----
+    From the given set of frequencies and gains, the desired response is
+    constructed in the frequency domain. The inverse FFT is applied to the
+    desired response to create the associated convolution kernel, and the
+    first `numtaps` coefficients of this kernel, scaled by `window`, are
+    returned.
+
+    The FIR filter will have linear phase. The type of filter is determined by
+    the value of `numtaps` and the `antisymmetric` flag.
+    There are four possible combinations:
+
+    - odd `numtaps`, `antisymmetric` is False, type I filter is produced
+    - even `numtaps`, `antisymmetric` is False, type II filter is produced
+    - odd `numtaps`, `antisymmetric` is True, type III filter is produced
+    - even `numtaps`, `antisymmetric` is True, type IV filter is produced
+
+    The magnitude response of all but type I filters is subject to the
+    following constraints:
+
+    - type II -- zero at the Nyquist frequency
+    - type III -- zero at zero and Nyquist frequencies
+    - type IV -- zero at zero frequency
+
+    ..
versionadded:: 0.9.0 + + References + ---------- + .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal + Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989). + (See, for example, Section 7.4.) + + .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital + Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm + + Examples + -------- + A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and + that decreases linearly on [0.5, 1.0] from 1 to 0: + + >>> from scipy import signal + >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) + >>> print(taps[72:78]) + [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961] + + """ + fs = _validate_fs(fs, allow_none=True) + nyq = 0.5 * _get_fs(fs, nyq) + + if len(freq) != len(gain): + raise ValueError('freq and gain must be of same length.') + + if nfreqs is not None and numtaps >= nfreqs: + raise ValueError(('ntaps must be less than nfreqs, but firwin2 was ' + 'called with ntaps=%d and nfreqs=%s') % + (numtaps, nfreqs)) + + if freq[0] != 0 or freq[-1] != nyq: + raise ValueError('freq must start with 0 and end with fs/2.') + d = np.diff(freq) + if (d < 0).any(): + raise ValueError('The values in freq must be nondecreasing.') + d2 = d[:-1] + d[1:] + if (d2 == 0).any(): + raise ValueError('A value in freq must not occur more than twice.') + if freq[1] == 0: + raise ValueError('Value 0 must not be repeated in freq') + if freq[-2] == nyq: + raise ValueError('Value fs/2 must not be repeated in freq') + + if antisymmetric: + if numtaps % 2 == 0: + ftype = 4 + else: + ftype = 3 + else: + if numtaps % 2 == 0: + ftype = 2 + else: + ftype = 1 + + if ftype == 2 and gain[-1] != 0.0: + raise ValueError("A Type II filter must have zero gain at the " + "Nyquist frequency.") + elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0): + raise ValueError("A Type III filter must have zero gain at zero " + "and Nyquist frequencies.") + elif ftype == 4 and gain[0] 
!= 0.0: + raise ValueError("A Type IV filter must have zero gain at zero " + "frequency.") + + if nfreqs is None: + nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2))) + + if (d == 0).any(): + # Tweak any repeated values in freq so that interp works. + freq = np.array(freq, copy=True) + eps = np.finfo(float).eps * nyq + for k in range(len(freq) - 1): + if freq[k] == freq[k + 1]: + freq[k] = freq[k] - eps + freq[k + 1] = freq[k + 1] + eps + # Check if freq is strictly increasing after tweak + d = np.diff(freq) + if (d <= 0).any(): + raise ValueError("freq cannot contain numbers that are too close " + "(within eps * (fs/2): " + f"{eps}) to a repeated value") + + # Linearly interpolate the desired response on a uniform mesh `x`. + x = np.linspace(0.0, nyq, nfreqs) + fx = np.interp(x, freq, gain) + + # Adjust the phases of the coefficients so that the first `ntaps` of the + # inverse FFT are the desired filter coefficients. + shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq) + if ftype > 2: + shift *= 1j + + fx2 = fx * shift + + # Use irfft to compute the inverse FFT. + out_full = irfft(fx2) + + if window is not None: + # Create the window to apply to the filter coefficients. + from .windows import get_window + wind = get_window(window, numtaps, fftbins=False) + else: + wind = 1 + + # Keep only the first `numtaps` coefficients in `out`, and multiply by + # the window. + out = out_full[:numtaps] * wind + + if ftype == 3: + out[out.size // 2] = 0.0 + + return out + + +@_deprecate_positional_args(version="1.14") +def remez(numtaps, bands, desired, *, weight=None, Hz=_NoValue, type='bandpass', + maxiter=25, grid_density=16, fs=None): + """ + Calculate the minimax optimal filter using the Remez exchange algorithm. + + Calculate the filter-coefficients for the finite impulse response + (FIR) filter whose transfer function minimizes the maximum error + between the desired gain and the realized gain in the specified + frequency bands using the Remez exchange algorithm. 
+ + Parameters + ---------- + numtaps : int + The desired number of taps in the filter. The number of taps is + the number of terms in the filter, or the filter order plus one. + bands : array_like + A monotonic sequence containing the band edges. + All elements must be non-negative and less than half the sampling + frequency as given by `fs`. + desired : array_like + A sequence half the size of bands containing the desired gain + in each of the specified bands. + weight : array_like, optional + A relative weighting to give to each band region. The length of + `weight` has to be half the length of `bands`. + Hz : scalar, optional, deprecated + The sampling frequency in Hz. Default is 1. + + .. deprecated:: 1.0.0 + `remez` keyword argument `Hz` is deprecated in favour of `fs` and + will be removed in SciPy 1.14.0. + type : {'bandpass', 'differentiator', 'hilbert'}, optional + The type of filter: + + * 'bandpass' : flat response in bands. This is the default. + + * 'differentiator' : frequency proportional response in bands. + + * 'hilbert' : filter with odd symmetry, that is, type III + (for even order) or type IV (for odd order) + linear phase filters. + + maxiter : int, optional + Maximum number of iterations of the algorithm. Default is 25. + grid_density : int, optional + Grid density. The dense grid used in `remez` is of size + ``(numtaps + 1) * grid_density``. Default is 16. + fs : float, optional + The sampling frequency of the signal. Default is 1. + + Returns + ------- + out : ndarray + A rank-1 array containing the coefficients of the optimal + (in a minimax sense) filter. + + See Also + -------- + firls + firwin + firwin2 + minimum_phase + + References + ---------- + .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the + design of optimum FIR linear phase digital filters", + IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973. + .. [2] J. H. McClellan, T. W. Parks and L. R. 
Rabiner, "A Computer + Program for Designing Optimum FIR Linear Phase Digital + Filters", IEEE Trans. Audio Electroacoust., vol. AU-21, + pp. 506-525, 1973. + + Examples + -------- + In these examples, `remez` is used to design low-pass, high-pass, + band-pass and band-stop filters. The parameters that define each filter + are the filter order, the band boundaries, the transition widths of the + boundaries, the desired gains in each band, and the sampling frequency. + + We'll use a sample frequency of 22050 Hz in all the examples. In each + example, the desired gain in each band is either 0 (for a stop band) + or 1 (for a pass band). + + `freqz` is used to compute the frequency response of each filter, and + the utility function ``plot_response`` defined below is used to plot + the response. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> fs = 22050 # Sample rate, Hz + + >>> def plot_response(w, h, title): + ... "Utility function to plot response functions" + ... fig = plt.figure() + ... ax = fig.add_subplot(111) + ... ax.plot(w, 20*np.log10(np.abs(h))) + ... ax.set_ylim(-40, 5) + ... ax.grid(True) + ... ax.set_xlabel('Frequency (Hz)') + ... ax.set_ylabel('Gain (dB)') + ... ax.set_title(title) + + The first example is a low-pass filter, with cutoff frequency 8 kHz. + The filter length is 325, and the transition width from pass to stop + is 100 Hz. + + >>> cutoff = 8000.0 # Desired cutoff frequency, Hz + >>> trans_width = 100 # Width of transition from pass to stop, Hz + >>> numtaps = 325 # Size of the FIR filter. + >>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], + ... 
[1, 0], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Low-pass Filter") + >>> plt.show() + + This example shows a high-pass filter: + + >>> cutoff = 2000.0 # Desired cutoff frequency, Hz + >>> trans_width = 250 # Width of transition from pass to stop, Hz + >>> numtaps = 125 # Size of the FIR filter. + >>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs], + ... [0, 1], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "High-pass Filter") + >>> plt.show() + + This example shows a band-pass filter with a pass-band from 2 kHz to + 5 kHz. The transition width is 260 Hz and the length of the filter + is 63, which is smaller than in the other examples: + + >>> band = [2000, 5000] # Desired pass band, Hz + >>> trans_width = 260 # Width of transition from pass to stop, Hz + >>> numtaps = 63 # Size of the FIR filter. + >>> edges = [0, band[0] - trans_width, band[0], band[1], + ... band[1] + trans_width, 0.5*fs] + >>> taps = signal.remez(numtaps, edges, [0, 1, 0], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Band-pass Filter") + >>> plt.show() + + The low order leads to higher ripple and less steep transitions. + + The next example shows a band-stop filter. + + >>> band = [6000, 8000] # Desired stop band, Hz + >>> trans_width = 200 # Width of transition from pass to stop, Hz + >>> numtaps = 175 # Size of the FIR filter. + >>> edges = [0, band[0] - trans_width, band[0], band[1], + ... 
band[1] + trans_width, 0.5*fs] + >>> taps = signal.remez(numtaps, edges, [1, 0, 1], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Band-stop Filter") + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + if Hz is _NoValue and fs is None: + fs = 1.0 + elif Hz is not _NoValue: + if fs is not None: + raise ValueError("Values cannot be given for both 'Hz' and 'fs'.") + msg = ("'remez' keyword argument 'Hz' is deprecated in favour of 'fs'" + " and will be removed in SciPy 1.14.0.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + fs = Hz + + # Convert type + try: + tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type] + except KeyError as e: + raise ValueError("Type must be 'bandpass', 'differentiator', " + "or 'hilbert'") from e + + # Convert weight + if weight is None: + weight = [1] * len(desired) + + bands = np.asarray(bands).copy() + return _sigtools._remez(numtaps, bands, desired, weight, tnum, fs, + maxiter, grid_density) + + +@_deprecate_positional_args(version="1.14") +def firls(numtaps, bands, desired, *, weight=None, nyq=_NoValue, fs=None): + """ + FIR filter design using least-squares error minimization. + + Calculate the filter coefficients for the linear-phase finite + impulse response (FIR) filter which has the best approximation + to the desired frequency response described by `bands` and + `desired` in the least squares sense (i.e., the integral of the + weighted mean-squared error within the specified bands is + minimized). + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. `numtaps` must be odd. + bands : array_like + A monotonic nondecreasing sequence containing the band edges in + Hz. All elements must be non-negative and less than or equal to + the Nyquist frequency given by `nyq`. The bands are specified as + frequency pairs, thus, if using a 1D array, its length must be + even, e.g., `np.array([0, 1, 2, 3, 4, 5])`. 
Alternatively, the + bands can be specified as an nx2 sized 2D array, where n is the + number of bands, e.g, `np.array([[0, 1], [2, 3], [4, 5]])`. + desired : array_like + A sequence the same size as `bands` containing the desired gain + at the start and end point of each band. + weight : array_like, optional + A relative weighting to give to each band region when solving + the least squares problem. `weight` has to be half the size of + `bands`. + nyq : float, optional, deprecated + This is the Nyquist frequency. Each frequency in `bands` must be + between 0 and `nyq` (inclusive). Default is 1. + + .. deprecated:: 1.0.0 + `firls` keyword argument `nyq` is deprecated in favour of `fs` and + will be removed in SciPy 1.14.0. + fs : float, optional + The sampling frequency of the signal. Each frequency in `bands` + must be between 0 and ``fs/2`` (inclusive). Default is 2. + + Returns + ------- + coeffs : ndarray + Coefficients of the optimal (in a least squares sense) FIR filter. + + See Also + -------- + firwin + firwin2 + minimum_phase + remez + + Notes + ----- + This implementation follows the algorithm given in [1]_. + As noted there, least squares design has multiple advantages: + + 1. Optimal in a least-squares sense. + 2. Simple, non-iterative method. + 3. The general solution can obtained by solving a linear + system of equations. + 4. Allows the use of a frequency dependent weighting function. + + This function constructs a Type I linear phase FIR filter, which + contains an odd number of `coeffs` satisfying for :math:`n < numtaps`: + + .. math:: coeffs(n) = coeffs(numtaps - 1 - n) + + The odd number of coefficients and filter symmetry avoid boundary + conditions that could otherwise occur at the Nyquist and 0 frequencies + (e.g., for Type II, III, or IV variants). + + .. versionadded:: 0.18 + + References + ---------- + .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares. + OpenStax CNX. Aug 9, 2005. 
+ http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7 + + Examples + -------- + We want to construct a band-pass filter. Note that the behavior in the + frequency ranges between our stop bands and pass bands is unspecified, + and thus may overshoot depending on the parameters of our filter: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> fig, axs = plt.subplots(2) + >>> fs = 10.0 # Hz + >>> desired = (0, 0, 1, 1, 0, 0) + >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))): + ... fir_firls = signal.firls(73, bands, desired, fs=fs) + ... fir_remez = signal.remez(73, bands, desired[::2], fs=fs) + ... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs) + ... hs = list() + ... ax = axs[bi] + ... for fir in (fir_firls, fir_remez, fir_firwin2): + ... freq, response = signal.freqz(fir) + ... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0]) + ... for band, gains in zip(zip(bands[::2], bands[1::2]), + ... zip(desired[::2], desired[1::2])): + ... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2) + ... if bi == 0: + ... ax.legend(hs, ('firls', 'remez', 'firwin2'), + ... loc='lower center', frameon=False) + ... else: + ... ax.set_xlabel('Frequency (Hz)') + ... ax.grid(True) + ... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude') + ... + >>> fig.tight_layout() + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + nyq = 0.5 * _get_fs(fs, nyq) + + numtaps = int(numtaps) + if numtaps % 2 == 0 or numtaps < 1: + raise ValueError("numtaps must be odd and >= 1") + M = (numtaps-1) // 2 + + # normalize bands 0->1 and make it 2 columns + nyq = float(nyq) + if nyq <= 0: + raise ValueError('nyq must be positive, got %s <= 0.' 
% nyq) + bands = np.asarray(bands).flatten() / nyq + if len(bands) % 2 != 0: + raise ValueError("bands must contain frequency pairs.") + if (bands < 0).any() or (bands > 1).any(): + raise ValueError("bands must be between 0 and 1 relative to Nyquist") + bands.shape = (-1, 2) + + # check remaining params + desired = np.asarray(desired).flatten() + if bands.size != desired.size: + raise ValueError("desired must have one entry per frequency, got {} " + "gains for {} frequencies.".format(desired.size, bands.size)) + desired.shape = (-1, 2) + if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any(): + raise ValueError("bands must be monotonically nondecreasing and have " + "width > 0.") + if (bands[:-1, 1] > bands[1:, 0]).any(): + raise ValueError("bands must not overlap.") + if (desired < 0).any(): + raise ValueError("desired must be non-negative.") + if weight is None: + weight = np.ones(len(desired)) + weight = np.asarray(weight).flatten() + if len(weight) != len(desired): + raise ValueError("weight must be the same size as the number of " + f"band pairs ({len(bands)}).") + if (weight < 0).any(): + raise ValueError("weight must be non-negative.") + + # Set up the linear matrix equation to be solved, Qa = b + + # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n) + # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel. + + # We omit the factor of 0.5 above, instead adding it during coefficient + # calculation. + + # We also omit the 1/π from both Q and b equations, as they cancel + # during solving. + + # We have that: + # q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π) + # Using our normalization ω=πf and with a constant weight W over each + # interval f1->f2 we get: + # q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf + # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). 
+ n = np.arange(numtaps)[:, np.newaxis, np.newaxis] + q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight) + + # Now we assemble our sum of Toeplitz and Hankel + Q1 = toeplitz(q[:M+1]) + Q2 = hankel(q[:M+1], q[M:]) + Q = Q1 + Q2 + + # Now for b(n) we have that: + # b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π) + # Using our normalization ω=πf and with a constant weight W over each + # interval and a linear term for D(ω) we get (over each f1->f2 interval): + # b(n) = W ∫ (mf+c)cos(πnf)df + # = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2 + # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). + n = n[:M + 1] # only need this many coefficients here + # Choose m and c such that we are at the start and end weights + m = (np.diff(desired, axis=1) / np.diff(bands, axis=1)) + c = desired[:, [0]] - bands[:, [0]] * m + b = bands * (m*bands + c) * np.sinc(bands * n) + # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0 + b[0] -= m * bands * bands / 2. + b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2 + b = np.dot(np.diff(b, axis=2)[:, :, 0], weight) + + # Now we can solve the equation + try: # try the fast way + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + a = solve(Q, b, assume_a="pos", check_finite=False) + for ww in w: + if (ww.category == LinAlgWarning and + str(ww.message).startswith('Ill-conditioned matrix')): + raise LinAlgError(str(ww.message)) + except LinAlgError: # in case Q is rank deficient + # This is faster than pinvh, even though we don't explicitly use + # the symmetry here. gelsy was faster than gelsd and gelss in + # some non-exhaustive tests. + a = lstsq(Q, b, lapack_driver='gelsy')[0] + + # make coefficients symmetric (linear phase) + coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:])) + return coeffs + + +def _dhtm(mag): + """Compute the modified 1-D discrete Hilbert transform + + Parameters + ---------- + mag : ndarray + The magnitude spectrum. 
Should be 1-D with an even length, and + preferably a fast length for FFT/IFFT. + """ + # Adapted based on code by Niranjan Damera-Venkata, + # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`) + sig = np.zeros(len(mag)) + # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5 + midpt = len(mag) // 2 + sig[1:midpt] = 1 + sig[midpt+1:] = -1 + # eventually if we want to support complex filters, we will need a + # np.abs() on the mag inside the log, and should remove the .real + recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real + return recon + + +def minimum_phase(h, method='homomorphic', n_fft=None): + """Convert a linear-phase FIR filter to minimum phase + + Parameters + ---------- + h : array + Linear-phase FIR filter coefficients. + method : {'hilbert', 'homomorphic'} + The method to use: + + 'homomorphic' (default) + This method [4]_ [5]_ works best with filters with an + odd number of taps, and the resulting minimum phase filter + will have a magnitude response that approximates the square + root of the original filter's magnitude response. + + 'hilbert' + This method [1]_ is designed to be used with equiripple + filters (e.g., from `remez`) with unity or zero gain + regions. + + n_fft : int + The number of points to use for the FFT. Should be at least a + few times larger than the signal length (see Notes). + + Returns + ------- + h_minimum : array + The minimum-phase version of the filter, with length + ``(length(h) + 1) // 2``. + + See Also + -------- + firwin + firwin2 + remez + + Notes + ----- + Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection + of an FFT length to estimate the complex cepstrum of the filter. + + In the case of the Hilbert method, the deviation from the ideal + spectrum ``epsilon`` is related to the number of stopband zeros + ``n_stop`` and FFT length ``n_fft`` as:: + + epsilon = 2. 
* n_stop / n_fft + + For example, with 100 stopband zeros and a FFT length of 2048, + ``epsilon = 0.0976``. If we conservatively assume that the number of + stopband zeros is one less than the filter length, we can take the FFT + length to be the next power of 2 that satisfies ``epsilon=0.01`` as:: + + n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) + + This gives reasonable results for both the Hilbert and homomorphic + methods, and gives the value used when ``n_fft=None``. + + Alternative implementations exist for creating minimum-phase filters, + including zero inversion [2]_ and spectral factorization [3]_ [4]_. + For more information, see: + + http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters + + References + ---------- + .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and + complex minimum phase digital FIR filters," Acoustics, Speech, + and Signal Processing, 1999. Proceedings., 1999 IEEE International + Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3. + :doi:`10.1109/ICASSP.1999.756179` + .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR + filters by direct factorization," Signal Processing, + vol. 10, no. 4, pp. 369-383, Jun. 1986. + .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in + Handbook for Digital Signal Processing, chapter 4, + New York: Wiley-Interscience, 1993. + .. [4] J. S. Lim, Advanced Topics in Signal Processing. + Englewood Cliffs, N.J.: Prentice Hall, 1988. + .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck, + "Discrete-Time Signal Processing," 2nd edition. + Upper Saddle River, N.J.: Prentice Hall, 1999. + + Examples + -------- + Create an optimal linear-phase filter, then convert it to minimum phase: + + >>> import numpy as np + >>> from scipy.signal import remez, minimum_phase, freqz, group_delay + >>> import matplotlib.pyplot as plt + >>> freq = [0, 0.2, 0.3, 1.0] + >>> desired = [1, 0] + >>> h_linear = remez(151, freq, desired, fs=2.) 
+ + Convert it to minimum phase: + + >>> h_min_hom = minimum_phase(h_linear, method='homomorphic') + >>> h_min_hil = minimum_phase(h_linear, method='hilbert') + + Compare the three filters: + + >>> fig, axs = plt.subplots(4, figsize=(4, 8)) + >>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil), + ... ('-', '-', '--'), ('k', 'r', 'c')): + ... w, H = freqz(h) + ... w, gd = group_delay((h, 1)) + ... w /= np.pi + ... axs[0].plot(h, color=color, linestyle=style) + ... axs[1].plot(w, np.abs(H), color=color, linestyle=style) + ... axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style) + ... axs[3].plot(w, gd, color=color, linestyle=style) + >>> for ax in axs: + ... ax.grid(True, color='0.5') + ... ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1) + >>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples') + >>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase') + >>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])): + ... 
ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency') + >>> axs[1].set(ylabel='Magnitude') + >>> axs[2].set(ylabel='Magnitude (dB)') + >>> axs[3].set(ylabel='Group delay') + >>> plt.tight_layout() + + """ + h = np.asarray(h) + if np.iscomplexobj(h): + raise ValueError('Complex filters not supported') + if h.ndim != 1 or h.size <= 2: + raise ValueError('h must be 1-D and at least 2 samples long') + n_half = len(h) // 2 + if not np.allclose(h[-n_half:][::-1], h[:n_half]): + warnings.warn('h does not appear to by symmetric, conversion may fail', + RuntimeWarning, stacklevel=2) + if not isinstance(method, str) or method not in \ + ('homomorphic', 'hilbert',): + raise ValueError(f'method must be "homomorphic" or "hilbert", got {method!r}') + if n_fft is None: + n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) + n_fft = int(n_fft) + if n_fft < len(h): + raise ValueError('n_fft must be at least len(h)==%s' % len(h)) + if method == 'hilbert': + w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half) + H = np.real(fft(h, n_fft) * np.exp(1j * w)) + dp = max(H) - 1 + ds = 0 - min(H) + S = 4. 
/ (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2 + H += ds + H *= S + H = np.sqrt(H, out=H) + H += 1e-10 # ensure that the log does not explode + h_minimum = _dhtm(H) + else: # method == 'homomorphic' + # zero-pad; calculate the DFT + h_temp = np.abs(fft(h, n_fft)) + # take 0.25*log(|H|**2) = 0.5*log(|H|) + h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up + np.log(h_temp, out=h_temp) + h_temp *= 0.5 + # IDFT + h_temp = ifft(h_temp).real + # multiply pointwise by the homomorphic filter + # lmin[n] = 2u[n] - d[n] + win = np.zeros(n_fft) + win[0] = 1 + stop = (len(h) + 1) // 2 + win[1:stop] = 2 + if len(h) % 2: + win[stop] = 1 + h_temp *= win + h_temp = ifft(np.exp(fft(h_temp))) + h_minimum = h_temp.real + n_out = n_half + len(h) % 2 + return h_minimum[:n_out] diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..2adfeb0c5f65d251e3165054f1067a40ac564f0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py @@ -0,0 +1,533 @@ +""" +ltisys -- a collection of functions to convert linear time invariant systems +from one representation to another. +""" +import numpy +import numpy as np +from numpy import (r_, eye, atleast_2d, poly, dot, + asarray, prod, zeros, array, outer) +from scipy import linalg + +from ._filter_design import tf2zpk, zpk2tf, normalize + + +__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', + 'cont2discrete'] + + +def tf2ss(num, den): + r"""Transfer function to state-space representation. + + Parameters + ---------- + num, den : array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree. The + denominator needs to be at least as long as the numerator. 
+ + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + Examples + -------- + Convert the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + to the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> from scipy.signal import tf2ss + >>> A, B, C, D = tf2ss(num, den) + >>> A + array([[-2., -1.], + [ 1., 0.]]) + >>> B + array([[ 1.], + [ 0.]]) + >>> C + array([[ 1., 2.]]) + >>> D + array([[ 1.]]) + """ + # Controller canonical state-space representation. + # if M+1 = len(num) and K+1 = len(den) then we must have M <= K + # states are found by asserting that X(s) = U(s) / D(s) + # then Y(s) = N(s) * X(s) + # + # A, B, C, and D follow quite naturally. + # + num, den = normalize(num, den) # Strips zeros, checks arrays + nn = len(num.shape) + if nn == 1: + num = asarray([num], num.dtype) + M = num.shape[1] + K = len(den) + if M > K: + msg = "Improper transfer function. `num` is longer than `den`." + raise ValueError(msg) + if M == 0 or K == 0: # Null system + return (array([], float), array([], float), array([], float), + array([], float)) + + # pad numerator to have same number of columns has denominator + num = np.hstack((np.zeros((num.shape[0], K - M), dtype=num.dtype), num)) + + if num.shape[-1] > 0: + D = atleast_2d(num[:, 0]) + + else: + # We don't assign it an empty array because this system + # is not 'null'. It just doesn't have a non-zero D + # matrix. 
Thus, it should have a non-zero shape so that + # it can be operated on by functions like 'ss2tf' + D = array([[0]], float) + + if K == 1: + D = D.reshape(num.shape) + + return (zeros((1, 1)), zeros((1, D.shape[1])), + zeros((D.shape[0], 1)), D) + + frow = -array([den[1:]]) + A = r_[frow, eye(K - 2, K - 1)] + B = eye(K - 1, 1) + C = num[:, 1:] - outer(num[:, 0], den[1:]) + D = D.reshape((C.shape[0], B.shape[1])) + + return A, B, C, D + + +def _none_to_empty_2d(arg): + if arg is None: + return zeros((0, 0)) + else: + return arg + + +def _atleast_2d_or_none(arg): + if arg is not None: + return atleast_2d(arg) + + +def _shape_or_none(M): + if M is not None: + return M.shape + else: + return (None,) * 2 + + +def _choice_not_none(*args): + for arg in args: + if arg is not None: + return arg + + +def _restore(M, shape): + if M.shape == (0, 0): + return zeros(shape) + else: + if M.shape != shape: + raise ValueError("The input arrays have incompatible shapes.") + return M + + +def abcd_normalize(A=None, B=None, C=None, D=None): + """Check state-space matrices and ensure they are 2-D. + + If enough information on the system is provided, that is, enough + properly-shaped arrays are passed to the function, the missing ones + are built from this information, ensuring the correct number of + rows and columns. Otherwise a ValueError is raised. + + Parameters + ---------- + A, B, C, D : array_like, optional + State-space matrices. All of them are None (missing) by default. + See `ss2tf` for format. + + Returns + ------- + A, B, C, D : array + Properly shaped state-space matrices. + + Raises + ------ + ValueError + If not enough information on the system was provided. 
+ + """ + A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D)) + + MA, NA = _shape_or_none(A) + MB, NB = _shape_or_none(B) + MC, NC = _shape_or_none(C) + MD, ND = _shape_or_none(D) + + p = _choice_not_none(MA, MB, NC) + q = _choice_not_none(NB, ND) + r = _choice_not_none(MC, MD) + if p is None or q is None or r is None: + raise ValueError("Not enough information on the system.") + + A, B, C, D = map(_none_to_empty_2d, (A, B, C, D)) + A = _restore(A, (p, p)) + B = _restore(B, (p, q)) + C = _restore(C, (r, p)) + D = _restore(D, (r, q)) + + return A, B, C, D + + +def ss2tf(A, B, C, D, input=0): + r"""State-space to transfer function. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + num : 2-D ndarray + Numerator(s) of the resulting transfer function(s). `num` has one row + for each of the system's outputs. Each row is a sequence representation + of the numerator polynomial. + den : 1-D ndarray + Denominator of the resulting transfer function(s). `den` is a sequence + representation of the denominator polynomial. + + Examples + -------- + Convert the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> A = [[-2, -1], [1, 0]] + >>> B = [[1], [0]] # 2-D column vector + >>> C = [[1, 2]] # 2-D row vector + >>> D = 1 + + to the transfer function: + + .. 
math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> from scipy.signal import ss2tf + >>> ss2tf(A, B, C, D) + (array([[1., 3., 3.]]), array([ 1., 2., 1.])) + """ + # transfer function is C (sI - A)**(-1) B + D + + # Check consistency and make them all rank-2 arrays + A, B, C, D = abcd_normalize(A, B, C, D) + + nout, nin = D.shape + if input >= nin: + raise ValueError("System does not have the input specified.") + + # make SIMO from possibly MIMO system. + B = B[:, input:input + 1] + D = D[:, input:input + 1] + + try: + den = poly(A) + except ValueError: + den = 1 + + if (prod(B.shape, axis=0) == 0) and (prod(C.shape, axis=0) == 0): + num = numpy.ravel(D) + if (prod(D.shape, axis=0) == 0) and (prod(A.shape, axis=0) == 0): + den = [] + return num, den + + num_states = A.shape[0] + type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0 + num = numpy.empty((nout, num_states + 1), type_test.dtype) + for k in range(nout): + Ck = atleast_2d(C[k, :]) + num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den + + return num, den + + +def zpk2ss(z, p, k): + """Zero-pole-gain representation to state-space representation + + Parameters + ---------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + """ + return tf2ss(*zpk2tf(z, p, k)) + + +def ss2zpk(A, B, C, D, input=0): + """State-space representation to zero-pole-gain representation. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + z, p : sequence + Zeros and poles. 
+ k : float + System gain. + + """ + return tf2zpk(*ss2tf(A, B, C, D, input=input)) + + +def cont2discrete(system, dt, method="zoh", alpha=None): + """ + Transform a continuous to a discrete state-space system. + + Parameters + ---------- + system : a tuple describing the system or an instance of `lti` + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + dt : float + The discretization time step. + method : str, optional + Which method to use: + + * gbt: generalized bilinear transformation + * bilinear: Tustin's approximation ("gbt" with alpha=0.5) + * euler: Euler (or forward differencing) method ("gbt" with alpha=0) + * backward_diff: Backwards differencing ("gbt" with alpha=1.0) + * zoh: zero-order hold (default) + * foh: first-order hold (*versionadded: 1.3.0*) + * impulse: equivalent impulse response (*versionadded: 1.3.0*) + + alpha : float within [0, 1], optional + The generalized bilinear transformation weighting parameter, which + should only be specified with method="gbt", and is ignored otherwise + + Returns + ------- + sysd : tuple containing the discrete system + Based on the input type, the output will be of the form + + * (num, den, dt) for transfer function input + * (zeros, poles, gain, dt) for zeros-poles-gain input + * (A, B, C, D, dt) for state-space system input + + Notes + ----- + By default, the routine uses a Zero-Order Hold (zoh) method to perform + the transformation. Alternatively, a generalized bilinear transformation + may be used, which includes the common Tustin's bilinear approximation, + an Euler's method technique, or a backwards differencing technique. + + The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear + approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method + is based on [4]_. + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models + + .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf + + .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized + bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, + 2009. + (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf) + + .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control + of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley, + pp. 204-206, 1998. + + Examples + -------- + We can transform a continuous state-space system to a discrete one: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import cont2discrete, lti, dlti, dstep + + Define a continuous state-space system. + + >>> A = np.array([[0, 1],[-10., -3]]) + >>> B = np.array([[0],[10.]]) + >>> C = np.array([[1., 0]]) + >>> D = np.array([[0.]]) + >>> l_system = lti(A, B, C, D) + >>> t, x = l_system.step(T=np.linspace(0, 5, 100)) + >>> fig, ax = plt.subplots() + >>> ax.plot(t, x, label='Continuous', linewidth=3) + + Transform it to a discrete state-space system using several methods. + + >>> dt = 0.1 + >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']: + ... d_system = cont2discrete((A, B, C, D), dt, method=method) + ... s, x_d = dstep(d_system) + ... 
ax.step(s, np.squeeze(x_d), label=method, where='post') + >>> ax.axis([t[0], t[-1], x[0], 1.4]) + >>> ax.legend(loc='best') + >>> fig.tight_layout() + >>> plt.show() + + """ + if len(system) == 1: + return system.to_discrete() + if len(system) == 2: + sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, + alpha=alpha) + return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 3: + sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, + method=method, alpha=alpha) + return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 4: + a, b, c, d = system + else: + raise ValueError("First argument must either be a tuple of 2 (tf), " + "3 (zpk), or 4 (ss) arrays.") + + if method == 'gbt': + if alpha is None: + raise ValueError("Alpha parameter must be specified for the " + "generalized bilinear transform (gbt) method") + elif alpha < 0 or alpha > 1: + raise ValueError("Alpha parameter must be within the interval " + "[0,1] for the gbt method") + + if method == 'gbt': + # This parameter is used repeatedly - compute once here + ima = np.eye(a.shape[0]) - alpha*dt*a + ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) + bd = linalg.solve(ima, dt*b) + + # Similarly solve for the output equation matrices + cd = linalg.solve(ima.transpose(), c.transpose()) + cd = cd.transpose() + dd = d + alpha*np.dot(c, bd) + + elif method == 'bilinear' or method == 'tustin': + return cont2discrete(system, dt, method="gbt", alpha=0.5) + + elif method == 'euler' or method == 'forward_diff': + return cont2discrete(system, dt, method="gbt", alpha=0.0) + + elif method == 'backward_diff': + return cont2discrete(system, dt, method="gbt", alpha=1.0) + + elif method == 'zoh': + # Build an exponential matrix + em_upper = np.hstack((a, b)) + + # Need to stack zeros under the a and b matrices + em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), + np.zeros((b.shape[1], b.shape[1])))) + + em = np.vstack((em_upper, 
em_lower)) + ms = linalg.expm(dt * em) + + # Dispose of the lower rows + ms = ms[:a.shape[0], :] + + ad = ms[:, 0:a.shape[1]] + bd = ms[:, a.shape[1]:] + + cd = c + dd = d + + elif method == 'foh': + # Size parameters for convenience + n = a.shape[0] + m = b.shape[1] + + # Build an exponential matrix similar to 'zoh' method + em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m)) + em_lower = zeros((m, n + 2 * m)) + em = np.block([[em_upper], [em_lower]]) + + ms = linalg.expm(em) + + # Get the three blocks from upper rows + ms11 = ms[:n, 0:n] + ms12 = ms[:n, n:n + m] + ms13 = ms[:n, n + m:] + + ad = ms11 + bd = ms12 - ms13 + ms11 @ ms13 + cd = c + dd = d + c @ ms13 + + elif method == 'impulse': + if not np.allclose(d, 0): + raise ValueError("Impulse method is only applicable" + "to strictly proper systems") + + ad = linalg.expm(a * dt) + bd = ad @ b * dt + cd = c + dd = c @ b * dt + + else: + raise ValueError("Unknown transformation method '%s'" % method) + + return ad, bd, cd, dd, dt diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_ltisys.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..b0117bad7a4674e00d0ceb04e1bf3c459f63b2e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_ltisys.py @@ -0,0 +1,3496 @@ +""" +ltisys -- a collection of classes and functions for modeling linear +time invariant systems. +""" +# +# Author: Travis Oliphant 2001 +# +# Feb 2010: Warren Weckesser +# Rewrote lsim2 and added impulse2. +# Apr 2011: Jeffrey Armstrong +# Added dlsim, dstep, dimpulse, cont2discrete +# Aug 2013: Juan Luis Cano +# Rewrote abcd_normalize. 
+# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr +# Added pole placement +# Mar 2015: Clancy Rowley +# Rewrote lsim +# May 2015: Felix Berkenkamp +# Split lti class into subclasses +# Merged discrete systems and added dlti + +import warnings + +# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7 +# use scipy's qr until this is solved + +from scipy.linalg import qr as s_qr +from scipy import linalg +from scipy.interpolate import make_interp_spline +from ._filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk, + freqz_zpk) +from ._lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk, + cont2discrete, _atleast_2d_or_none) + +import numpy +import numpy as np +from numpy import (real, atleast_1d, squeeze, asarray, zeros, + dot, transpose, ones, linspace) +import copy + +__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', + 'lsim', 'impulse', 'step', 'bode', + 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', + 'dfreqresp', 'dbode'] + + +class LinearTimeInvariant: + def __new__(cls, *system, **kwargs): + """Create a new object, don't allow direct instances.""" + if cls is LinearTimeInvariant: + raise NotImplementedError('The LinearTimeInvariant class is not ' + 'meant to be used directly, use `lti` ' + 'or `dlti` instead.') + return super().__new__(cls) + + def __init__(self): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. 
+ """ + super().__init__() + + self.inputs = None + self.outputs = None + self._dt = None + + @property + def dt(self): + """Return the sampling time of the system, `None` for `lti` systems.""" + return self._dt + + @property + def _dt_dict(self): + if self.dt is None: + return {} + else: + return {'dt': self.dt} + + @property + def zeros(self): + """Zeros of the system.""" + return self.to_zpk().zeros + + @property + def poles(self): + """Poles of the system.""" + return self.to_zpk().poles + + def _as_ss(self): + """Convert to `StateSpace` system, without copying. + + Returns + ------- + sys: StateSpace + The `StateSpace` system. If the class is already an instance of + `StateSpace` then this instance is returned. + """ + if isinstance(self, StateSpace): + return self + else: + return self.to_ss() + + def _as_zpk(self): + """Convert to `ZerosPolesGain` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `ZerosPolesGain` system. If the class is already an instance of + `ZerosPolesGain` then this instance is returned. + """ + if isinstance(self, ZerosPolesGain): + return self + else: + return self.to_zpk() + + def _as_tf(self): + """Convert to `TransferFunction` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `TransferFunction` system. If the class is already an instance of + `TransferFunction` then this instance is returned. + """ + if isinstance(self, TransferFunction): + return self + else: + return self.to_tf() + + +class lti(LinearTimeInvariant): + r""" + Continuous-time linear time invariant system base class. + + Parameters + ---------- + *system : arguments + The `lti` class can be instantiated with either 2, 3 or 4 arguments. 
+ The following gives the number of arguments and the corresponding + continuous-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + + See Also + -------- + ZerosPolesGain, StateSpace, TransferFunction, dlti + + Notes + ----- + `lti` instances do not exist directly. Instead, `lti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, + 5]``). + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. 
+ + Examples + -------- + >>> from scipy import signal + + >>> signal.lti(1, 2, 3, 4) + StateSpaceContinuous( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: None + ) + + Construct the transfer function + :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> signal.lti([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`: + + >>> signal.lti([3, 4], [1, 2]) + TransferFunctionContinuous( + array([3., 4.]), + array([1., 2.]), + dt: None + ) + + """ + def __new__(cls, *system): + """Create an instance of the appropriate subclass.""" + if cls is lti: + N = len(system) + if N == 2: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, *system) + elif N == 3: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, *system) + elif N == 4: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system) + else: + raise ValueError("`system` needs to be an instance of `lti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super().__new__(cls) + + def __init__(self, *system): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + super().__init__(*system) + + def impulse(self, X0=None, T=None, N=None): + """ + Return the impulse response of a continuous-time system. + See `impulse` for details. + """ + return impulse(self, X0=X0, T=T, N=N) + + def step(self, X0=None, T=None, N=None): + """ + Return the step response of a continuous-time system. + See `step` for details. + """ + return step(self, X0=X0, T=T, N=N) + + def output(self, U, T, X0=None): + """ + Return the response of a continuous-time system to input `U`. + See `lsim` for details. 
+ """ + return lsim(self, U, T, X0=X0) + + def bode(self, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `bode` for details. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sys = signal.TransferFunction([1], [1, 1]) + >>> w, mag, phase = sys.bode() + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + return bode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000): + """ + Calculate the frequency response of a continuous-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `freqresp` for details. + """ + return freqresp(self, w=w, n=n) + + def to_discrete(self, dt, method='zoh', alpha=None): + """Return a discretized version of the current system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` + """ + raise NotImplementedError('to_discrete is not implemented for this ' + 'system class.') + + +class dlti(LinearTimeInvariant): + r""" + Discrete-time linear time invariant system base class. + + Parameters + ---------- + *system: arguments + The `dlti` class can be instantiated with either 2, 3 or 4 arguments. + The following gives the number of arguments and the corresponding + discrete-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to ``True`` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. 
class dlti(LinearTimeInvariant):
    r"""
    Discrete-time linear time invariant system base class.

    Parameters
    ----------
    *system: arguments
        The `dlti` class can be instantiated with either 2, 3 or 4
        arguments; the count selects the discrete-time subclass that is
        created:

        * 2: `TransferFunction`:  (numerator, denominator)
        * 3: `ZerosPolesGain`:    (zeros, poles, gain)
        * 4: `StateSpace`:        (A, B, C, D)

        Each argument can be an array or a sequence.
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to
        ``True`` (unspecified sampling time). Must be specified as a
        keyword argument, for example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, TransferFunction, lti

    Notes
    -----
    `dlti` instances do not exist directly; instantiating `dlti` yields
    one of its subclasses (`StateSpace`, `TransferFunction` or
    `ZerosPolesGain`).

    If (numerator, denominator) is passed in for ``*system``, coefficients
    for both polynomials must be given in descending exponent order
    (e.g., ``z^2 + 3z + 5`` is represented as ``[1, 3, 5]``).

    Accessing properties that are not part of the current representation
    converts on every access; convert once with e.g. ``sys = sys.to_zpk()``
    before working with zeros, poles or gain.

    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import signal

    >>> signal.dlti(1, 2, 3, 4, dt=0.1)
    StateSpaceDiscrete(
    array([[1]]),
    array([[2]]),
    array([[3]]),
    array([[4]]),
    dt: 0.1
    )

    >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )

    >>> signal.dlti([3, 4], [1, 2], dt=0.1)
    TransferFunctionDiscrete(
    array([3., 4.]),
    array([1., 2.]),
    dt: 0.1
    )

    """
    def __new__(cls, *system, **kwargs):
        """Dispatch construction to the subclass matching the argument count."""
        if cls is not dlti:
            # A subclass is being constructed; use the default mechanism.
            return super().__new__(cls)
        subclasses = {
            2: TransferFunctionDiscrete,
            3: ZerosPolesGainDiscrete,
            4: StateSpaceDiscrete,
        }
        subcls = subclasses.get(len(system))
        if subcls is None:
            raise ValueError("`system` needs to be an instance of `dlti` "
                             "or have 2, 3 or 4 arguments.")
        return subcls.__new__(subcls, *system, **kwargs)

    def __init__(self, *system, **kwargs):
        """
        Initialize the `dlti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        # `dt` defaults to True, meaning "discrete with unspecified
        # sampling time"; it must not be forwarded to the parent.
        dt = kwargs.pop('dt', True)
        super().__init__(*system, **kwargs)
        self.dt = dt

    @property
    def dt(self):
        """Return the sampling time of the system."""
        return self._dt

    @dt.setter
    def dt(self, dt):
        self._dt = dt

    def impulse(self, x0=None, t=None, n=None):
        """
        Return the impulse response of the discrete-time `dlti` system.
        See `dimpulse` for details.
        """
        return dimpulse(self, x0=x0, t=t, n=n)

    def step(self, x0=None, t=None, n=None):
        """
        Return the step response of the discrete-time `dlti` system.
        See `dstep` for details.
        """
        return dstep(self, x0=x0, t=t, n=n)

    def output(self, u, t, x0=None):
        """
        Return the response of the discrete-time system to input `u`.
        See `dlsim` for details.
        """
        return dlsim(self, u, t, x0=x0)

    def bode(self, w=None, n=100):
        r"""
        Calculate Bode magnitude and phase data of a discrete-time system.

        Returns a 3-tuple containing arrays of frequencies [rad/s],
        magnitude [dB] and phase [deg]. See `dbode` for details.
        """
        return dbode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000, whole=False):
        """
        Calculate the frequency response of a discrete-time system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude. See `dfreqresp` for details.
        """
        return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
    r"""Linear Time Invariant system class in transfer function form.

    Represents the system as the continuous-time transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
    discrete-time transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements
    of the denominator `den`, and ``N == len(b) - 1``,
    ``M == len(a) - 1``. `TransferFunction` systems inherit additional
    functionality from the `lti`, respectively the `dlti` classes,
    depending on which system representation is used.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments:

        * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 2: array_like: (numerator, denominator)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, lti, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Coefficients for both the numerator and denominator must be specified
    in descending exponent order (e.g. ``s^2 + 3s + 5`` or
    ``z^2 + 3z + 5`` is represented as ``[1, 3, 5]``).

    Accessing properties that are not part of this representation (such as
    the `A`, `B`, `C`, `D` state-space matrices) converts on every access;
    convert once with ``sys = sys.to_ss()`` before working with the
    matrices.

    Examples
    --------
    >>> from scipy import signal

    >>> num = [1, 3, 3]
    >>> den = [1, 2, 1]

    >>> signal.TransferFunction(num, den)
    TransferFunctionContinuous(
    array([1., 3., 3.]),
    array([1., 2., 1.]),
    dt: None
    )

    >>> signal.TransferFunction(num, den, dt=0.1)
    TransferFunctionDiscrete(
    array([1., 3., 3.]),
    array([1., 2., 1.]),
    dt: 0.1
    )

    """
    def __new__(cls, *system, **kwargs):
        """Convert other `lti`/`dlti` instances and dispatch between the
        continuous- and discrete-time subclasses."""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_tf()

        if cls is TransferFunction:
            # dt=None (or absent) selects the continuous-time variant.
            subcls = (TransferFunctionContinuous if kwargs.get('dt') is None
                      else TransferFunctionDiscrete)
            return subcls.__new__(subcls, *system, **kwargs)

        # No special conversion needed.
        return super().__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the transfer function system."""
        # Conversion of lti instances is fully handled in __new__.
        if isinstance(system[0], LinearTimeInvariant):
            return

        # The system arguments are consumed here, not by the parents.
        super().__init__(**kwargs)

        self._num = None
        self._den = None
        self.num, self.den = normalize(*system)

    def __repr__(self):
        """Return representation of the system's transfer function."""
        return (f'{self.__class__.__name__}(\n'
                f'{self.num!r},\n'
                f'{self.den!r},\n'
                f'dt: {self.dt!r}\n)')

    @property
    def num(self):
        """Numerator of the `TransferFunction` system."""
        return self._num

    @num.setter
    def num(self, num):
        self._num = atleast_1d(num)
        # A 2-D numerator encodes a multi-output system; 1-D is SISO.
        if self._num.ndim > 1:
            self.outputs, self.inputs = self._num.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def den(self):
        """Denominator of the `TransferFunction` system."""
        return self._den

    @den.setter
    def den(self, den):
        self._den = atleast_1d(den)

    def _copy(self, system):
        """
        Copy the parameters of another `TransferFunction` object.

        Parameters
        ----------
        system : `TransferFunction`
            The system whose num/den are copied into this one.
        """
        self.num = system.num
        self.den = system.den

    def to_tf(self):
        """
        Return a copy of the current `TransferFunction` system.

        Returns
        -------
        sys : instance of `TransferFunction`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_zpk(self):
        """
        Convert system representation to `ZerosPolesGain`.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*tf2zpk(self.num, self.den), **self._dt_dict)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*tf2ss(self.num, self.den), **self._dt_dict)

    @staticmethod
    def _z_to_zinv(num, den):
        """Rewrite a transfer function from descending powers of ``z`` to
        ascending powers of ``z**-1``.

        The shorter polynomial is left-padded with zeros so both have the
        same length, e.g. ``5z**2 + 3z + 2`` given as ``[5, 3, 2]`` becomes
        ``5 + 3 z**-1 + 2 z**-2`` given as ``[5, 3, 2]``.
        """
        pad = len(num) - len(den)
        if pad > 0:
            den = np.hstack((np.zeros(pad), den))
        elif pad < 0:
            num = np.hstack((np.zeros(-pad), num))
        return num, den

    @staticmethod
    def _zinv_to_z(num, den):
        """Rewrite a transfer function from ascending powers of ``z**-1``
        to descending powers of ``z``.

        The shorter polynomial is right-padded with zeros so both have the
        same length (inverse of `_z_to_zinv`).
        """
        pad = len(num) - len(den)
        if pad > 0:
            den = np.hstack((den, np.zeros(pad)))
        elif pad < 0:
            num = np.hstack((num, np.zeros(-pad)))
        return num, den
class TransferFunctionContinuous(TransferFunction, lti):
    r"""
    Continuous-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements
    of the denominator `den`, and ``N == len(b) - 1``,
    ``M == len(a) - 1``. Continuous-time `TransferFunction` systems
    inherit additional functionality from the `lti` class.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments:

        * 1: `lti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 2: array_like: (numerator, denominator)

    See Also
    --------
    ZerosPolesGain, StateSpace, lti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Coefficients for both the numerator and denominator must be specified
    in descending exponent order (e.g. ``s^2 + 3s + 5`` is represented as
    ``[1, 3, 5]``).

    Changing properties that are not part of this representation (such as
    the `A`, `B`, `C`, `D` state-space matrices) is inefficient; convert
    once with ``sys = sys.to_ss()`` first.

    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:

    >>> from scipy import signal

    >>> signal.TransferFunction([1, 3, 3], [1, 2, 1])
    TransferFunctionContinuous(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: None
    )

    """

    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Return the discretized `TransferFunction` system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti` and `TransferFunction`
            The discrete-time transfer function with sampling time `dt`.
        """
        # NOTE(fix): the docstring previously claimed the return value was
        # an instance of `StateSpace`; this method returns a discrete-time
        # `TransferFunction`.
        # cont2discrete returns (num, den, dt); drop the trailing dt and
        # pass the requested sampling time explicitly.
        discrete = cont2discrete((self.num, self.den), dt,
                                 method=method, alpha=alpha)
        return TransferFunction(*discrete[:-1], dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
    r"""
    Discrete-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements
    of the denominator `den`, and ``N == len(b) - 1``,
    ``M == len(a) - 1``. Discrete-time `TransferFunction` systems inherit
    additional functionality from the `dlti` class.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments:

        * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 2: array_like: (numerator, denominator)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword
        argument, for example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Coefficients for both the numerator and denominator must be specified
    in descending exponent order (e.g., ``z^2 + 3z + 5`` is represented as
    ``[1, 3, 5]``).

    Examples
    --------
    Construct the transfer function
    :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time
    of 0.5 seconds:

    >>> from scipy import signal

    >>> signal.TransferFunction([1, 3, 3], [1, 2, 1], dt=0.5)
    TransferFunctionDiscrete(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: 0.5
    )

    """
    # All behavior is provided by TransferFunction and dlti.
    pass
class ZerosPolesGain(LinearTimeInvariant):
    r"""
    Linear Time Invariant system class in zeros, poles, gain form.

    Represents the system as the continuous- or discrete-time transfer
    function :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where
    :math:`k` is the `gain`, :math:`z` are the `zeros` and :math:`p` are
    the `poles`. `ZerosPolesGain` systems inherit additional functionality
    from the `lti`, respectively the `dlti` classes, depending on which
    system representation is used.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments:

        * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.

    See Also
    --------
    TransferFunction, StateSpace, lti, dlti
    zpk2ss, zpk2tf, zpk2sos

    Notes
    -----
    Accessing properties that are not part of this representation (such as
    the `A`, `B`, `C`, `D` state-space matrices) converts on every access;
    convert once with ``sys = sys.to_ss()`` before working with the
    matrices.

    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:

    >>> from scipy import signal

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )

    """
    def __new__(cls, *system, **kwargs):
        """Convert other `lti`/`dlti` instances and dispatch between the
        continuous- and discrete-time subclasses."""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_zpk()

        if cls is ZerosPolesGain:
            # dt=None (or absent) selects the continuous-time variant.
            subcls = (ZerosPolesGainContinuous if kwargs.get('dt') is None
                      else ZerosPolesGainDiscrete)
            return subcls.__new__(subcls, *system, **kwargs)

        # No special conversion needed.
        return super().__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the zeros, poles, gain system."""
        # Conversion of lti instances is fully handled in __new__.
        if isinstance(system[0], LinearTimeInvariant):
            return

        super().__init__(**kwargs)

        self._zeros = None
        self._poles = None
        self._gain = None
        self.zeros, self.poles, self.gain = system

    def __repr__(self):
        """Return representation of the `ZerosPolesGain` system."""
        return (f'{self.__class__.__name__}(\n'
                f'{self.zeros!r},\n'
                f'{self.poles!r},\n'
                f'{self.gain!r},\n'
                f'dt: {self.dt!r}\n)')

    @property
    def zeros(self):
        """Zeros of the `ZerosPolesGain` system."""
        return self._zeros

    @zeros.setter
    def zeros(self, zeros):
        self._zeros = atleast_1d(zeros)
        # A 2-D zeros array encodes a multi-output system; 1-D is SISO.
        if self._zeros.ndim > 1:
            self.outputs, self.inputs = self._zeros.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def poles(self):
        """Poles of the `ZerosPolesGain` system."""
        return self._poles

    @poles.setter
    def poles(self, poles):
        self._poles = atleast_1d(poles)

    @property
    def gain(self):
        """Gain of the `ZerosPolesGain` system."""
        return self._gain

    @gain.setter
    def gain(self, gain):
        self._gain = gain

    def _copy(self, system):
        """
        Copy the parameters of another `ZerosPolesGain` system.

        Parameters
        ----------
        system : instance of `ZerosPolesGain`
            The zeros, poles, gain system that is to be copied
        """
        self.poles = system.poles
        self.zeros = system.zeros
        self.gain = system.gain

    def to_tf(self):
        """
        Convert system representation to `TransferFunction`.

        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
                                **self._dt_dict)

    def to_zpk(self):
        """
        Return a copy of the current 'ZerosPolesGain' system.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
                          **self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
    r"""
    Continuous-time Linear Time Invariant system in zeros, poles, gain form.

    Represents the system as the continuous time transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where
    :math:`k` is the `gain`, :math:`z` are the `zeros` and :math:`p` are
    the `poles`. Continuous-time `ZerosPolesGain` systems inherit
    additional functionality from the `lti` class.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments:

        * 1: `lti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 3: array_like: (zeros, poles, gain)

    See Also
    --------
    TransferFunction, StateSpace, lti
    zpk2ss, zpk2tf, zpk2sos

    Notes
    -----
    Changing properties that are not part of this representation (such as
    the `A`, `B`, `C`, `D` state-space matrices) is inefficient; convert
    once with ``sys = sys.to_ss()`` first.

    Examples
    --------
    Construct the transfer function
    :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:

    >>> from scipy import signal

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    """

    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Return the discretized `ZerosPolesGain` system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti` and `ZerosPolesGain`
        """
        # cont2discrete returns (zeros, poles, gain, dt); drop the trailing
        # dt and pass the requested sampling time explicitly.
        discrete = cont2discrete((self.zeros, self.poles, self.gain), dt,
                                 method=method, alpha=alpha)
        return ZerosPolesGain(*discrete[:-1], dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
    r"""
    Discrete-time Linear Time Invariant system in zeros, poles, gain form.

    Represents the system as the discrete-time transfer function
    :math:`H(z)=k \prod_i (z - q[i]) / \prod_j (z - p[j])`, where
    :math:`k` is the `gain`, :math:`q` are the `zeros` and :math:`p` are
    the `poles`. Discrete-time `ZerosPolesGain` systems inherit additional
    functionality from the `dlti` class.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments:

        * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword
        argument, for example, ``dt=0.1``.

    See Also
    --------
    TransferFunction, StateSpace, dlti
    zpk2ss, zpk2tf, zpk2sos

    Notes
    -----
    Changing properties that are not part of this representation (such as
    the `A`, `B`, `C`, `D` state-space matrices) is inefficient; convert
    once with ``sys = sys.to_ss()`` first.

    Examples
    --------
    Construct the transfer function
    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling
    time of 0.1 seconds:

    >>> from scipy import signal

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )

    """
    # All behavior is provided by ZerosPolesGain and dlti.
    pass
`StateSpace` systems + inherit additional functionality from the `lti`, respectively the `dlti` + classes, depending on which system representation is used. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 4 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + TransferFunction, ZerosPolesGain, lti, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + >>> import numpy as np + >>> a = np.array([[0, 1], [0, 0]]) + >>> b = np.array([[0], [1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> sys = signal.StateSpace(a, b, c, d) + >>> print(sys) + StateSpaceContinuous( + array([[0, 1], + [0, 0]]), + array([[0], + [1]]), + array([[1, 0]]), + array([[0]]), + dt: None + ) + + >>> sys.to_discrete(0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. ]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. 
]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + + # Override NumPy binary operations and ufuncs + __array_priority__ = 100.0 + __array_ufunc__ = None + + def __new__(cls, *system, **kwargs): + """Create new StateSpace object and settle inheritance.""" + # Handle object conversion if input is an instance of `lti` + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_ss() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is StateSpace: + if kwargs.get('dt') is None: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system, **kwargs) + else: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, + *system, **kwargs) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space lti/dlti system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super().__init__(**kwargs) + + self._A = None + self._B = None + self._C = None + self._D = None + + self.A, self.B, self.C, self.D = abcd_normalize(*system) + + def __repr__(self): + """Return representation of the `StateSpace` system.""" + return '{}(\n{},\n{},\n{},\n{},\ndt: {}\n)'.format( + self.__class__.__name__, + repr(self.A), + repr(self.B), + repr(self.C), + repr(self.D), + repr(self.dt), + ) + + def _check_binop_other(self, other): + return isinstance(other, (StateSpace, np.ndarray, float, complex, + np.number, int)) + + def __mul__(self, other): + """ + Post-multiply another system or a scalar + + Handles multiplication of systems in the sense of a frequency domain + multiplication. That means, given two systems E1(s) and E2(s), their + multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s) + is equivalent to first applying E2(s), and then E1(s). 
+ + Notes + ----- + For SISO systems the order of system application does not matter. + However, for MIMO systems, where the two systems are matrices, the + order above ensures standard Matrix multiplication rules apply. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. + if type(other) is not type(self): + return NotImplemented + + if self.dt != other.dt: + raise TypeError('Cannot multiply systems with different `dt`.') + + n1 = self.A.shape[0] + n2 = other.A.shape[0] + + # Interconnection of systems + # x1' = A1 x1 + B1 u1 + # y1 = C1 x1 + D1 u1 + # x2' = A2 x2 + B2 y1 + # y2 = C2 x2 + D2 y1 + # + # Plugging in with u1 = y2 yields + # [x1'] [A1 B1*C2 ] [x1] [B1*D2] + # [x2'] = [0 A2 ] [x2] + [B2 ] u2 + # [x1] + # y2 = [C1 D1*C2] [x2] + D1*D2 u2 + a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))), + np.hstack((zeros((n2, n1)), other.A)))) + b = np.vstack((np.dot(self.B, other.D), other.B)) + c = np.hstack((self.C, np.dot(self.D, other.C))) + d = np.dot(self.D, other.D) + else: + # Assume that other is a scalar / matrix + # For post multiplication the input gets scaled + a = self.A + b = np.dot(self.B, other) + c = self.C + d = np.dot(self.D, other) + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __rmul__(self, other): + """Pre-multiply a scalar or matrix (but not StateSpace)""" + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + # For pre-multiplication only the output gets scaled + a = self.A + b = self.B + c = np.dot(other, self.C) + d = np.dot(other, self.D) + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), 
+ np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __neg__(self): + """Negate the system (equivalent to pre-multiplying by -1).""" + return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict) + + def __add__(self, other): + """ + Adds two systems in the sense of frequency domain addition. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. + if type(other) is not type(self): + raise TypeError(f'Cannot add {type(self)} and {type(other)}') + + if self.dt != other.dt: + raise TypeError('Cannot add systems with different `dt`.') + # Interconnection of systems + # x1' = A1 x1 + B1 u + # y1 = C1 x1 + D1 u + # x2' = A2 x2 + B2 u + # y2 = C2 x2 + D2 u + # y = y1 + y2 + # + # Plugging in yields + # [x1'] [A1 0 ] [x1] [B1] + # [x2'] = [0 A2] [x2] + [B2] u + # [x1] + # y = [C1 C2] [x2] + [D1 + D2] u + a = linalg.block_diag(self.A, other.A) + b = np.vstack((self.B, other.B)) + c = np.hstack((self.C, other.C)) + d = self.D + other.D + else: + other = np.atleast_2d(other) + if self.D.shape == other.shape: + # A scalar/matrix is really just a static system (A=0, B=0, C=0) + a = self.A + b = self.B + c = self.C + d = self.D + other + else: + raise ValueError("Cannot add systems with incompatible " + f"dimensions ({self.D.shape} and {other.shape})") + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __sub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(-other) + + def __radd__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(other) + + def __rsub__(self, other): 
+ if not self._check_binop_other(other): + return NotImplemented + + return (-self).__add__(other) + + def __truediv__(self, other): + """ + Divide by a scalar + """ + # Division by non-StateSpace scalars + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + if isinstance(other, np.ndarray) and other.ndim > 0: + # It's ambiguous what this means, so disallow it + raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays") + + return self.__mul__(1/other) + + @property + def A(self): + """State matrix of the `StateSpace` system.""" + return self._A + + @A.setter + def A(self, A): + self._A = _atleast_2d_or_none(A) + + @property + def B(self): + """Input matrix of the `StateSpace` system.""" + return self._B + + @B.setter + def B(self, B): + self._B = _atleast_2d_or_none(B) + self.inputs = self.B.shape[-1] + + @property + def C(self): + """Output matrix of the `StateSpace` system.""" + return self._C + + @C.setter + def C(self, C): + self._C = _atleast_2d_or_none(C) + self.outputs = self.C.shape[0] + + @property + def D(self): + """Feedthrough matrix of the `StateSpace` system.""" + return self._D + + @D.setter + def D(self, D): + self._D = _atleast_2d_or_none(D) + + def _copy(self, system): + """ + Copy the parameters of another `StateSpace` system. + + Parameters + ---------- + system : instance of `StateSpace` + The state-space system that is to be copied + + """ + self.A = system.A + self.B = system.B + self.C = system.C + self.D = system.D + + def to_tf(self, **kwargs): + """ + Convert system representation to `TransferFunction`. 
+ + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_zpk(self, **kwargs): + """ + Convert system representation to `ZerosPolesGain`. + + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_ss(self): + """ + Return a copy of the current `StateSpace` system. + + Returns + ------- + sys : instance of `StateSpace` + The current system (copy) + + """ + return copy.deepcopy(self) + + +class StateSpaceContinuous(StateSpace, lti): + r""" + Continuous-time Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u`. + Continuous-time `StateSpace` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + + See Also + -------- + TransferFunction, ZerosPolesGain, lti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. 
For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + >>> a = np.array([[0, 1], [0, 0]]) + >>> b = np.array([[0], [1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> sys = signal.StateSpace(a, b, c, d) + >>> print(sys) + StateSpaceContinuous( + array([[0, 1], + [0, 0]]), + array([[0], + [1]]), + array([[1, 0]]), + array([[0]]), + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `StateSpace` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class StateSpaceDiscrete(StateSpace, dlti): + r""" + Discrete-time Linear Time Invariant system in state-space form. + + Represents the system as the discrete-time difference equation + :math:`x[k+1] = A x[k] + B u[k]`. + `StateSpace` systems inherit additional functionality from the `dlti` + class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + TransferFunction, ZerosPolesGain, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. 
It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[ 1. , 0.1], + [ 0. , 1. ]]), + array([[ 0.005], + [ 0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + pass + + +def lsim(system, U, T, X0=None, interp=True): + """ + Simulate output of a continuous-time linear system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + U : array_like + An input array describing the input at each time `T` + (interpolation is assumed between given times). If there are + multiple inputs, then each column of the rank-2 array + represents an input. If U = 0 or None, a zero input is used. + T : array_like + The time steps at which the input is defined and at which the + output is desired. Must be nonnegative, increasing, and equally spaced. + X0 : array_like, optional + The initial conditions on the state vector (zero by default). + interp : bool, optional + Whether to use linear (True, the default) or zero-order-hold (False) + interpolation for the input array. + + Returns + ------- + T : 1D ndarray + Time values for the output. + yout : 1D ndarray + System response. + xout : ndarray + Time evolution of the state vector. + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. 
``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + We'll use `lsim` to simulate an analog Bessel filter applied to + a signal. + + >>> import numpy as np + >>> from scipy.signal import bessel, lsim + >>> import matplotlib.pyplot as plt + + Create a low-pass Bessel filter with a cutoff of 12 Hz. + + >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True) + + Generate data to which the filter is applied. + + >>> t = np.linspace(0, 1.25, 500, endpoint=False) + + The input signal is the sum of three sinusoidal curves, with + frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly + eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal. + + >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) + + ... 0.5*np.cos(2*np.pi*80*t)) + + Simulate the filter with `lsim`. + + >>> tout, yout, xout = lsim((b, a), U=u, T=t) + + Plot the result. + + >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input') + >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output') + >>> plt.legend(loc='best', shadow=True, framealpha=1) + >>> plt.grid(alpha=0.3) + >>> plt.xlabel('t') + >>> plt.show() + + In a second example, we simulate a double integrator ``y'' = u``, with + a constant input ``u = 1``. We'll use the state space representation + of the integrator. + + >>> from scipy.signal import lti + >>> A = np.array([[0.0, 1.0], [0.0, 0.0]]) + >>> B = np.array([[0.0], [1.0]]) + >>> C = np.array([[1.0, 0.0]]) + >>> D = 0.0 + >>> system = lti(A, B, C, D) + + `t` and `u` define the time and input signal for the system to + be simulated. + + >>> t = np.linspace(0, 5, num=50) + >>> u = np.ones_like(t) + + Compute the simulation, and then plot `y`. As expected, the plot shows + the curve ``y = 0.5*t**2``. 
+ + >>> tout, y, x = lsim(system, u, t) + >>> plt.plot(t, y) + >>> plt.grid(alpha=0.3) + >>> plt.xlabel('t') + >>> plt.show() + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('lsim can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + T = atleast_1d(T) + if len(T.shape) != 1: + raise ValueError("T must be a rank-1 array.") + + A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D)) + n_states = A.shape[0] + n_inputs = B.shape[1] + + n_steps = T.size + if X0 is None: + X0 = zeros(n_states, sys.A.dtype) + xout = np.empty((n_steps, n_states), sys.A.dtype) + + if T[0] == 0: + xout[0] = X0 + elif T[0] > 0: + # step forward to initial time, with zero input + xout[0] = dot(X0, linalg.expm(transpose(A) * T[0])) + else: + raise ValueError("Initial time must be nonnegative") + + no_input = (U is None or + (isinstance(U, (int, float)) and U == 0.) or + not np.any(U)) + + if n_steps == 1: + yout = squeeze(xout @ C.T) + if not no_input: + yout += squeeze(U @ D.T) + return T, yout, squeeze(xout) + + dt = T[1] - T[0] + if not np.allclose(np.diff(T), dt): + raise ValueError("Time steps are not equally spaced.") + + if no_input: + # Zero input: just use matrix exponential + # take transpose because state is a row vector + expAT_dt = linalg.expm(A.T * dt) + for i in range(1, n_steps): + xout[i] = xout[i-1] @ expAT_dt + yout = squeeze(xout @ C.T) + return T, yout, squeeze(xout) + + # Nonzero input + U = atleast_1d(U) + if U.ndim == 1: + U = U[:, np.newaxis] + + if U.shape[0] != n_steps: + raise ValueError("U must have the same number of rows " + "as elements in T.") + + if U.shape[1] != n_inputs: + raise ValueError("System does not define that many inputs.") + + if not interp: + # Zero-order hold + # Algorithm: to integrate from time 0 to time dt, we solve + # xdot = A x + B u, x(0) = x0 + # udot = 0, u(0) = u0. 
+ # + # Solution is + # [ x(dt) ] [ A*dt B*dt ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 ] [ u0 ] + M = np.vstack([np.hstack([A * dt, B * dt]), + np.zeros((n_inputs, n_states + n_inputs))]) + # transpose everything because the state and input are row vectors + expMT = linalg.expm(M.T) + Ad = expMT[:n_states, :n_states] + Bd = expMT[n_states:, :n_states] + for i in range(1, n_steps): + xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd + else: + # Linear interpolation between steps + # Algorithm: to integrate from time 0 to time dt, with linear + # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve + # xdot = A x + B u, x(0) = x0 + # udot = (u1 - u0) / dt, u(0) = u0. + # + # Solution is + # [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 I ] [ u0 ] + # [u1 - u0] [ 0 0 0 ] [u1 - u0] + M = np.vstack([np.hstack([A * dt, B * dt, + np.zeros((n_states, n_inputs))]), + np.hstack([np.zeros((n_inputs, n_states + n_inputs)), + np.identity(n_inputs)]), + np.zeros((n_inputs, n_states + 2 * n_inputs))]) + expMT = linalg.expm(M.T) + Ad = expMT[:n_states, :n_states] + Bd1 = expMT[n_states+n_inputs:, :n_states] + Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1 + for i in range(1, n_steps): + xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd0 + U[i] @ Bd1 + + yout = squeeze(xout @ C.T) + squeeze(U @ D.T) + return T, yout, squeeze(xout) + + +def _default_response_times(A, n): + """Compute a reasonable set of time samples for the response time. + + This function is used by `impulse` and `step` to compute the response time + when the `T` argument to the function is None. + + Parameters + ---------- + A : array_like + The system matrix, which is square. + n : int + The number of time samples to generate. + + Returns + ------- + t : ndarray + The 1-D array of length `n` of time samples at which the response + is to be computed. + """ + # Create a reasonable time interval. + # TODO: This could use some more work. + # For example, what is expected when the system is unstable? 
+ vals = linalg.eigvals(A) + r = min(abs(real(vals))) + if r == 0.0: + r = 1.0 + tc = 1.0 / r + t = linspace(0.0, 7 * tc, n) + return t + + +def impulse(system, X0=None, T=None, N=None): + """Impulse response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector. Defaults to zero. + T : array_like, optional + Time points. Computed if not given. + N : int, optional + The number of time points to compute (if `T` is not given). + + Returns + ------- + T : ndarray + A 1-D array of time points. + yout : ndarray + A 1-D array containing the impulse response of the system (except for + singularities at zero). + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). 
+ + Examples + -------- + Compute the impulse response of a second order system with a repeated + root: ``x''(t) + 2*x'(t) + x(t) = u(t)`` + + >>> from scipy import signal + >>> system = ([1.0], [1.0, 2.0, 1.0]) + >>> t, y = signal.impulse(system) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, y) + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('impulse can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if X0 is None: + X = squeeze(sys.B) + else: + X = squeeze(sys.B + X0) + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + + _, h, _ = lsim(sys, 0., T, X, interp=False) + return T, h + + +def step(system, X0=None, T=None, N=None): + """Step response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector (default is zero). + T : array_like, optional + Time points (computed if not given). + N : int, optional + Number of time points to compute if `T` is not given. + + Returns + ------- + T : 1D ndarray + Output time points. + yout : 1D ndarray + Step response of system. + + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> lti = signal.lti([1.0], [1.0, 1.0]) + >>> t, y = signal.step(lti) + >>> plt.plot(t, y) + >>> plt.xlabel('Time [s]') + >>> plt.ylabel('Amplitude') + >>> plt.title('Step response for 1. 
Order Lowpass') + >>> plt.grid() + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('step can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + U = ones(T.shape, sys.A.dtype) + vals = lsim(sys, U, T, X0=X0, interp=False) + return vals[0], vals[1] + + +def bode(system, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + w : array_like, optional + Array of frequencies (in rad/s). Magnitude and phase data is calculated + for every value in this array. If not given a reasonable set will be + calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/s] + mag : 1D ndarray + Magnitude array [dB] + phase : 1D ndarray + Phase array [deg] + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + .. 
versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sys = signal.TransferFunction([1], [1, 1]) + >>> w, mag, phase = signal.bode(sys) + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + w, y = freqresp(system, w=w, n=n) + + mag = 20.0 * numpy.log10(abs(y)) + phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi + + return w, mag, phase + + +def freqresp(system, w=None, n=10000): + r"""Calculate the frequency response of a continuous-time system. + + Parameters + ---------- + system : an instance of the `lti` class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + w : array_like, optional + Array of frequencies (in rad/s). Magnitude and phase data is + calculated for every value in this array. If not given, a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/s] + H : 1D ndarray + Array of complex magnitude values + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). 
+ + Examples + -------- + Generating the Nyquist plot of a transfer function + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`: + + >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5]) + + >>> w, H = signal.freqresp(s1) + + >>> plt.figure() + >>> plt.plot(H.real, H.imag, "b") + >>> plt.plot(H.real, -H.imag, "r") + >>> plt.show() + """ + if isinstance(system, lti): + if isinstance(system, (TransferFunction, ZerosPolesGain)): + sys = system + else: + sys = system._as_zpk() + elif isinstance(system, dlti): + raise AttributeError('freqresp can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_zpk() + + if sys.inputs != 1 or sys.outputs != 1: + raise ValueError("freqresp() requires a SISO (single input, single " + "output) system.") + + if w is not None: + worN = w + else: + worN = n + + if isinstance(sys, TransferFunction): + # In the call to freqs(), sys.num.ravel() is used because there are + # cases where sys.num is a 2-D array with a single row. + w, h = freqs(sys.num.ravel(), sys.den, worN=worN) + + elif isinstance(sys, ZerosPolesGain): + w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN) + + return w, h + + +# This class will be used by place_poles to return its results +# see https://code.activestate.com/recipes/52308/ +class Bunch: + def __init__(self, **kwds): + self.__dict__.update(kwds) + + +def _valid_inputs(A, B, poles, method, rtol, maxiter): + """ + Check the poles come in complex conjugage pairs + Check shapes of A, B and poles are compatible. 
+ Check the method chosen is compatible with provided poles + Return update method to use and ordered poles + + """ + poles = np.asarray(poles) + if poles.ndim > 1: + raise ValueError("Poles must be a 1D array like.") + # Will raise ValueError if poles do not come in complex conjugates pairs + poles = _order_complex_poles(poles) + if A.ndim > 2: + raise ValueError("A must be a 2D array/matrix.") + if B.ndim > 2: + raise ValueError("B must be a 2D array/matrix") + if A.shape[0] != A.shape[1]: + raise ValueError("A must be square") + if len(poles) > A.shape[0]: + raise ValueError("maximum number of poles is %d but you asked for %d" % + (A.shape[0], len(poles))) + if len(poles) < A.shape[0]: + raise ValueError("number of poles is %d but you should provide %d" % + (len(poles), A.shape[0])) + r = np.linalg.matrix_rank(B) + for p in poles: + if sum(p == poles) > r: + raise ValueError("at least one of the requested pole is repeated " + "more than rank(B) times") + # Choose update method + update_loop = _YT_loop + if method not in ('KNV0','YT'): + raise ValueError("The method keyword must be one of 'YT' or 'KNV0'") + + if method == "KNV0": + update_loop = _KNV0_loop + if not all(np.isreal(poles)): + raise ValueError("Complex poles are not supported by KNV0") + + if maxiter < 1: + raise ValueError("maxiter must be at least equal to 1") + + # We do not check rtol <= 0 as the user can use a negative rtol to + # force maxiter iterations + if rtol > 1: + raise ValueError("rtol can not be greater than 1") + + return update_loop, poles + + +def _order_complex_poles(poles): + """ + Check we have complex conjugates pairs and reorder P according to YT, ie + real_poles, complex_i, conjugate complex_i, .... + The lexicographic sort on the complex poles is added to help the user to + compare sets of poles. 
+ """ + ordered_poles = np.sort(poles[np.isreal(poles)]) + im_poles = [] + for p in np.sort(poles[np.imag(poles) < 0]): + if np.conj(p) in poles: + im_poles.extend((p, np.conj(p))) + + ordered_poles = np.hstack((ordered_poles, im_poles)) + + if poles.shape[0] != len(ordered_poles): + raise ValueError("Complex poles must come with their conjugates") + return ordered_poles + + +def _KNV0(B, ker_pole, transfer_matrix, j, poles): + """ + Algorithm "KNV0" Kautsky et Al. Robust pole + assignment in linear state feedback, Int journal of Control + 1985, vol 41 p 1129->1155 + https://la.epfl.ch/files/content/sites/la/files/ + users/105941/public/KautskyNicholsDooren + + """ + # Remove xj form the base + transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1) + # If we QR this matrix in full mode Q=Q0|Q1 + # then Q1 will be a single column orthogonnal to + # Q0, that's what we are looking for ! + + # After merge of gh-4249 great speed improvements could be achieved + # using QR updates instead of full QR in the line below + + # To debug with numpy qr uncomment the line below + # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete") + Q, R = s_qr(transfer_matrix_not_j, mode="full") + + mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T) + yj = np.dot(mat_ker_pj, Q[:, -1]) + + # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its + # projection into ker_pole[j] will yield a vector + # close to 0. As we are looking for a vector in ker_pole[j] + # simply stick with transfer_matrix[:, j] (unless someone provides me with + # a better choice ?) 
+ + if not np.allclose(yj, 0): + xj = yj/np.linalg.norm(yj) + transfer_matrix[:, j] = xj + + # KNV does not support complex poles, using YT technique the two lines + # below seem to work 9 out of 10 times but it is not reliable enough: + # transfer_matrix[:, j]=real(xj) + # transfer_matrix[:, j+1]=imag(xj) + + # Add this at the beginning of this function if you wish to test + # complex support: + # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])): + # return + # Problems arise when imag(xj)=>0 I have no idea on how to fix this + + +def _YT_real(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.1 page 19 related to real pairs + """ + # step 1 page 19 + u = Q[:, -2, np.newaxis] + v = Q[:, -1, np.newaxis] + + # step 2 page 19 + m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) - + np.dot(v, u.T)), ker_pole[j]) + + # step 3 page 19 + um, sm, vm = np.linalg.svd(m) + # mu1, mu2 two first columns of U => 2 first lines of U.T + mu1, mu2 = um.T[:2, :, np.newaxis] + # VM is V.T with numpy we want the first two lines of V.T + nu1, nu2 = vm[:2, :, np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + transfer_matrix_j_mo_transfer_matrix_j = np.vstack(( + transfer_matrix[:, i, np.newaxis], + transfer_matrix[:, j, np.newaxis])) + + if not np.allclose(sm[0], sm[1]): + ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1) + ker_pole_i_nu1 = np.dot(ker_pole[j], nu1) + ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1)) + else: + ker_pole_ij = np.vstack(( + np.hstack((ker_pole[i], + np.zeros(ker_pole[i].shape))), + np.hstack((np.zeros(ker_pole[j].shape), + ker_pole[j])) + )) + mu_nu_matrix = np.vstack( + (np.hstack((mu1, mu2)), np.hstack((nu1, nu2))) + ) + ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix) + transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T), + transfer_matrix_j_mo_transfer_matrix_j) + if not np.allclose(transfer_matrix_ij, 0): + 
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij / + np.linalg.norm(transfer_matrix_ij)) + transfer_matrix[:, i] = transfer_matrix_ij[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = transfer_matrix_ij[ + transfer_matrix[:, i].shape[0]:, 0 + ] + else: + # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to + # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to + # ker_pole_mu_nu and iterate. As we are looking for a vector in + # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help + # (that's a guess, not a claim !) + transfer_matrix[:, i] = ker_pole_mu_nu[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = ker_pole_mu_nu[ + transfer_matrix[:, i].shape[0]:, 0 + ] + + +def _YT_complex(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.2 page 20 related to complex pairs + """ + # step 1 page 20 + ur = np.sqrt(2)*Q[:, -2, np.newaxis] + ui = np.sqrt(2)*Q[:, -1, np.newaxis] + u = ur + 1j*ui + + # step 2 page 20 + ker_pole_ij = ker_pole[i] + m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) - + np.dot(np.conj(u), u.T)), ker_pole_ij) + + # step 3 page 20 + e_val, e_vec = np.linalg.eig(m) + # sort eigenvalues according to their module + e_val_idx = np.argsort(np.abs(e_val)) + mu1 = e_vec[:, e_val_idx[-1], np.newaxis] + mu2 = e_vec[:, e_val_idx[-2], np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + + # remember transfer_matrix_i has been split as + # transfer_matrix[i]=real(transfer_matrix_i) and + # transfer_matrix[j]=imag(transfer_matrix_i) + transfer_matrix_j_mo_transfer_matrix_j = ( + transfer_matrix[:, i, np.newaxis] + + 1j*transfer_matrix[:, j, np.newaxis] + ) + if not np.allclose(np.abs(e_val[e_val_idx[-1]]), + np.abs(e_val[e_val_idx[-2]])): + ker_pole_mu = np.dot(ker_pole_ij, mu1) + else: + mu1_mu2_matrix = np.hstack((mu1, mu2)) + ker_pole_mu = np.dot(ker_pole_ij, 
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
    """
    Apply the rank-2 update from YT section 6.2 (page 20) to a complex
    conjugate pole pair, modifying ``transfer_matrix`` in place.

    Columns ``i`` and ``j`` of ``transfer_matrix`` hold the real and
    imaginary parts of the (single) complex transfer-matrix vector for
    this conjugate pair (see the reconstruction step in ``place_poles``).

    Parameters
    ----------
    ker_pole : list of ndarray
        Kernel bases per pole; entry ``i`` is shared by the conjugate pair.
    Q : ndarray
        ``Q`` from the QR decomposition of the transfer matrix with
        columns ``i`` and ``j`` removed; only its last two columns are used.
    transfer_matrix : ndarray
        Current transfer matrix; columns ``i`` and ``j`` are overwritten.
    i, j : int
        Indices of the real/imaginary columns of the complex pair.
    """
    # step 1 page 20
    ur = np.sqrt(2)*Q[:, -2, np.newaxis]
    ui = np.sqrt(2)*Q[:, -1, np.newaxis]
    u = ur + 1j*ui

    # step 2 page 20
    ker_pole_ij = ker_pole[i]
    m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
               np.dot(np.conj(u), u.T)), ker_pole_ij)

    # step 3 page 20
    e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their module
    e_val_idx = np.argsort(np.abs(e_val))
    mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
    mu2 = e_vec[:, e_val_idx[-2], np.newaxis]

    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)

    # remember transfer_matrix_i has been split as
    # transfer_matrix[i]=real(transfer_matrix_i) and
    # transfer_matrix[j]=imag(transfer_matrix_i)
    transfer_matrix_j_mo_transfer_matrix_j = (
        transfer_matrix[:, i, np.newaxis] +
        1j*transfer_matrix[:, j, np.newaxis]
        )
    if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
                       np.abs(e_val[e_val_idx[-2]])):
        # dominant eigenvalue is unique: project onto its eigenvector only
        ker_pole_mu = np.dot(ker_pole_ij, mu1)
    else:
        # tied moduli: keep both leading eigenvectors
        mu1_mu2_matrix = np.hstack((mu1, mu2))
        ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
    transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
                                 transfer_matrix_j_mo_transfer_matrix_j)

    if not np.allclose(transfer_matrix_i_j, 0):
        transfer_matrix_i_j = (transfer_matrix_i_j /
                               np.linalg.norm(transfer_matrix_i_j))
        transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
        transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
    else:
        # same idea as in YT_real
        transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
        transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Algorithm "YT" Tits, Yang. Globally Convergent
    Algorithms for Robust Pole Assignment by State Feedback
    https://hdl.handle.net/1903/5598
    The poles P have to be sorted accordingly to section 6.2 page 20

    Returns
    -------
    stop : bool
        True if the convergence test from YT page 21 was met.
    cur_rtol : float
        Relative change of ``abs(det(transfer_matrix))`` on the last pass.
    nb_try : int
        Number of full passes over the update order that were performed.
    """
    # The IEEE edition of the YT paper gives useful information on the
    # optimal update order for the real poles in order to minimize the number
    # of times we have to loop over all poles, see page 1442
    nb_real = poles[np.isreal(poles)].shape[0]
    # hnb => Half Nb Real
    hnb = nb_real // 2

    # Stick to the indices in the paper and then remove one to get numpy array
    # index it is a bit easier to link the code to the paper this way even if
    # it is not very clean. The paper is unclear about what should be done
    # when there is only one real pole => use KNV0 on this real pole seem to
    # work
    if nb_real > 0:
        # update the biggest real pole with the smallest one
        update_order = [[nb_real], [1]]
    else:
        update_order = [[],[]]

    # indices (1-based, paper convention) of the first column of each
    # complex conjugate pair
    r_comp = np.arange(nb_real+1, len(poles)+1, 2)
    # step 1.a
    r_p = np.arange(1, hnb+nb_real % 2)
    update_order[0].extend(2*r_p)
    update_order[1].extend(2*r_p+1)
    # step 1.b
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 1.c
    r_p = np.arange(1, hnb+1)
    update_order[0].extend(2*r_p-1)
    update_order[1].extend(2*r_p)
    # step 1.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.a
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(1, hnb+1):
            update_order[0].append(i)
            update_order[1].append(i+j)
    # step 2.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.c
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(hnb+1, nb_real+1):
            idx_1 = i+j
            if idx_1 > nb_real:
                idx_1 = i+j-nb_real
            update_order[0].append(i)
            update_order[1].append(idx_1)
    # step 2.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 3.a
    for i in range(1, hnb+1):
        update_order[0].append(i)
        update_order[1].append(i+hnb)
    # step 3.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)

    # switch from the paper's 1-based indices to numpy's 0-based ones
    update_order = np.array(update_order).T-1
    stop = False
    nb_try = 0
    while nb_try < maxiter and not stop:
        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
        for i, j in update_order:
            if i == j:
                # degenerate pair (single real pole): fall back to KNV0
                assert i == 0, "i!=0 for KNV call in YT"
                assert np.isreal(poles[i]), "calling KNV on a complex pole"
                _KNV0(B, ker_pole, transfer_matrix, i, poles)
            else:
                transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
                                                    axis=1)
                # after merge of gh-4249 great speed improvements could be
                # achieved using QR updates instead of full QR in the line
                # below

                # to debug with numpy qr uncomment the line below
                # Q, _ = np.linalg.qr(transfer_matrix_not_i_j,
                #                     mode="complete")
                Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")

                if np.isreal(poles[i]):
                    assert np.isreal(poles[j]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_real(ker_pole, Q, transfer_matrix, i, j)
                else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_complex(ker_pole, Q, transfer_matrix, i, j)

        # floor the determinant at sqrt(eps) to avoid dividing by ~0
        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
                                      np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs(
            (det_transfer_matrix -
             det_transfer_matrixb) /
            det_transfer_matrix)
        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
            # Convergence test from YT page 21
            stop = True
        nb_try += 1
    return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Repeatedly sweep over all poles, applying KNV method 0 to each, until
    ``abs(det(transfer_matrix))`` stops improving or `maxiter` is reached.

    This wrapper exists because YT needs to call ``_KNV0`` on a single
    pole; otherwise the sweep could live inside ``_KNV0`` itself.

    Returns
    -------
    converged : bool
        True if the YT page 21 convergence criterion was met.
    cur_rtol : float
        Relative determinant change achieved on the last sweep.
    nb_try : int
        Number of sweeps performed.
    """
    eps_floor = np.sqrt(np.spacing(1))
    converged = False
    nb_try = 0
    while not converged and nb_try < maxiter:
        det_before = np.abs(np.linalg.det(transfer_matrix))
        for pole_idx in range(B.shape[0]):
            _KNV0(B, ker_pole, transfer_matrix, pole_idx, poles)

        # floor at sqrt(eps) so the relative change below is well defined
        det_after = np.max((eps_floor,
                            np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs((det_after - det_before) / det_after)
        # Convergence test from YT page 21
        if cur_rtol < rtol and det_after > eps_floor:
            converged = True
        nb_try += 1
    return converged, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
    """
    Compute K such that eigenvalues (A - dot(B, K))=poles.

    K is the gain matrix such as the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``,
    as close as possible to those asked for in poles.

    SISO, MISO and MIMO systems are supported.

    Parameters
    ----------
    A, B : ndarray
        State-space representation of linear system ``AX + BU``.
    poles : array_like
        Desired real poles and/or complex conjugates poles.
        Complex poles are only supported with ``method="YT"`` (default).
    method: {'YT', 'KNV0'}, optional
        Which method to choose to find the gain matrix K. One of:

            - 'YT': Yang Tits
            - 'KNV0': Kautsky, Nichols, Van Dooren update method 0

        See References and Notes for details on the algorithms.
    rtol: float, optional
        After each iteration the determinant of the eigenvectors of
        ``A - B*K`` is compared to its previous value, when the relative
        error between these two values becomes lower than `rtol` the algorithm
        stops.  Default is 1e-3.
    maxiter: int, optional
        Maximum number of iterations to compute the gain matrix.
        Default is 30.

    Returns
    -------
    full_state_feedback : Bunch object
        full_state_feedback is composed of:
            gain_matrix : 1-D ndarray
                The closed loop matrix K such as the eigenvalues of ``A-BK``
                are as close as possible to the requested poles.
            computed_poles : 1-D ndarray
                The poles corresponding to ``A-BK`` sorted as first the real
                poles in increasing order, then the complex conjugates in
                lexicographic order.
            requested_poles : 1-D ndarray
                The poles the algorithm was asked to place sorted as above,
                they may differ from what was achieved.
            X : 2-D ndarray
                The transfer matrix such as ``X * diag(poles) =
                (A - B*K)*X`` (see Notes)
            rtol : float
                The relative tolerance achieved on ``det(X)`` (see Notes).
                `rtol` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.
            nb_iter : int
                The number of iterations performed before converging.
                `nb_iter` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.

    Notes
    -----
    The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
    al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
    matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
    rank-2 updates. This yields on average more robust solutions (see [2]_
    pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
    does not in its original version. Only update method 0 proposed by KNV has
    been implemented here, hence the name ``'KNV0'``.

    KNV extended to complex poles is used in Matlab's ``place`` function, YT is
    distributed under a non-free licence by Slicot under the name ``robpole``.
    It is unclear and undocumented how KNV0 has been extended to complex poles
    (Tits and Yang claim on page 14 of their paper that their method can not be
    used to extend KNV to complex poles), therefore only YT supports them in
    this implementation.

    As the solution to the problem of pole placement is not unique for MIMO
    systems, both methods start with a tentative transfer matrix which is
    altered in various way to increase its determinant. Both methods have been
    proven to converge to a stable solution, however depending on the way the
    initial transfer matrix is chosen they will converge to different
    solutions and therefore there is absolutely no guarantee that using
    ``'KNV0'`` will yield results similar to Matlab's or any other
    implementation of these algorithms.

    Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
    is only provided because it is needed by ``'YT'`` in some specific cases.
    Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
    when ``abs(det(X))`` is used as a robustness indicator.

    [2]_ is available as a technical report on the following URL:
    https://hdl.handle.net/1903/5598

    References
    ----------
    .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
           in linear state feedback", International Journal of Control, Vol. 41
           pp. 1129-1155, 1985.
    .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
           pole assignment by state feedback", IEEE Transactions on Automatic
           Control, Vol. 41, pp. 1432-1452, 1996.

    Examples
    --------
    A simple example demonstrating real pole placement using both KNV and YT
    algorithms.  This is example number 1 from section 4 of the reference KNV
    publication ([1]_):

    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> A = np.array([[ 1.380,  -0.2077,  6.715, -5.676  ],
    ...               [-0.5814, -4.290,   0,      0.6750 ],
    ...               [ 1.067,   4.273,  -6.654,  5.893  ],
    ...               [ 0.0480,  4.273,   1.343, -2.104  ]])
    >>> B = np.array([[ 0,      5.679 ],
    ...               [ 1.136,  1.136 ],
    ...               [ 0,      0,    ],
    ...               [-3.146,  0     ]])
    >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])

    Now compute K with KNV method 0, with the default YT method and with the
    YT method while forcing 100 iterations of the algorithm and print some
    results after each call.

    >>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
    >>> fsf1.gain_matrix
    array([[ 0.20071427, -0.96665799,  0.24066128, -0.10279785],
           [ 0.50587268,  0.57779091,  0.51795763, -0.41991442]])

    >>> fsf2 = signal.place_poles(A, B, P)  # uses YT method
    >>> fsf2.computed_poles
    array([-8.6659, -5.0566, -0.5   , -0.2   ])

    >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
    >>> fsf3.X
    array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j,  0.74823657+0.j],
           [-0.04977751+0.j, -0.80872954+0.j,  0.13566234+0.j, -0.29322906+0.j],
           [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
           [ 0.22267347+0.j,  0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])

    The absolute value of the determinant of X is a good indicator to check
    the robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at
    maximizing it.  Below a comparison of the robustness of the results above:

    >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
    True
    >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
    True

    Now a simple example for complex poles:

    >>> A = np.array([[ 0,  7/3.,  0,   0   ],
    ...               [ 0,   0,    0,  7/9. ],
    ...               [ 0,   0,    0,   0   ],
    ...               [ 0,   0,    0,   0   ]])
    >>> B = np.array([[ 0,  0 ],
    ...               [ 0,  0 ],
    ...               [ 1,  0 ],
    ...               [ 0,  1 ]])
    >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
    >>> fsf = signal.place_poles(A, B, P, method='YT')

    We can plot the desired and computed poles in the complex plane:

    >>> t = np.linspace(0, 2*np.pi, 401)
    >>> plt.plot(np.cos(t), np.sin(t), 'k--')  # unit circle
    >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
    ...          'wo', label='Desired')
    >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
    ...          label='Placed')
    >>> plt.grid()
    >>> plt.axis('image')
    >>> plt.axis([-1.1, 1.1, -1.1, 1.1])
    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)

    """
    # Move away all the inputs checking, it only adds noise to the code
    update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)

    # The current value of the relative tolerance we achieved
    cur_rtol = 0
    # The number of iterations needed before converging
    nb_iter = 0

    # Step A: QR decomposition of B page 1132 KN
    # to debug with numpy qr uncomment the line below
    # u, z = np.linalg.qr(B, mode="complete")
    u, z = s_qr(B, mode="full")
    rankB = np.linalg.matrix_rank(B)
    u0 = u[:, :rankB]
    u1 = u[:, rankB:]
    z = z[:rankB, :]

    # If we can use the identity matrix as X the solution is obvious
    if B.shape[0] == rankB:
        # if B is square and full rank there is only one solution
        # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
        # i.e K=inv(B)*(diag(P)-A)
        # if B has as many lines as its rank (but not square) there are many
        # solutions and we can choose one using least squares
        # => use lstsq in both cases.
        # In both cases the transfer matrix X will be eye(A.shape[0]) and I
        # can hardly think of a better one so there is nothing to optimize
        #
        # for complex poles we use the following trick
        #
        # |a -b| has for eigenvalues a+b and a-b
        # |b  a|
        #
        # |a+bi 0| has the obvious eigenvalues a+bi and a-bi
        # |0 a-bi|
        #
        # e.g solving the first one in R gives the solution
        # for the second one in C
        diag_poles = np.zeros(A.shape)
        idx = 0
        # build the real block-diagonal equivalent of diag(poles)
        while idx < poles.shape[0]:
            p = poles[idx]
            diag_poles[idx, idx] = np.real(p)
            if ~np.isreal(p):
                diag_poles[idx, idx+1] = -np.imag(p)
                diag_poles[idx+1, idx+1] = np.real(p)
                diag_poles[idx+1, idx] = np.imag(p)
                idx += 1  # skip next one
            idx += 1
        gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
        transfer_matrix = np.eye(A.shape[0])
        cur_rtol = np.nan
        nb_iter = np.nan
    else:
        # step A (p1144 KNV) and beginning of step F: decompose
        # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
        # in the same loop
        ker_pole = []

        # flag to skip the conjugate of a complex pole
        skip_conjugate = False
        # select orthonormal base ker_pole for each Pole and vectors for
        # transfer_matrix
        for j in range(B.shape[0]):
            if skip_conjugate:
                skip_conjugate = False
                continue
            pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T

            # after QR Q=Q0|Q1
            # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonnal to Q0 and will be multiplied by the zeros in
            # R when using mode "complete". In default mode Q1 and the zeros
            # in R are not computed

            # To debug with numpy qr uncomment the line below
            # Q, _ = np.linalg.qr(pole_space_j, mode="complete")
            Q, _ = s_qr(pole_space_j, mode="full")

            ker_pole_j = Q[:, pole_space_j.shape[1]:]

            # We want to select one vector in ker_pole_j to build the transfer
            # matrix, however qr returns sometimes vectors with zeros on the
            # same line for each pole and this yields very long convergence
            # times.
            # Or some other times a set of vectors, one with zero imaginary
            # part and one (or several) with imaginary parts. After trying
            # many ways to select the best possible one (eg ditch vectors
            # with zero imaginary part for complex poles) I ended up summing
            # all vectors in ker_pole_j, this solves 100% of the problems and
            # is a valid choice for transfer_matrix.
            # This way for complex poles we are sure to have a non zero
            # imaginary part that way, and the problem of lines full of zeros
            # in transfer_matrix is solved too as when a vector from
            # ker_pole_j has a zero the other one(s) (when
            # ker_pole_j.shape[1]>1) for sure won't have a zero there.

            transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
            transfer_matrix_j = (transfer_matrix_j /
                                 np.linalg.norm(transfer_matrix_j))
            if ~np.isreal(poles[j]):  # complex pole
                transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
                                               np.imag(transfer_matrix_j)])
                ker_pole.extend([ker_pole_j, ker_pole_j])

                # Skip next pole as it is the conjugate
                skip_conjugate = True
            else:  # real pole, nothing to do
                ker_pole.append(ker_pole_j)

            if j == 0:
                transfer_matrix = transfer_matrix_j
            else:
                transfer_matrix = np.hstack((transfer_matrix,
                                             transfer_matrix_j))

        if rankB > 1:  # otherwise there is nothing we can optimize
            stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
                                                  poles, B, maxiter, rtol)
            if not stop and rtol > 0:
                # if rtol<=0 the user has probably done that on purpose,
                # don't annoy him
                err_msg = (
                    "Convergence was not reached after maxiter iterations.\n"
                    f"You asked for a tolerance of {rtol}, we got {cur_rtol}."
                )
                warnings.warn(err_msg, stacklevel=2)

        # reconstruct transfer_matrix to match complex conjugate pairs,
        # ie transfer_matrix_j/transfer_matrix_j+1 are
        # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
        transfer_matrix = transfer_matrix.astype(complex)
        idx = 0
        while idx < poles.shape[0]-1:
            if ~np.isreal(poles[idx]):
                rel = transfer_matrix[:, idx].copy()
                img = transfer_matrix[:, idx+1]
                # rel will be an array referencing a column of transfer_matrix
                # if we don't copy() it will change after the next line and
                # the line after will not yield the correct value
                transfer_matrix[:, idx] = rel-1j*img
                transfer_matrix[:, idx+1] = rel+1j*img
                idx += 1  # skip next one
            idx += 1

        try:
            m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
                                                          transfer_matrix.T)).T
            gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
        except np.linalg.LinAlgError as e:
            raise ValueError("The poles you've chosen can't be placed. "
                             "Check the controllability matrix and try "
                             "another set of poles") from e

    # Beware: Kautsky solves A+BK but the usual form is A-BK
    gain_matrix = -gain_matrix
    # K still contains complex with ~=0j imaginary parts, get rid of them
    gain_matrix = np.real(gain_matrix)

    full_state_feedback = Bunch()
    full_state_feedback.gain_matrix = gain_matrix
    full_state_feedback.computed_poles = _order_complex_poles(
        np.linalg.eig(A - np.dot(B, gain_matrix))[0]
    )
    full_state_feedback.requested_poles = poles
    full_state_feedback.X = transfer_matrix
    full_state_feedback.rtol = cur_rtol
    full_state_feedback.nb_iter = nb_iter

    return full_state_feedback
def dlsim(system, u, t=None, x0=None):
    """
    Simulate output of a discrete-time linear system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    u : array_like
        An input array describing the input at each time `t` (interpolation is
        assumed between given times).  If there are multiple inputs, then each
        column of the rank-2 array represents an input.
    t : array_like, optional
        The time steps at which the input is defined.  If `t` is given, it
        must be the same length as `u`, and the final value in `t` determines
        the number of steps returned in the output.
    x0 : array_like, optional
        The initial conditions on the state vector (zero by default).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : ndarray
        System response, as a 1-D array.
    xout : ndarray, optional
        Time-evolution of the state-vector.  Only generated if the input is a
        `StateSpace` system.

    See Also
    --------
    lsim, dstep, dimpulse, cont2discrete

    Examples
    --------
    A simple integrator transfer function with a discrete time step of 1.0
    could be implemented as:

    >>> import numpy as np
    >>> from scipy import signal
    >>> tf = ([1.0,], [1.0, -1.0], 1.0)
    >>> t_in = [0.0, 1.0, 2.0, 3.0]
    >>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
    >>> t_out, y = signal.dlsim(tf, u, t=t_in)
    >>> y.T
    array([[ 0.,  0.,  0.,  1.]])

    """
    # Convert system to dlti-StateSpace
    if isinstance(system, lti):
        raise AttributeError('dlsim can only be used with discrete-time dlti '
                             'systems.')
    elif not isinstance(system, dlti):
        system = dlti(*system[:-1], dt=system[-1])

    # Condition needed to ensure output remains compatible: only StateSpace
    # callers get the state trajectory back
    is_ss_input = isinstance(system, StateSpace)
    system = system._as_ss()

    u = np.atleast_1d(u)

    # a 1-D input is interpreted as a single-input column vector
    if u.ndim == 1:
        u = np.atleast_2d(u).T

    if t is None:
        out_samples = len(u)
        stoptime = (out_samples - 1) * system.dt
    else:
        stoptime = t[-1]
        out_samples = int(np.floor(stoptime / system.dt)) + 1

    # Pre-build output arrays
    xout = np.zeros((out_samples, system.A.shape[0]))
    yout = np.zeros((out_samples, system.C.shape[0]))
    tout = np.linspace(0.0, stoptime, num=out_samples)

    # Check initial condition
    if x0 is None:
        xout[0, :] = np.zeros((system.A.shape[1],))
    else:
        xout[0, :] = np.asarray(x0)

    # Pre-interpolate inputs into the desired time steps (linear spline)
    if t is None:
        u_dt = u
    else:
        if len(u.shape) == 1:
            u = u[:, np.newaxis]

        u_dt = make_interp_spline(t, u, k=1)(tout)

    # Simulate the system: x[k+1] = A x[k] + B u[k], y[k] = C x[k] + D u[k]
    for i in range(0, out_samples - 1):
        xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
                        np.dot(system.B, u_dt[i, :]))
        yout[i, :] = (np.dot(system.C, xout[i, :]) +
                      np.dot(system.D, u_dt[i, :]))

    # Last point: the loop above stops one sample early because it also
    # advances the state, so compute the final output separately
    yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) +
                              np.dot(system.D, u_dt[out_samples-1, :]))

    if is_ss_input:
        return tout, yout, xout
    else:
        return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
    """
    Impulse response of discrete-time system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system.  The number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    t : array_like, optional
        Time points.  Computed if not given.
    n : int, optional
        The number of time points to compute (if `t` is not given).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : tuple of ndarray
        Impulse response of system.  Each element of the tuple represents
        the output of the system based on an impulse in each input.

    See Also
    --------
    impulse, dstep, dlsim, cont2discrete

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> butter = signal.dlti(*signal.butter(3, 0.5))
    >>> t, y = signal.dimpulse(butter, n=25)
    >>> plt.step(t, np.squeeze(y))
    >>> plt.grid()
    >>> plt.xlabel('n [samples]')
    >>> plt.ylabel('Amplitude')

    """
    # Normalise the input description to a discrete-time state-space model.
    if isinstance(system, dlti):
        system = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dimpulse can only be used with discrete-time '
                             'dlti systems.')
    else:
        system = dlti(*system[:-1], dt=system[-1])._as_ss()

    # Fall back to 100 samples when neither `t` nor `n` is supplied.
    if n is None:
        n = 100

    # Derive the time grid from the system's dt when not given explicitly.
    if t is None:
        t = np.linspace(0, n * system.dt, n, endpoint=False)
    else:
        t = np.asarray(t)

    # Excite each input in turn with a unit impulse at the first sample
    # and collect the corresponding simulated output.
    responses = []
    for input_idx in range(system.inputs):
        u = np.zeros((t.shape[0], system.inputs))
        u[0, input_idx] = 1.0
        sim = dlsim(system, u, t=t, x0=x0)
        tout = sim[0]
        responses.append(sim[1])

    return tout, tuple(responses)
def dstep(system, x0=None, t=None, n=None):
    """
    Step response of discrete-time system.

    Parameters
    ----------
    system : tuple of array_like
        A tuple describing the system.  The number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    t : array_like, optional
        Time points.  Computed if not given.
    n : int, optional
        The number of time points to compute (if `t` is not given).

    Returns
    -------
    tout : ndarray
        Output time points, as a 1-D array.
    yout : tuple of ndarray
        Step response of system.  Each element of the tuple represents
        the output of the system based on a step response to each input.

    See Also
    --------
    step, dimpulse, dlsim, cont2discrete

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> butter = signal.dlti(*signal.butter(3, 0.5))
    >>> t, y = signal.dstep(butter, n=25)
    >>> plt.step(t, np.squeeze(y))
    >>> plt.grid()
    >>> plt.xlabel('n [samples]')
    >>> plt.ylabel('Amplitude')
    """
    # Normalise the input description to a discrete-time state-space model.
    if isinstance(system, dlti):
        system = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dstep can only be used with discrete-time dlti '
                             'systems.')
    else:
        system = dlti(*system[:-1], dt=system[-1])._as_ss()

    # Fall back to 100 samples when neither `t` nor `n` is supplied.
    if n is None:
        n = 100

    # Derive the time grid from the system's dt when not given explicitly.
    if t is None:
        t = np.linspace(0, n * system.dt, n, endpoint=False)
    else:
        t = np.asarray(t)

    # Drive each input in turn with a unit step and collect the
    # corresponding simulated output.
    responses = []
    for input_idx in range(system.inputs):
        u = np.zeros((t.shape[0], system.inputs))
        u[:, input_idx] = np.ones((t.shape[0],))
        sim = dlsim(system, u, t=t, x0=x0)
        tout = sim[0]
        responses.append(sim[1])

    return tout, tuple(responses)
def dfreqresp(system, w=None, n=10000, whole=False):
    r"""
    Calculate the frequency response of a discrete-time system.

    Parameters
    ----------
    system : an instance of the `dlti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `dlti`)
            * 2 (numerator, denominator, dt)
            * 3 (zeros, poles, gain, dt)
            * 4 (A, B, C, D, dt)

    w : array_like, optional
        Array of frequencies (in radians/sample). Magnitude and phase data is
        calculated for every value in this array. If not given a reasonable
        set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given. The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.
    whole : bool, optional
        Normally, if 'w' is not given, frequencies are computed from 0 to the
        Nyquist frequency, pi radians/sample (upper-half of unit-circle). If
        `whole` is True, compute frequencies from 0 to 2*pi radians/sample.

    Returns
    -------
    w : 1D ndarray
        Frequency array [radians/sample]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.18.0

    Examples
    --------
    Generating the Nyquist plot of a transfer function

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    Construct the transfer function
    :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05
    seconds:

    >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)

    >>> w, H = signal.dfreqresp(sys)

    >>> plt.figure()
    >>> plt.plot(H.real, H.imag, "b")
    >>> plt.plot(H.real, -H.imag, "r")
    >>> plt.show()

    """
    # Accept a raw tuple description; reject continuous-time systems
    if not isinstance(system, dlti):
        if isinstance(system, lti):
            raise AttributeError('dfreqresp can only be used with '
                                 'discrete-time systems.')

        system = dlti(*system[:-1], dt=system[-1])

    if isinstance(system, StateSpace):
        # No SS->ZPK code exists right now, just SS->TF->ZPK
        system = system._as_tf()

    if not isinstance(system, (TransferFunction, ZerosPolesGain)):
        raise ValueError('Unknown system type')

    if system.inputs != 1 or system.outputs != 1:
        raise ValueError("dfreqresp requires a SISO (single input, single "
                         "output) system.")

    # worN: explicit frequency grid if given, otherwise a point count
    if w is not None:
        worN = w
    else:
        worN = n

    if isinstance(system, TransferFunction):
        # Convert numerator and denominator from polynomials in the variable
        # 'z' to polynomials in the variable 'z^-1', as freqz expects.
        num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
        w, h = freqz(num, den, worN=worN, whole=whole)

    elif isinstance(system, ZerosPolesGain):
        w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
                         whole=whole)

    return w, h
def dbode(system, w=None, n=100):
    r"""
    Calculate Bode magnitude and phase data of a discrete-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `dlti`)
            * 2 (num, den, dt)
            * 3 (zeros, poles, gain, dt)
            * 4 (A, B, C, D, dt)

    w : array_like, optional
        Array of frequencies (in radians/sample). Magnitude and phase data is
        calculated for every value in this array. If not given a reasonable
        set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given. The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/time_unit]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}`
    with a sampling time of 0.05 seconds:

    >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)

    Equivalent: sys.bode()

    >>> w, mag, phase = signal.dbode(sys)

    >>> plt.figure()
    >>> plt.semilogx(w, mag)    # Bode magnitude plot
    >>> plt.figure()
    >>> plt.semilogx(w, phase)  # Bode phase plot
    >>> plt.show()

    """
    # Complex frequency response in radians/sample ...
    w, resp = dfreqresp(system, w=w, n=n)

    # ... rescaled to rad/time_unit using the system's sampling period.
    dt = system.dt if isinstance(system, dlti) else system[-1]

    magnitude = 20.0 * numpy.log10(abs(resp))
    phase = numpy.rad2deg(numpy.unwrap(numpy.angle(resp)))

    return w / dt, magnitude, phase
def max_len_seq(nbits, state=None, length=None, taps=None):
    """
    Maximum length sequence (MLS) generator.

    Parameters
    ----------
    nbits : int
        Number of bits to use. The resulting sequence has length
        ``(2**nbits) - 1``. Generating long sequences (e.g., greater than
        ``nbits == 16``) can take a long time.
    state : array_like, optional
        If array, must be of length ``nbits`` and is cast to a binary
        (bool) representation. If None, a seed of all ones is used,
        producing a repeatable sequence. An all-zero ``state`` is invalid
        and raises an error. Default: None.
    length : int, optional
        Number of samples to compute. If None, the entire length
        ``(2**nbits) - 1`` is computed.
    taps : array_like, optional
        Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
        If None, taps are selected automatically (for up to
        ``nbits == 32``).

    Returns
    -------
    seq : array
        Resulting MLS sequence of 0's and 1's.
    state : array
        The final state of the shift register.

    Notes
    -----
    The MLS generation algorithm is generically described in:

    https://en.wikipedia.org/wiki/Maximum_length_sequence

    The default taps are the first option listed for each ``nbits`` in:

    https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm

    .. versionadded:: 0.15.0

    Examples
    --------
    >>> from scipy.signal import max_len_seq
    >>> max_len_seq(4)[0]
    array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
    """
    # Match the platform's pointer-sized integer for the Cython inner loop.
    word_dtype = np.int32 if np.intp().itemsize == 4 else np.int64

    if taps is None:
        if nbits not in _mls_taps:
            known_taps = np.array(list(_mls_taps.keys()))
            raise ValueError(f'nbits must be between {known_taps.min()} and '
                             f'{known_taps.max()} if taps is None')
        taps = np.array(_mls_taps[nbits], word_dtype)
    else:
        # Deduplicate and sort descending, as the inner routine expects.
        taps = np.unique(np.array(taps, word_dtype))[::-1]
        if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
            raise ValueError('taps must be non-empty with values between '
                             'zero and nbits (inclusive)')
    taps = np.array(taps)  # needed for Cython and Pythran

    full_period = (2**nbits) - 1
    length = full_period if length is None else int(length)
    if length < 0:
        raise ValueError('length must be greater than or equal to 0')

    # int8 instead of bool: NumPy bool arrays don't play nicely with Cython.
    if state is None:
        state = np.ones(nbits, dtype=np.int8, order='c')
    else:
        # Makes a copy if need be, ensuring it's 0's and 1's.
        state = np.array(state, dtype=bool, order='c').astype(np.int8)
    if state.ndim != 1 or state.size != nbits:
        raise ValueError('state must be a 1-D array of size nbits')
    if np.all(state == 0):
        raise ValueError('state must not be all zeros')

    seq = np.empty(length, dtype=np.int8, order='c')
    state = _max_len_seq_inner(taps, state, nbits, length, seq)
    return seq, state
def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,
                  use="conv"):
    """Compute the coefficients for a 1-D Savitzky-Golay FIR filter.

    Parameters
    ----------
    window_length : int
        The length of the filter window (i.e., the number of coefficients).
    polyorder : int
        The order of the polynomial used to fit the samples.
        `polyorder` must be less than `window_length`.
    deriv : int, optional
        The order of the derivative to compute. Must be a nonnegative
        integer. The default is 0, which means to filter the data without
        differentiating.
    delta : float, optional
        The spacing of the samples to which the filter will be applied.
        This is only used if deriv > 0.
    pos : int or None, optional
        If not None, specifies the evaluation position within the window.
        The default is the middle of the window.
    use : str, optional
        Either 'conv' or 'dot'. With 'conv' (the default) the coefficients
        are ordered for use in a convolution; with 'dot' the order is
        reversed, so the filter is applied by dotting the coefficients
        with the data set.

    Returns
    -------
    coeffs : 1-D ndarray
        The filter coefficients.

    See Also
    --------
    savgol_filter

    Notes
    -----
    .. versionadded:: 0.14.0

    References
    ----------
    A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by
    Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8),
    pp 1627-1639.

    Examples
    --------
    >>> from scipy.signal import savgol_coeffs
    >>> savgol_coeffs(5, 2)
    array([-0.08571429,  0.34285714,  0.48571429,  0.34285714, -0.08571429])
    """
    if polyorder >= window_length:
        raise ValueError("polyorder must be less than window_length.")

    halflen, rem = divmod(window_length, 2)

    if pos is None:
        # For an even window length the evaluation point falls between
        # two samples.
        pos = halflen if rem else halflen - 0.5

    if not (0 <= pos < window_length):
        raise ValueError("pos must be nonnegative and less than "
                         "window_length.")

    if use not in ('conv', 'dot'):
        raise ValueError("`use` must be 'conv' or 'dot'")

    if deriv > polyorder:
        # Differentiating past the fit order leaves the zero polynomial.
        return np.zeros(window_length)

    # Sample positions relative to the evaluation point.
    t = np.arange(-pos, window_length - pos, dtype=float)
    if use == "conv":
        # Reverse so that the result can be used in a convolution.
        t = t[::-1]

    # Vandermonde-style design matrix: row k holds t**k, for
    # k = 0 .. polyorder (not necessarily square).
    powers = np.arange(polyorder + 1).reshape(-1, 1)
    design = t ** powers

    # Right-hand side selecting which derivative order is returned; the
    # coefficient scales for the derivative order and the sample spacing.
    rhs = np.zeros(polyorder + 1)
    rhs[deriv] = float_factorial(deriv) / (delta ** deriv)

    # Least-squares solution of design @ coeffs = rhs.
    coeffs, _, _, _ = lstsq(design, rhs)

    return coeffs
def _polyder(p, m):
    """Differentiate polynomials represented with coefficients.

    `p` must be a 1-D or 2-D array. In the 2-D case each column gives the
    coefficients of one polynomial, with the highest power in the first
    row. `m` must be a nonnegative integer. (numpy.polyder doesn't handle
    the 2-D case.)
    """
    if m == 0:
        return p
    n = len(p)
    if n <= m:
        # Differentiating away every term leaves the zero polynomial.
        return np.zeros_like(p[:1, ...])
    dp = p[:-m].copy()
    for k in range(m):
        # Multiply each coefficient by its current (descending) exponent.
        factors = np.arange(n - k - 1, m - k - 1, -1)
        dp *= factors.reshape((n - m,) + (1,) * (p.ndim - 1))
    return dp


def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
              axis, polyorder, deriv, delta, y):
    """
    Fit an interpolating polynomial to each 1-D slice of
    ``x[window_start:window_stop]`` along `axis`, evaluate it over
    [`interp_start`, `interp_stop`), and write the result into the
    corresponding slice of `y` (in place).
    """
    # Get the edge into a (window_length, -1) array.
    edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
    swapped = not (axis == 0 or axis == -x.ndim)
    flat = edge.swapaxes(axis, 0) if swapped else edge
    flat = flat.reshape(flat.shape[0], -1)

    # One degree-`polyorder` fit per column; poly_coeffs has shape
    # (polyorder + 1, -1), '-1' matching `flat`.
    poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),
                             flat, polyorder)

    if deriv > 0:
        poly_coeffs = _polyder(poly_coeffs, deriv)

    # Evaluate on the requested sub-interval; divide by delta**deriv to
    # account for the sample spacing.
    idx = np.arange(interp_start - window_start,
                    interp_stop - window_start)
    values = np.polyval(poly_coeffs, idx.reshape(-1, 1)) / (delta ** deriv)

    # Reshape the values back to the layout of `y`.
    shp = list(y.shape)
    shp[0], shp[axis] = shp[axis], shp[0]
    values = values.reshape(interp_stop - interp_start, *shp[1:])
    if swapped:
        values = values.swapaxes(0, axis)
    # Write through a view into the slice of `y` being replaced.
    target = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
    target[...] = values


def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
    """
    Fill in the first and last ``window_length // 2`` values of `y` along
    `axis` with polynomial fits of the corresponding ends of `x`.

    Simply calls _fit_edge twice, once for each end of the axis.
    """
    half = window_length // 2
    _fit_edge(x, 0, window_length, 0, half, axis,
              polyorder, deriv, delta, y)
    n = x.shape[axis]
    _fit_edge(x, n - window_length, n, n - half, n, axis,
              polyorder, deriv, delta, y)
def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,
                  axis=-1, mode='interp', cval=0.0):
    """ Apply a Savitzky-Golay filter to an array.

    This is a 1-D filter. If `x` has dimension greater than 1, `axis`
    determines the axis along which the filter is applied.

    Parameters
    ----------
    x : array_like
        The data to be filtered. If `x` is not single or double precision
        floating point, it is converted to ``numpy.float64`` before
        filtering.
    window_length : int
        The length of the filter window (i.e., the number of coefficients).
        If `mode` is 'interp', `window_length` must be less than or equal
        to the size of `x`.
    polyorder : int
        The order of the polynomial used to fit the samples.
        `polyorder` must be less than `window_length`.
    deriv : int, optional
        The order of the derivative to compute. Must be a nonnegative
        integer. The default is 0, which means to filter the data without
        differentiating.
    delta : float, optional
        The spacing of the samples to which the filter will be applied.
        Only used if deriv > 0. Default is 1.0.
    axis : int, optional
        The axis of `x` along which the filter is applied. Default is -1.
    mode : str, optional
        Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This
        determines the type of extension used for the padded signal.
        When `mode` is 'constant', the padding value is given by `cval`.
        When 'interp' (the default) is selected, no extension is used;
        instead a degree-`polyorder` polynomial is fit to the last
        `window_length` values at each edge and used to evaluate the last
        ``window_length // 2`` output values.
    cval : scalar, optional
        Value to fill past the edges of the input if `mode` is 'constant'.
        Default is 0.0.

    Returns
    -------
    y : ndarray, same shape as `x`
        The filtered data.

    See Also
    --------
    savgol_coeffs

    Notes
    -----
    Details on the `mode` options:

        'mirror':
            Repeats the values at the edges in reverse order. The value
            closest to the edge is not included.
        'nearest':
            The extension contains the nearest input value.
        'constant':
            The extension contains the value given by the `cval` argument.
        'wrap':
            The extension contains the values from the other end of the
            array.

    .. versionadded:: 0.14.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import savgol_filter
    >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9])
    >>> savgol_filter(x, 5, 2)
    array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1.  , 4.  , 9.  ])
    """
    if mode not in ("mirror", "constant", "nearest", "interp", "wrap"):
        raise ValueError("mode must be 'mirror', 'constant', 'nearest' "
                         "'wrap' or 'interp'.")

    x = np.asarray(x)
    # Work in single or double precision floating point only.
    if x.dtype not in (np.float64, np.float32):
        x = x.astype(np.float64)

    coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)

    if mode != "interp":
        # Every non-'interp' mode is handed to ndimage.convolve1d as-is.
        return convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)

    if window_length > x.shape[axis]:
        raise ValueError("If mode is 'interp', window_length must be less "
                         "than or equal to the size of x.")

    # Do not pad. The interior is a plain convolution; the elements
    # within `window_length // 2` of each end are then overwritten with
    # values from a polynomial fitted to the last `window_length`
    # elements at that end.
    y = convolve1d(x, coeffs, axis=axis, mode="constant")
    _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
    return y
def _calc_dual_canonical_window(win: np.ndarray, hop: int) -> np.ndarray:
    """Calculate the canonical dual window for 1d window `win` and a time
    step of `hop` samples.

    A ``ValueError`` is raised if the inversion fails.

    This is a separate function rather than a method, since it is also
    used in the class method ``ShortTimeFFT.from_dual()``.
    """
    if hop > len(win):
        raise ValueError(f"{hop=} is larger than window length of {len(win)}" +
                         " => STFT not invertible!")
    if issubclass(win.dtype.type, np.integer):
        raise ValueError("Parameter 'win' cannot be of integer type, but " +
                         f"{win.dtype=} => STFT not invertible!")
    # Integer windows are rejected because the `relative_resolution`
    # computation below needs float metadata, and `win / DD` would
    # silently promote the integers away anyhow (an implicit cast that
    # causes confusion with 32-bit floats).

    # win * win.conj() does not guarantee a real result, hence the
    # explicit sum of squared parts:
    power = win.real**2 + win.imag**2
    overlap_sum = power.copy()
    for shift in range(hop, len(win), hop):
        overlap_sum[shift:] += power[:-shift]
        overlap_sum[:-shift] += power[shift:]

    # All overlap sums must be meaningfully positive, else the STFT
    # cannot be inverted:
    threshold = np.finfo(win.dtype).resolution * max(overlap_sum)
    if not np.all(overlap_sum >= threshold):
        raise ValueError("Short-time Fourier Transform not invertible!")

    return win / overlap_sum
+ + More detailed information can be found in the :ref:`tutorial_stft` section + of the :ref:`user_guide`. + + Note that all parameters of the initializer, except `scale_to` (which uses + `scaling`) have identical named attributes. + + Parameters + ---------- + win : np.ndarray + The window must be a real- or complex-valued 1d array. + hop : int + The increment in samples, by which the window is shifted in each step. + fs : float + Sampling frequency of input signal and window. Its relation to the + sampling interval `T` is ``T = 1 / fs``. + fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' + Mode of FFT to be used (default 'onesided'). + See property `fft_mode` for details. + mfft: int | None + Length of the FFT used, if a zero padded FFT is desired. + If ``None`` (default), the length of the window `win` is used. + dual_win : np.ndarray | None + The dual window of `win`. If set to ``None``, it is calculated if + needed. + scale_to : 'magnitude', 'psd' | None + If not ``None`` (default) the window function is scaled, so each STFT + column represents either a 'magnitude' or a power spectral density + ('psd') spectrum. This parameter sets the property `scaling` to the + same value. See method `scale_to` for details. + phase_shift : int | None + If set, add a linear phase `phase_shift` / `mfft` * `f` to each + frequency `f`. The default value 0 ensures that there is no phase shift + on the zeroth slice (in which t=0 is centered). See property + `phase_shift` for more details. + + Examples + -------- + The following example shows the magnitude of the STFT of a sine with + varying frequency :math:`f_i(t)` (marked by a red dashed line in the plot): + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... 
+ >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal + >>> t_x = np.arange(N) * T_x # time indexes for signal + >>> f_i = 1 * np.arctan((t_x - t_x[N // 2]) / 2) + 5 # varying frequency + >>> x = np.sin(2*np.pi*np.cumsum(f_i)*T_x) # the signal + + The utilized Gaussian window is 50 samples or 2.5 s long. The parameter + ``mfft=200`` in `ShortTimeFFT` causes the spectrum to be oversampled + by a factor of 4: + + >>> g_std = 8 # standard deviation for Gaussian window in samples + >>> w = gaussian(50, std=g_std, sym=True) # symmetric Gaussian window + >>> SFT = ShortTimeFFT(w, hop=10, fs=1/T_x, mfft=200, scale_to='magnitude') + >>> Sx = SFT.stft(x) # perform the STFT + + In the plot, the time extent of the signal `x` is marked by vertical dashed + lines. Note that the SFT produces values outside the time range of `x`. The + shaded areas on the left and the right indicate border effects caused + by the window slices in that area not fully being inside time range of + `x`: + + >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit + >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot + >>> ax1.set_title(rf"STFT ({SFT.m_num*SFT.T:g}$\,s$ Gaussian window, " + + ... rf"$\sigma_t={g_std*SFT.T}\,$s)") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + + ... rf"$\Delta t = {SFT.delta_t:g}\,$s)", + ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", + ... xlim=(t_lo, t_hi)) + ... + >>> im1 = ax1.imshow(abs(Sx), origin='lower', aspect='auto', + ... extent=SFT.extent(N), cmap='viridis') + >>> ax1.plot(t_x, f_i, 'r--', alpha=.5, label='$f_i(t)$') + >>> fig1.colorbar(im1, label="Magnitude $|S_x(t, f)|$") + ... + >>> # Shade areas where window slices stick out to the side: + >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), + ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: + ... 
ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.2) + >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line: + ... ax1.axvline(t_, color='y', linestyle='--', alpha=0.5) + >>> ax1.legend() + >>> fig1.tight_layout() + >>> plt.show() + + Reconstructing the signal with the `~ShortTimeFFT.istft` is + straightforward, but note that the length of `x1` should be specified, + since the SFT length increases in `hop` steps: + + >>> SFT.invertible # check if invertible + True + >>> x1 = SFT.istft(Sx, k1=N) + >>> np.allclose(x, x1) + True + + It is possible to calculate the SFT of signal parts: + + >>> p_q = SFT.nearest_k_p(N // 2) + >>> Sx0 = SFT.stft(x[:p_q]) + >>> Sx1 = SFT.stft(x[p_q:]) + + When assembling sequential STFT parts together, the overlap needs to be + considered: + + >>> p0_ub = SFT.upper_border_begin(p_q)[1] - SFT.p_min + >>> p1_le = SFT.lower_border_end[1] - SFT.p_min + >>> Sx01 = np.hstack((Sx0[:, :p0_ub], + ... Sx0[:, p0_ub:] + Sx1[:, :p1_le], + ... Sx1[:, p1_le:])) + >>> np.allclose(Sx01, Sx) # Compare with SFT of complete signal + True + + It is also possible to calculate the `itsft` for signal parts: + + >>> y_p = SFT.istft(Sx, N//3, N//2) + >>> np.allclose(y_p, x[N//3:N//2]) + True + + """ + # immutable attributes (only have getters but no setters): + _win: np.ndarray # window + _dual_win: np.ndarray | None = None # canonical dual window + _hop: int # Step of STFT in number of samples + + # mutable attributes: + _fs: float # sampling frequency of input signal and window + _fft_mode: FFT_MODE_TYPE = 'onesided' # Mode of FFT to use + _mfft: int # length of FFT used - defaults to len(win) + _scaling: Literal['magnitude', 'psd'] | None = None # Scaling of _win + _phase_shift: int | None # amount to shift phase of FFT in samples + + # attributes for caching calculated values: + _fac_mag: float | None = None + _fac_psd: float | None = None + _lower_border_end: tuple[int, int] | None = None + + def __init__(self, win: np.ndarray, hop: int, 
fs: float, *, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + dual_win: np.ndarray | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + if not (win.ndim == 1 and win.size > 0): + raise ValueError(f"Parameter win must be 1d, but {win.shape=}!") + if not all(np.isfinite(win)): + raise ValueError("Parameter win must have finite entries!") + if not (hop >= 1 and isinstance(hop, int)): + raise ValueError(f"Parameter {hop=} is not an integer >= 1!") + self._win, self._hop, self.fs = win, hop, fs + + self.mfft = len(win) if mfft is None else mfft + + if dual_win is not None: + if dual_win.shape != win.shape: + raise ValueError(f"{dual_win.shape=} must equal {win.shape=}!") + if not all(np.isfinite(dual_win)): + raise ValueError("Parameter dual_win must be a finite array!") + self._dual_win = dual_win # needs to be set before scaling + + if scale_to is not None: # needs to be set before fft_mode + self.scale_to(scale_to) + + self.fft_mode, self.phase_shift = fft_mode, phase_shift + + @classmethod + def from_dual(cls, dual_win: np.ndarray, hop: int, fs: float, *, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + r"""Instantiate a `ShortTimeFFT` by only providing a dual window. + + If an STFT is invertible, it is possible to calculate the window `win` + from a given dual window `dual_win`. All other parameters have the + same meaning as in the initializer of `ShortTimeFFT`. + + As explained in the :ref:`tutorial_stft` section of the + :ref:`user_guide`, an invertible STFT can be interpreted as series + expansion of time-shifted and frequency modulated dual windows. E.g., + the series coefficient S[q,p] belongs to the term, which shifted + `dual_win` by p * `delta_t` and multiplied it by + exp( 2 * j * pi * t * q * `delta_f`). 
+ + + Examples + -------- + The following example discusses decomposing a signal into time- and + frequency-shifted Gaussians. A Gaussian with standard deviation of + one made up of 51 samples will be used: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... + >>> T, N = 0.1, 51 + >>> d_win = gaussian(N, std=1/T, sym=True) # symmetric Gaussian window + >>> t = T * (np.arange(N) - N//2) + ... + >>> fg1, ax1 = plt.subplots() + >>> ax1.set_title(r"Dual Window: Gaussian with $\sigma_t=1$") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", + ... xlim=(t[0], t[-1]), ylim=(0, 1.1*max(d_win))) + >>> ax1.plot(t, d_win, 'C0-') + + The following plot with the overlap of 41, 11 and 2 samples show how + the `hop` interval affects the shape of the window `win`: + + >>> fig2, axx = plt.subplots(3, 1, sharex='all') + ... + >>> axx[0].set_title(r"Windows for hop$\in\{10, 40, 49\}$") + >>> for c_, h_ in enumerate([10, 40, 49]): + ... SFT = ShortTimeFFT.from_dual(d_win, h_, 1/T) + ... axx[c_].plot(t + h_ * T, SFT.win, 'k--', alpha=.3, label=None) + ... axx[c_].plot(t - h_ * T, SFT.win, 'k:', alpha=.3, label=None) + ... axx[c_].plot(t, SFT.win, f'C{c_+1}', + ... label=r"$\Delta t=%0.1f\,$s" % SFT.delta_t) + ... axx[c_].set_ylim(0, 1.1*max(SFT.win)) + ... axx[c_].legend(loc='center') + >>> axx[-1].set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", + ... xlim=(t[0], t[-1])) + >>> plt.show() + + Beside the window `win` centered at t = 0 the previous (t = -`delta_t`) + and following window (t = `delta_t`) are depicted. It can be seen that + for small `hop` intervals, the window is compact and smooth, having a + good time-frequency concentration in the STFT. For the large `hop` + interval of 4.9 s, the window has small values around t = 0, which are + not covered by the overlap of the adjacent windows, which could lead to + numeric inaccuracies. 
        Furthermore, the peaky shape at the beginning and
        the end of the window points to a higher bandwidth, resulting in a
        poorer time-frequency resolution of the STFT.
        Hence, the choice of the `hop` interval will be a compromise between
        a time-frequency resolution and memory requirements demanded by small
        `hop` sizes.

        See Also
        --------
        from_window: Create instance by wrapping `get_window`.
        ShortTimeFFT: Create instance using standard initializer.
        """
        # The canonical dual of the dual window recovers the primal window:
        win = _calc_dual_canonical_window(dual_win, hop)
        return cls(win=win, hop=hop, fs=fs, fft_mode=fft_mode, mfft=mfft,
                   dual_win=dual_win, scale_to=scale_to,
                   phase_shift=phase_shift)

    @classmethod
    def from_window(cls, win_param: str | tuple | float,
                    fs: float, nperseg: int, noverlap: int, *,
                    symmetric_win: bool = False,
                    fft_mode: FFT_MODE_TYPE = 'onesided',
                    mfft: int | None = None,
                    scale_to: Literal['magnitude', 'psd'] | None = None,
                    phase_shift: int | None = 0):
        """Instantiate `ShortTimeFFT` by using `get_window`.

        The method `get_window` is used to create a window of length
        `nperseg`. The parameter names `noverlap`, and `nperseg` are used
        here, since they are more in line with other classical STFT libraries.

        Parameters
        ----------
        win_param: Union[str, tuple, float],
            Parameters passed to `get_window`. For windows with no parameters,
            it may be a string (e.g., ``'hann'``), for parametrized windows a
            tuple, (e.g., ``('gaussian', 2.)``) or a single float specifying
            the shape parameter of a kaiser window (i.e. ``4.`` and
            ``('kaiser', 4.)`` are equal). See `get_window` for more details.
        fs : float
            Sampling frequency of input signal. Its relation to the
            sampling interval `T` is ``T = 1 / fs``.
        nperseg: int
            Window length in samples, which corresponds to the `m_num`.
        noverlap: int
            Window overlap in samples. It relates to the `hop` increment by
            ``hop = nperseg - noverlap``.
        symmetric_win: bool
            If ``True`` then a symmetric window is generated, else a periodic
            window is generated (default). Though symmetric windows seem for
            most applications to be more sensible, the default of a periodic
            windows was chosen to correspond to the default of `get_window`.
        fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X'
            Mode of FFT to be used (default 'onesided').
            See property `fft_mode` for details.
        mfft: int | None
            Length of the FFT used, if a zero padded FFT is desired.
            If ``None`` (default), the length of the window `win` is used.
        scale_to : 'magnitude', 'psd' | None
            If not ``None`` (default) the window function is scaled, so each
            STFT column represents either a 'magnitude' or a power spectral
            density ('psd') spectrum. This parameter sets the property
            `scaling` to the same value. See method `scale_to` for details.
        phase_shift : int | None
            If set, add a linear phase `phase_shift` / `mfft` * `f` to each
            frequency `f`. The default value 0 ensures that there is no phase
            shift on the zeroth slice (in which t=0 is centered). See property
            `phase_shift` for more details.

        Examples
        --------
        The following instances ``SFT0`` and ``SFT1`` are equivalent:

        >>> from scipy.signal import ShortTimeFFT, get_window
        >>> nperseg = 9  # window length
        >>> w = get_window(('gaussian', 2.), nperseg)
        >>> fs = 128  # sampling frequency
        >>> hop = 3  # increment of STFT time slice
        >>> SFT0 = ShortTimeFFT(w, hop, fs=fs)
        >>> SFT1 = ShortTimeFFT.from_window(('gaussian', 2.), fs, nperseg,
        ...                                 noverlap=nperseg-hop)

        See Also
        --------
        scipy.signal.get_window: Return a window of a given length and type.
        from_dual: Create instance using dual window.
        ShortTimeFFT: Create instance using standard initializer.
        """
        # `fftbins=True` in `get_window` means a periodic window:
        win = get_window(win_param, nperseg, fftbins=not symmetric_win)
        return cls(win, hop=nperseg-noverlap, fs=fs, fft_mode=fft_mode,
                   mfft=mfft, scale_to=scale_to, phase_shift=phase_shift)

    @property
    def win(self) -> np.ndarray:
        """Window function as real- or complex-valued 1d array.

        This attribute is read only, since `dual_win` depends on it.

        See Also
        --------
        dual_win: Canonical dual window.
        m_num: Number of samples in window `win`.
        m_num_mid: Center index of window `win`.
        mfft: Length of input for the FFT used - may be larger than `m_num`.
        hop: Time increment in signal samples for sliding window.
        ShortTimeFFT: Class this property belongs to.
        """
        return self._win

    @property
    def hop(self) -> int:
        """Time increment in signal samples for sliding window.

        This attribute is read only, since `dual_win` depends on it.

        See Also
        --------
        delta_t: Time increment of STFT (``hop*T``)
        m_num: Number of samples in window `win`.
        m_num_mid: Center index of window `win`.
        mfft: Length of input for the FFT used - may be larger than `m_num`.
        T: Sampling interval of input signal and of the window.
        win: Window function as real- or complex-valued 1d array.
        ShortTimeFFT: Class this property belongs to.
        """
        return self._hop

    @property
    def T(self) -> float:
        """Sampling interval of input signal and of the window.

        A ``ValueError`` is raised if it is set to a non-positive value.

        See Also
        --------
        delta_t: Time increment of STFT (``hop*T``)
        hop: Time increment in signal samples for sliding window.
        fs: Sampling frequency (being ``1/T``)
        t: Times of STFT for an input signal with `n` samples.
        ShortTimeFFT: Class this property belongs to.
        """
        # Only `fs` is stored internally; `T` is always derived from it.
        return 1 / self._fs

    @T.setter
    def T(self, v: float):
        """Sampling interval of input signal and of the window.

        A ``ValueError`` is raised if it is set to a non-positive value.
+ """ + if not (v > 0): + raise ValueError(f"Sampling interval T={v} must be positive!") + self._fs = 1 / v + + @property + def fs(self) -> float: + """Sampling frequency of input signal and of the window. + + The sampling frequency is the inverse of the sampling interval `T`. + A ``ValueError`` is raised if it is set to a non-positive value. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + T: Sampling interval of input signal and of the window (``1/fs``). + ShortTimeFFT: Class this property belongs to. + """ + return self._fs + + @fs.setter + def fs(self, v: float): + """Sampling frequency of input signal and of the window. + + The sampling frequency is the inverse of the sampling interval `T`. + A ``ValueError`` is raised if it is set to a non-positive value. + """ + if not (v > 0): + raise ValueError(f"Sampling frequency fs={v} must be positive!") + self._fs = v + + @property + def fft_mode(self) -> FFT_MODE_TYPE: + """Mode of utilized FFT ('twosided', 'centered', 'onesided' or + 'onesided2X'). + + It can have the following values: + + 'twosided': + Two-sided FFT, where values for the negative frequencies are in + upper half of the array. Corresponds to :func:`~scipy.fft.fft()`. + 'centered': + Two-sided FFT with the values being ordered along monotonically + increasing frequencies. Corresponds to applying + :func:`~scipy.fft.fftshift()` to :func:`~scipy.fft.fft()`. + 'onesided': + Calculates only values for non-negative frequency values. + Corresponds to :func:`~scipy.fft.rfft()`. + 'onesided2X': + Like `onesided`, but the non-zero frequencies are doubled if + `scaling` is set to 'magnitude' or multiplied by ``sqrt(2)`` if + set to 'psd'. If `scaling` is ``None``, setting `fft_mode` to + `onesided2X` is not allowed. + If the FFT length `mfft` is even, the last FFT value is not paired, + and thus it is not scaled. 
+ + Note that `onesided` and `onesided2X` do not work for complex-valued signals or + complex-valued windows. Furthermore, the frequency values can be obtained by + reading the `f` property, and the number of samples by accessing the `f_pts` + property. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + f_pts: Width of the frequency bins of the STFT. + onesided_fft: True if a one-sided FFT is used. + scaling: Normalization applied to the window function + ShortTimeFFT: Class this property belongs to. + """ + return self._fft_mode + + @fft_mode.setter + def fft_mode(self, t: FFT_MODE_TYPE): + """Set mode of FFT. + + Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'. + See the property `fft_mode` for more details. + """ + if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)): + raise ValueError(f"fft_mode='{t}' not in {fft_mode_types}!") + + if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win): + raise ValueError(f"One-sided spectra, i.e., fft_mode='{t}', " + + "are not allowed for complex-valued windows!") + + if t == 'onesided2X' and self.scaling is None: + raise ValueError(f"For scaling is None, fft_mode='{t}' is invalid!" + "Do scale_to('psd') or scale_to('magnitude')!") + self._fft_mode = t + + @property + def mfft(self) -> int: + """Length of input for the FFT used - may be larger than window + length `m_num`. + + If not set, `mfft` defaults to the window length `m_num`. + + See Also + -------- + f_pts: Number of points along the frequency axis. + f: Frequencies values of the STFT. + m_num: Number of samples in window `win`. + ShortTimeFFT: Class this property belongs to. + """ + return self._mfft + + @mfft.setter + def mfft(self, n_: int): + """Setter for the length of FFT utilized. + + See the property `mfft` for further details. 
        """
        # The FFT length may only be extended (zero padding), never shortened:
        if not (n_ >= self.m_num):
            raise ValueError(f"Attribute mfft={n_} needs to be at least the " +
                             f"window length m_num={self.m_num}!")
        self._mfft = n_

    @property
    def scaling(self) -> Literal['magnitude', 'psd'] | None:
        """Normalization applied to the window function
        ('magnitude', 'psd' or ``None``).

        If not ``None``, the FFTs can be either interpreted as a magnitude or
        a power spectral density spectrum.

        The window function can be scaled by calling the `scale_to` method,
        or it is set by the initializer parameter ``scale_to``.

        See Also
        --------
        fac_magnitude: Scaling factor for a magnitude spectrum.
        fac_psd: Scaling factor for a power spectral density spectrum.
        fft_mode: Mode of utilized FFT
        scale_to: Scale window to obtain 'magnitude' or 'psd' scaling.
        ShortTimeFFT: Class this property belongs to.
        """
        return self._scaling

    def scale_to(self, scaling: Literal['magnitude', 'psd']):
        """Scale window to obtain 'magnitude' or 'psd' scaling for the STFT.

        The window of a 'magnitude' spectrum has an integral of one, i.e., unit
        area for non-negative windows. This ensures that the absolute values of
        the spectrum do not change if the length of the window changes (given
        the input signal is stationary).

        To represent the power spectral density ('psd') for varying length
        windows the area of the absolute square of the window needs to be
        unity.

        The `scaling` property shows the current scaling. The properties
        `fac_magnitude` and `fac_psd` show the scaling factors required to
        scale the STFT values to a magnitude or a psd spectrum.

        This method is called, if the initializer parameter `scale_to` is set.

        See Also
        --------
        fac_magnitude: Scaling factor for a magnitude spectrum.
        fac_psd: Scaling factor for a power spectral density spectrum.
        fft_mode: Mode of utilized FFT
        scaling: Normalization applied to the window function.
        ShortTimeFFT: Class this method belongs to.
        """
        if scaling not in (scaling_values := {'magnitude', 'psd'}):
            raise ValueError(f"{scaling=} not in {scaling_values}!")
        if self._scaling == scaling:  # do nothing
            return

        s_fac = self.fac_psd if scaling == 'psd' else self.fac_magnitude
        # Scaling the window inversely scales the dual window (if cached):
        self._win = self._win * s_fac
        if self._dual_win is not None:
            self._dual_win = self._dual_win / s_fac
        self._fac_mag, self._fac_psd = None, None  # reset scaling factors
        self._scaling = scaling

    @property
    def phase_shift(self) -> int | None:
        """If set, add linear phase `phase_shift` / `mfft` * `f` to each FFT
        slice of frequency `f`.

        Shifting (more precisely `rolling`) an `mfft`-point FFT input by
        `phase_shift` samples results in a multiplication of the output by
        ``np.exp(2j*np.pi*q*phase_shift/mfft)`` at the frequency q * `delta_f`.

        The default value 0 ensures that there is no phase shift on the
        zeroth slice (in which t=0 is centered).
        No phase shift (``phase_shift is None``) is equivalent to
        ``phase_shift = -mfft//2``. In this case slices are not shifted
        before calculating the FFT.

        The absolute value of `phase_shift` is limited to be less than `mfft`.

        See Also
        --------
        delta_f: Width of the frequency bins of the STFT.
        f: Frequencies values of the STFT.
        mfft: Length of input for the FFT used
        ShortTimeFFT: Class this property belongs to.
        """
        return self._phase_shift

    @phase_shift.setter
    def phase_shift(self, v: int | None):
        """The absolute value of the phase shift needs to be less than mfft
        samples.

        See the `phase_shift` getter method for more details.
        """
        if v is None:
            self._phase_shift = v
            return
        if not isinstance(v, int):
            raise ValueError(f"phase_shift={v} has the unit samples. 
Hence " +
                             "it needs to be an int or it may be None!")
        if not (-self.mfft < v < self.mfft):
            raise ValueError("-mfft < phase_shift < mfft does not hold " +
                             f"for mfft={self.mfft}, phase_shift={v}!")
        self._phase_shift = v

    def _x_slices(self, x: np.ndarray, k_off: int, p0: int, p1: int,
                  padding: PAD_TYPE) -> Generator[np.ndarray, None, None]:
        """Generate signal slices along last axis of `x`.

        This method is only used by `stft_detrend`. The parameters are
        described in `~ShortTimeFFT.stft`.
        """
        if padding not in (padding_types := get_args(PAD_TYPE)):
            raise ValueError(f"Parameter {padding=} not in {padding_types}!")
        pad_kws: dict[str, dict] = {  # possible keywords to pass to np.pad:
            'zeros': dict(mode='constant', constant_values=(0, 0)),
            'edge': dict(mode='edge'),
            'even': dict(mode='reflect', reflect_type='even'),
            'odd': dict(mode='reflect', reflect_type='odd'),
        }  # typing of pad_kws is needed to make mypy happy

        n, n1 = x.shape[-1], (p1 - p0) * self.hop
        k0 = p0 * self.hop - self.m_num_mid + k_off  # start sample
        k1 = k0 + n1 + self.m_num  # end sample

        i0, i1 = max(k0, 0), min(k1, n)  # indexes to shorten x
        # dimensions for padding x:
        pad_width = [(0, 0)] * (x.ndim-1) + [(-min(k0, 0), max(k1 - n, 0))]

        x1 = np.pad(x[..., i0:i1], pad_width, **pad_kws[padding])
        for k_ in range(0, n1, self.hop):
            yield x1[..., k_:k_ + self.m_num]

    def stft(self, x: np.ndarray, p0: int | None = None,
             p1: int | None = None, *, k_offset: int = 0,
             padding: PAD_TYPE = 'zeros', axis: int = -1) \
            -> np.ndarray:
        """Perform the short-time Fourier transform.

        A two-dimensional matrix with ``p1-p0`` columns is calculated.
        The `f_pts` rows represent value at the frequencies `f`. The q-th
        column of the windowed FFT with the window `win` is centered at t[q].
        The columns represent the values at the frequencies `f`.

        Parameters
        ----------
        x
            The input signal as real or complex valued array. For complex
            values, the property `fft_mode` must be set to 'twosided' or
            'centered'.
        p0
            The first element of the range of slices to calculate. If ``None``
            then it is set to :attr:`p_min`, which is the smallest possible
            slice.
        p1
            The end of the array. If ``None`` then `p_max(n)` is used.
        k_offset
            Index of first sample (t = 0) in `x`.
        padding
            Kind of values which are added, when the sliding window sticks out
            on either the lower or upper end of the input `x`. Zeros are added
            if the default 'zeros' is set. For 'edge' either the first or the
            last value of `x` is used. 'even' pads by reflecting the
            signal on the first or last sample and 'odd' additionally
            multiplies it with -1.
        axis
            The axis of `x` over which to compute the STFT.
            If not given, the last axis is used.

        Returns
        -------
        S
            A complex array is returned with the dimension always being larger
            by one than of `x`. The last axis always represent the time slices
            of the STFT. `axis` defines the frequency axis (default second to
            last). E.g., for a one-dimensional `x`, a complex 2d array is
            returned, with axis 0 representing frequency and axis 1 the time
            slices.

        See Also
        --------
        delta_f: Width of the frequency bins of the STFT.
        delta_t: Time increment of STFT
        f: Frequencies values of the STFT.
        invertible: Check if STFT is invertible.
        :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform.
        p_range: Determine and validate slice index range.
        stft_detrend: STFT with detrended segments.
        t: Times of STFT for an input signal with `n` samples.
        :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
        """
        # `stft` is a thin wrapper: the STFT without detrending is the
        # detrended STFT with `detr=None`:
        return self.stft_detrend(x, None, p0, p1, k_offset=k_offset,
                                 padding=padding, axis=axis)

    def stft_detrend(self, x: np.ndarray,
                     detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None,  # noqa: E501
                     p0: int | None = None, p1: int | None = None, *,
                     k_offset: int = 0, padding: PAD_TYPE = 'zeros',
                     axis: int = -1) \
            -> np.ndarray:
        """Short-time Fourier transform with a trend being subtracted from each
        segment beforehand.

        If `detr` is set to 'constant', the mean is subtracted, if set to
        "linear", the linear trend is removed. This is achieved by calling
        :func:`scipy.signal.detrend`. If `detr` is a function, `detr` is
        applied to each segment.
        All other parameters have the same meaning as in `~ShortTimeFFT.stft`.

        Note that due to the detrending, the original signal cannot be
        reconstructed by the `~ShortTimeFFT.istft`.

        See Also
        --------
        invertible: Check if STFT is invertible.
        :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform.
        :meth:`~ShortTimeFFT.stft`: Short-time Fourier transform
                                    (without detrending).
        :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
        """
        if self.onesided_fft and np.iscomplexobj(x):
            raise ValueError(f"Complex-valued `x` not allowed for {self.fft_mode=}'! 
" + "Set property `fft_mode` to 'twosided' or 'centered'.") + if isinstance(detr, str): + detr = partial(detrend, type=detr) + elif not (detr is None or callable(detr)): + raise ValueError(f"Parameter {detr=} is not a str, function or " + + "None!") + n = x.shape[axis] + if not (n >= (m2p := self.m_num-self.m_num_mid)): + e_str = f'{len(x)=}' if x.ndim == 1 else f'of {axis=} of {x.shape}' + raise ValueError(f"{e_str} must be >= ceil(m_num/2) = {m2p}!") + + if x.ndim > 1: # motivated by the NumPy broadcasting mechanisms: + x = np.moveaxis(x, axis, -1) + # determine slice index range: + p0, p1 = self.p_range(n, p0, p1) + S_shape_1d = (self.f_pts, p1 - p0) + S_shape = x.shape[:-1] + S_shape_1d if x.ndim > 1 else S_shape_1d + S = np.zeros(S_shape, dtype=complex) + for p_, x_ in enumerate(self._x_slices(x, k_offset, p0, p1, padding)): + if detr is not None: + x_ = detr(x_) + S[..., :, p_] = self._fft_func(x_ * self.win.conj()) + if x.ndim > 1: + return np.moveaxis(S, -2, axis if axis >= 0 else axis-1) + return S + + def spectrogram(self, x: np.ndarray, y: np.ndarray | None = None, + detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None = None, # noqa: E501 + *, + p0: int | None = None, p1: int | None = None, + k_offset: int = 0, padding: PAD_TYPE = 'zeros', + axis: int = -1) \ + -> np.ndarray: + r"""Calculate spectrogram or cross-spectrogram. + + The spectrogram is the absolute square of the STFT, i.e, it is + ``abs(S[q,p])**2`` for given ``S[q,p]`` and thus is always + non-negative. + For two STFTs ``Sx[q,p], Sy[q,p]``, the cross-spectrogram is defined + as ``Sx[q,p] * np.conj(Sx[q,p])`` and is complex-valued. + This is a convenience function for calling `~ShortTimeFFT.stft` / + `stft_detrend`, hence all parameters are discussed there. If `y` is not + ``None`` it needs to have the same shape as `x`. 

        Examples
        --------
        The following example shows the spectrogram of a square wave with
        varying frequency :math:`f_i(t)` (marked by a green dashed line in the
        plot) sampled with 20 Hz:

        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> from scipy.signal import square, ShortTimeFFT
        >>> from scipy.signal.windows import gaussian
        ...
        >>> T_x, N = 1 / 20, 1000  # 20 Hz sampling rate for 50 s signal
        >>> t_x = np.arange(N) * T_x  # time indexes for signal
        >>> f_i = 5e-3*(t_x - t_x[N // 3])**2 + 1  # varying frequency
        >>> x = square(2*np.pi*np.cumsum(f_i)*T_x)  # the signal

        The utilized Gaussian window is 50 samples or 2.5 s long. The
        parameter ``mfft=800`` (oversampling factor 16) and the `hop` interval
        of 2 in `ShortTimeFFT` was chosen to produce a sufficient number of
        points:

        >>> g_std = 12  # standard deviation for Gaussian window in samples
        >>> win = gaussian(50, std=g_std, sym=True)  # symmetric Gaussian wind.
        >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T_x, mfft=800, scale_to='psd')
        >>> Sx2 = SFT.spectrogram(x)  # calculate absolute square of STFT

        The plot's colormap is logarithmically scaled as the power spectral
        density is in dB. The time extent of the signal `x` is marked by
        vertical dashed lines and the shaded areas mark the presence of border
        effects:

        >>> fig1, ax1 = plt.subplots(figsize=(6., 4.))  # enlarge plot a bit
        >>> t_lo, t_hi = SFT.extent(N)[:2]  # time range of plot
        >>> ax1.set_title(rf"Spectrogram ({SFT.m_num*SFT.T:g}$\,s$ Gaussian " +
        ...               rf"window, $\sigma_t={g_std*SFT.T:g}\,$s)")
        >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " +
        ...                rf"$\Delta t = {SFT.delta_t:g}\,$s)",
        ...         ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " +
        ...                rf"$\Delta f = {SFT.delta_f:g}\,$Hz)",
        ...         xlim=(t_lo, t_hi))
        >>> Sx_dB = 10 * np.log10(np.fmax(Sx2, 1e-4))  # limit range to -40 dB
        >>> im1 = ax1.imshow(Sx_dB, origin='lower', aspect='auto',
        ...                  extent=SFT.extent(N), cmap='magma')
        >>> ax1.plot(t_x, f_i, 'g--', alpha=.5, label='$f_i(t)$')
        >>> fig1.colorbar(im1, label='Power Spectral Density ' +
        ...                          r"$20\,\log_{10}|S_x(t, f)|$ in dB")
        ...
        >>> # Shade areas where window slices stick out to the side:
        >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T),
        ...                  (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]:
        ...     ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.3)
        >>> for t_ in [0, N * SFT.T]:  # mark signal borders with vertical line
        ...     ax1.axvline(t_, color='c', linestyle='--', alpha=0.5)
        >>> ax1.legend()
        >>> fig1.tight_layout()
        >>> plt.show()

        The logarithmic scaling reveals the odd harmonics of the square wave,
        which are reflected at the Nyquist frequency of 10 Hz. This aliasing
        is also the main source of the noise artifacts in the plot.


        See Also
        --------
        :meth:`~ShortTimeFFT.stft`: Perform the short-time Fourier transform.
        stft_detrend: STFT with a trend subtracted from each segment.
        :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
        """
        Sx = self.stft_detrend(x, detr, p0, p1, k_offset=k_offset,
                               padding=padding, axis=axis)
        if y is None or y is x:  # do spectrogram:
            # |Sx|**2 computed without an intermediate abs() array:
            return Sx.real**2 + Sx.imag**2
        # Cross-spectrogram:
        Sy = self.stft_detrend(y, detr, p0, p1, k_offset=k_offset,
                               padding=padding, axis=axis)
        return Sx * Sy.conj()

    @property
    def dual_win(self) -> np.ndarray:
        """Canonical dual window.

        A STFT can be interpreted as the input signal being expressed as a
        weighted sum of modulated and time-shifted dual windows. Note that for
        a given window there exist many dual windows. The canonical window is
        the one with the minimal energy (i.e., :math:`L_2` norm).

        `dual_win` has same length as `win`, namely `m_num` samples.

        If the dual window cannot be calculated a ``ValueError`` is raised.
        This attribute is read only and calculated lazily.

        See Also
        --------
        invertible: Check if STFT is invertible.
+ m_num: Number of samples in window `win`. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + if self._dual_win is None: + self._dual_win = _calc_dual_canonical_window(self.win, self.hop) + return self._dual_win + + @property + def invertible(self) -> bool: + """Check if STFT is invertible. + + This is achieved by trying to calculate the canonical dual window. + + See Also + -------- + :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. + m_num: Number of samples in window `win` and `dual_win`. + dual_win: Canonical dual window. + win: Window for STFT. + ShortTimeFFT: Class this property belongs to. + """ + try: + return len(self.dual_win) > 0 # call self.dual_win() + except ValueError: + return False + + def istft(self, S: np.ndarray, k0: int = 0, k1: int | None = None, *, + f_axis: int = -2, t_axis: int = -1) \ + -> np.ndarray: + """Inverse short-time Fourier transform. + + It returns an array of dimension ``S.ndim - 1`` which is real + if `onesided_fft` is set, else complex. If the STFT is not + `invertible`, or the parameters are out of bounds a ``ValueError`` is + raised. + + Parameters + ---------- + S + A complex valued array where `f_axis` denotes the frequency + values and the `t-axis` dimension the temporal values of the + STFT values. + k0, k1 + The start and the end index of the reconstructed signal. The + default (``k0 = 0``, ``k1 = None``) assumes that the maximum length + signal should be reconstructed. + f_axis, t_axis + The axes in `S` denoting the frequency and the time dimension. + + Notes + ----- + It is required that `S` has `f_pts` entries along the `f_axis`. For + the `t_axis` it is assumed that the first entry corresponds to + `p_min` * `delta_t` (being <= 0). The length of `t_axis` needs to be + compatible with `k1`. I.e., ``S.shape[t_axis] >= self.p_max(k1)`` must + hold, if `k1` is not ``None``. 
Else `k1` is set to `k_max` with:: + + q_max = S.shape[t_range] + self.p_min + k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid + + The :ref:`tutorial_stft` section of the :ref:`user_guide` discussed the + slicing behavior by means of an example. + + See Also + -------- + invertible: Check if STFT is invertible. + :meth:`~ShortTimeFFT.stft`: Perform Short-time Fourier transform. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + if f_axis == t_axis: + raise ValueError(f"{f_axis=} may not be equal to {t_axis=}!") + if S.shape[f_axis] != self.f_pts: + raise ValueError(f"{S.shape[f_axis]=} must be equal to " + + f"{self.f_pts=} ({S.shape=})!") + n_min = self.m_num-self.m_num_mid # minimum signal length + if not (S.shape[t_axis] >= (q_num := self.p_num(n_min))): + raise ValueError(f"{S.shape[t_axis]=} needs to have at least " + + f"{q_num} slices ({S.shape=})!") + if t_axis != S.ndim - 1 or f_axis != S.ndim - 2: + t_axis = S.ndim + t_axis if t_axis < 0 else t_axis + f_axis = S.ndim + f_axis if f_axis < 0 else f_axis + S = np.moveaxis(S, (f_axis, t_axis), (-2, -1)) + + q_max = S.shape[-1] + self.p_min + k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid + + k1 = k_max if k1 is None else k1 + if not (self.k_min <= k0 < k1 <= k_max): + raise ValueError(f"({self.k_min=}) <= ({k0=}) < ({k1=}) <= " + + f"({k_max=}) is false!") + if not (num_pts := k1 - k0) >= n_min: + raise ValueError(f"({k1=}) - ({k0=}) = {num_pts} has to be at " + + f"least the half the window length {n_min}!") + + q0 = (k0 // self.hop + self.p_min if k0 >= 0 else # p_min always <= 0 + k0 // self.hop) + q1 = min(self.p_max(k1), q_max) + k_q0, k_q1 = self.nearest_k_p(k0), self.nearest_k_p(k1, left=False) + n_pts = k_q1 - k_q0 + self.m_num - self.m_num_mid + x = np.zeros(S.shape[:-2] + (n_pts,), + dtype=float if self.onesided_fft else complex) + for q_ in range(q0, q1): + xs = self._ifft_func(S[..., :, q_ - self.p_min]) * self.dual_win + i0 = q_ * self.hop - 
self.m_num_mid + i1 = min(i0 + self.m_num, n_pts+k0) + j0, j1 = 0, i1 - i0 + if i0 < k0: # xs sticks out to the left on x: + j0 += k0 - i0 + i0 = k0 + x[..., i0-k0:i1-k0] += xs[..., j0:j1] + x = x[..., :k1-k0] + if x.ndim > 1: + x = np.moveaxis(x, -1, f_axis if f_axis < x.ndim else t_axis) + return x + + @property + def fac_magnitude(self) -> float: + """Factor to multiply the STFT values by to scale each frequency slice + to a magnitude spectrum. + + It is 1 if attribute ``scaling == 'magnitude'``. + The window can be scaled to a magnitude spectrum by using the method + `scale_to`. + + See Also + -------- + fac_psd: Scaling factor for to a power spectral density spectrum. + scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. + scaling: Normalization applied to the window function. + ShortTimeFFT: Class this property belongs to. + """ + if self.scaling == 'magnitude': + return 1 + if self._fac_mag is None: + self._fac_mag = 1 / abs(sum(self.win)) + return self._fac_mag + + @property + def fac_psd(self) -> float: + """Factor to multiply the STFT values by to scale each frequency slice + to a power spectral density (PSD). + + It is 1 if attribute ``scaling == 'psd'``. + The window can be scaled to a psd spectrum by using the method + `scale_to`. + + See Also + -------- + fac_magnitude: Scaling factor for to a magnitude spectrum. + scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. + scaling: Normalization applied to the window function. + ShortTimeFFT: Class this property belongs to. + """ + if self.scaling == 'psd': + return 1 + if self._fac_psd is None: + self._fac_psd = 1 / np.sqrt( + sum(self.win.real**2+self.win.imag**2) / self.T) + return self._fac_psd + + @property + def m_num(self) -> int: + """Number of samples in window `win`. + + Note that the FFT can be oversampled by zero-padding. This is achieved + by setting the `mfft` property. + + See Also + -------- + m_num_mid: Center index of window `win`. 
+ mfft: Length of input for the FFT used - may be larger than `m_num`. + hop: Time increment in signal samples for sliding window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return len(self.win) + + @property + def m_num_mid(self) -> int: + """Center index of window `win`. + + For odd `m_num`, ``(m_num - 1) / 2`` is returned and + for even `m_num` (per definition) ``m_num / 2`` is returned. + + See Also + -------- + m_num: Number of samples in window `win`. + mfft: Length of input for the FFT used - may be larger than `m_num`. + hop: ime increment in signal samples for sliding window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return self.m_num // 2 + + @cache + def _pre_padding(self) -> tuple[int, int]: + """Smallest signal index and slice index due to padding. + + Since, per convention, for time t=0, n,q is zero, the returned values + are negative or zero. + """ + w2 = self.win.real**2 + self.win.imag**2 + # move window to the left until the overlap with t >= 0 vanishes: + n0 = -self.m_num_mid + for q_, n_ in enumerate(range(n0, n0-self.m_num-1, -self.hop)): + n_next = n_ - self.hop + if n_next + self.m_num <= 0 or all(w2[n_next:] == 0): + return n_, -q_ + raise RuntimeError("This is code line should not have been reached!") + # If this case is reached, it probably means the first slice should be + # returned, i.e.: return n0, 0 + + @property + def k_min(self) -> int: + """The smallest possible signal index of the STFT. + + `k_min` is the index of the left-most non-zero value of the lowest + slice `p_min`. Since the zeroth slice is centered over the zeroth + sample of the input signal, `k_min` is never positive. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_max: First sample index after signal end not touched by a time slice. 
+ lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + upper_border_begin: Where post-padding effects start. + ShortTimeFFT: Class this property belongs to. + """ + return self._pre_padding()[0] + + @property + def p_min(self) -> int: + """The smallest possible slice index. + + `p_min` is the index of the left-most slice, where the window still + sticks into the signal, i.e., has non-zero part for t >= 0. + `k_min` is the smallest index where the window function of the slice + `p_min` is non-zero. + + Since, per convention the zeroth slice is centered at t=0, + `p_min` <= 0 always holds. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this property belongs to. 
+ """ + return self._pre_padding()[1] + + @lru_cache(maxsize=256) + def _post_padding(self, n: int) -> tuple[int, int]: + """Largest signal index and slice index due to padding.""" + w2 = self.win.real**2 + self.win.imag**2 + # move window to the right until the overlap for t < t[n] vanishes: + q1 = n // self.hop # last slice index with t[p1] <= t[n] + k1 = q1 * self.hop - self.m_num_mid + for q_, k_ in enumerate(range(k1, n+self.m_num, self.hop), start=q1): + n_next = k_ + self.hop + if n_next >= n or all(w2[:n-n_next] == 0): + return k_ + self.m_num, q_ + 1 + raise RuntimeError("This is code line should not have been reached!") + # If this case is reached, it probably means the last slice should be + # returned, i.e.: return k1 + self.m_num - self.m_num_mid, q1 + 1 + + def k_max(self, n: int) -> int: + """First sample index after signal end not touched by a time slice. + + `k_max` - 1 is the largest sample index of the slice `p_max` for a + given input signal of `n` samples. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. + """ + return self._post_padding(n)[0] + + def p_max(self, n: int) -> int: + """Index of first non-overlapping upper time slice for `n` sample + input. + + Note that center point t[p_max] = (p_max(n)-1) * `delta_t` is typically + larger than last time index t[n-1] == (`n`-1) * `T`. The upper border + of samples indexes covered by the window slices is given by `k_max`. + Furthermore, `p_max` does not denote the number of slices `p_num` since + `p_min` is typically less than zero. 
+ A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + p_min: The smallest possible slice index. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. + """ + return self._post_padding(n)[1] + + def p_num(self, n: int) -> int: + """Number of time slices for an input signal with `n` samples. + + It is given by `p_num` = `p_max` - `p_min` with `p_min` typically + being negative. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_range: Determine and validate slice index range. + upper_border_begin: Where post-padding effects start. + ShortTimeFFT: Class this method belongs to. + """ + return self.p_max(n) - self.p_min + + @property + def lower_border_end(self) -> tuple[int, int]: + """First signal index and first slice index unaffected by pre-padding. + + Describes the point where the window does not stick out to the left + of the signal domain. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. 
+ p_range: Determine and validate slice index range. + upper_border_begin: Where post-padding effects start. + ShortTimeFFT: Class this property belongs to. + """ + # not using @cache decorator due to MyPy limitations + if self._lower_border_end is not None: + return self._lower_border_end + + # first non-zero element in self.win: + m0 = np.flatnonzero(self.win.real**2 + self.win.imag**2)[0] + + # move window to the right until does not stick out to the left: + k0 = -self.m_num_mid + m0 + for q_, k_ in enumerate(range(k0, self.hop + 1, self.hop)): + if k_ + self.hop >= 0: # next entry does not stick out anymore + self._lower_border_end = (k_ + self.m_num, q_ + 1) + return self._lower_border_end + self._lower_border_end = (0, max(self.p_min, 0)) # ends at first slice + return self._lower_border_end + + @lru_cache(maxsize=256) + def upper_border_begin(self, n: int) -> tuple[int, int]: + """First signal index and first slice index affected by post-padding. + + Describes the point where the window does begin stick out to the right + of the signal domain. + A detailed example is given :ref:`tutorial_stft_sliding_win` section + of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. 
+ """ + w2 = self.win.real**2 + self.win.imag**2 + q2 = n // self.hop + 1 # first t[q] >= t[n] + q1 = max((n-self.m_num) // self.hop - 1, -1) + # move window left until does not stick out to the right: + for q_ in range(q2, q1, -1): + k_ = q_ * self.hop + (self.m_num - self.m_num_mid) + if k_ < n or all(w2[n-k_:] == 0): + return (q_ + 1) * self.hop - self.m_num_mid, q_ + 1 + return 0, 0 # border starts at first slice + + @property + def delta_t(self) -> float: + """Time increment of STFT. + + The time increment `delta_t` = `T` * `hop` represents the sample + increment `hop` converted to time based on the sampling interval `T`. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + hop: Hop size in signal samples for sliding window. + t: Times of STFT for an input signal with `n` samples. + T: Sampling interval of input signal and window `win`. + ShortTimeFFT: Class this property belongs to + """ + return self.T * self.hop + + def p_range(self, n: int, p0: int | None = None, + p1: int | None = None) -> tuple[int, int]: + """Determine and validate slice index range. + + Parameters + ---------- + n : int + Number of samples of input signal, assuming t[0] = 0. + p0 : int | None + First slice index. If 0 then the first slice is centered at t = 0. + If ``None`` then `p_min` is used. Note that p0 may be < 0 if + slices are left of t = 0. + p1 : int | None + End of interval (last value is p1-1). + If ``None`` then `p_max(n)` is used. + + + Returns + ------- + p0_ : int + The fist slice index + p1_ : int + End of interval (last value is p1-1). + + Notes + ----- + A ``ValueError`` is raised if ``p_min <= p0 < p1 <= p_max(n)`` does not + hold. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + lower_border_end: Where pre-padding effects end. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. 
+ p_num: Number of time slices, i.e., `p_max` - `p_min`. + upper_border_begin: Where post-padding effects start. + ShortTimeFFT: Class this property belongs to. + """ + p_max = self.p_max(n) # shorthand + p0_ = self.p_min if p0 is None else p0 + p1_ = p_max if p1 is None else p1 + if not (self.p_min <= p0_ < p1_ <= p_max): + raise ValueError(f"Invalid Parameter {p0=}, {p1=}, i.e., " + + f"{self.p_min=} <= p0 < p1 <= {p_max=} " + + f"does not hold for signal length {n=}!") + return p0_, p1_ + + @lru_cache(maxsize=1) + def t(self, n: int, p0: int | None = None, p1: int | None = None, + k_offset: int = 0) -> np.ndarray: + """Times of STFT for an input signal with `n` samples. + + Returns a 1d array with times of the `~ShortTimeFFT.stft` values with + the same parametrization. Note that the slices are + ``delta_t = hop * T`` time units apart. + + Parameters + ---------- + n + Number of sample of the input signal. + p0 + The first element of the range of slices to calculate. If ``None`` + then it is set to :attr:`p_min`, which is the smallest possible + slice. + p1 + The end of the array. If ``None`` then `p_max(n)` is used. + k_offset + Index of first sample (t = 0) in `x`. + + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + nearest_k_p: Nearest sample index k_p for which t[k_p] == t[p] holds. + T: Sampling interval of input signal and of the window (``1/fs``). + fs: Sampling frequency (being ``1/T``) + ShortTimeFFT: Class this method belongs to. + """ + p0, p1 = self.p_range(n, p0, p1) + return np.arange(p0, p1) * self.delta_t + k_offset * self.T + + def nearest_k_p(self, k: int, left: bool = True) -> int: + """Return nearest sample index k_p for which t[k_p] == t[p] holds. + + The nearest next smaller time sample p (where t[p] is the center + position of the window of the p-th slice) is p_k = k // `hop`. + If `hop` is a divisor of `k` than `k` is returned. 
+ If `left` is set than p_k * `hop` is returned else (p_k+1) * `hop`. + + This method can be used to slice an input signal into chunks for + calculating the STFT and iSTFT incrementally. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + T: Sampling interval of input signal and of the window (``1/fs``). + fs: Sampling frequency (being ``1/T``) + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this method belongs to. + """ + p_q, remainder = divmod(k, self.hop) + if remainder == 0: + return k + return p_q * self.hop if left else (p_q + 1) * self.hop + + @property + def delta_f(self) -> float: + """Width of the frequency bins of the STFT. + + Return the frequency interval `delta_f` = 1 / (`mfft` * `T`). + + See Also + -------- + delta_t: Time increment of STFT. + f_pts: Number of points along the frequency axis. + f: Frequencies values of the STFT. + mfft: Length of the input for FFT used. + T: Sampling interval. + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this property belongs to. + """ + return 1 / (self.mfft * self.T) + + @property + def f_pts(self) -> int: + """Number of points along the frequency axis. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + mfft: Length of the input for FFT used. + ShortTimeFFT: Class this property belongs to. + """ + return self.mfft // 2 + 1 if self.onesided_fft else self.mfft + + @property + def onesided_fft(self) -> bool: + """Return True if a one-sided FFT is used. + + Returns ``True`` if `fft_mode` is either 'onesided' or 'onesided2X'. + + See Also + -------- + fft_mode: Utilized FFT ('twosided', 'centered', 'onesided' or + 'onesided2X') + ShortTimeFFT: Class this property belongs to. + """ + return self.fft_mode in {'onesided', 'onesided2X'} + + @property + def f(self) -> np.ndarray: + """Frequencies values of the STFT. 
+ + A 1d array of length `f_pts` with `delta_f` spaced entries is returned. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f_pts: Number of points along the frequency axis. + mfft: Length of the input for FFT used. + ShortTimeFFT: Class this property belongs to. + """ + if self.fft_mode in {'onesided', 'onesided2X'}: + return fft_lib.rfftfreq(self.mfft, self.T) + elif self.fft_mode == 'twosided': + return fft_lib.fftfreq(self.mfft, self.T) + elif self.fft_mode == 'centered': + return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T)) + # This should never happen but makes the Linters happy: + fft_modes = get_args(FFT_MODE_TYPE) + raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") + + def _fft_func(self, x: np.ndarray) -> np.ndarray: + """FFT based on the `fft_mode`, `mfft`, `scaling` and `phase_shift` + attributes. + + For multidimensional arrays the transformation is carried out on the + last axis. + """ + if self.phase_shift is not None: + if x.shape[-1] < self.mfft: # zero pad if needed + z_shape = list(x.shape) + z_shape[-1] = self.mfft - x.shape[-1] + x = np.hstack((x, np.zeros(z_shape, dtype=x.dtype))) + p_s = (self.phase_shift + self.m_num_mid) % self.m_num + x = np.roll(x, -p_s, axis=-1) + + if self.fft_mode == 'twosided': + return fft_lib.fft(x, n=self.mfft, axis=-1) + if self.fft_mode == 'centered': + return fft_lib.fftshift(fft_lib.fft(x, self.mfft, axis=-1), axes=-1) + if self.fft_mode == 'onesided': + return fft_lib.rfft(x, n=self.mfft, axis=-1) + if self.fft_mode == 'onesided2X': + X = fft_lib.rfft(x, n=self.mfft, axis=-1) + # Either squared magnitude (psd) or magnitude is doubled: + fac = np.sqrt(2) if self.scaling == 'psd' else 2 + # For even input length, the last entry is unpaired: + X[..., 1: -1 if self.mfft % 2 == 0 else None] *= fac + return X + # This should never happen but makes the Linter happy: + fft_modes = get_args(FFT_MODE_TYPE) + raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") + + 
def _ifft_func(self, X: np.ndarray) -> np.ndarray: + """Inverse to `_fft_func`. + + Returned is an array of length `m_num`. If the FFT is `onesided` + then a float array is returned else a complex array is returned. + For multidimensional arrays the transformation is carried out on the + last axis. + """ + if self.fft_mode == 'twosided': + x = fft_lib.ifft(X, n=self.mfft, axis=-1) + elif self.fft_mode == 'centered': + x = fft_lib.ifft(fft_lib.ifftshift(X, axes=-1), n=self.mfft, axis=-1) + elif self.fft_mode == 'onesided': + x = fft_lib.irfft(X, n=self.mfft, axis=-1) + elif self.fft_mode == 'onesided2X': + Xc = X.copy() # we do not want to modify function parameters + fac = np.sqrt(2) if self.scaling == 'psd' else 2 + # For even length X the last value is not paired with a negative + # value on the two-sided FFT: + q1 = -1 if self.mfft % 2 == 0 else None + Xc[..., 1:q1] /= fac + x = fft_lib.irfft(Xc, n=self.mfft, axis=-1) + else: # This should never happen but makes the Linter happy: + error_str = f"{self.fft_mode=} not in {get_args(FFT_MODE_TYPE)}!" + raise RuntimeError(error_str) + + if self.phase_shift is None: + return x[:self.m_num] + p_s = (self.phase_shift + self.m_num_mid) % self.m_num + return np.roll(x, p_s, axis=-1)[:self.m_num] + + def extent(self, n: int, axes_seq: Literal['tf', 'ft'] = 'tf', + center_bins: bool = False) -> tuple[float, float, float, float]: + """Return minimum and maximum values time-frequency values. + + A tuple with four floats ``(t0, t1, f0, f1)`` for 'tf' and + ``(f0, f1, t0, t1)`` for 'ft' is returned describing the corners + of the time-frequency domain of the `~ShortTimeFFT.stft`. + That tuple can be passed to `matplotlib.pyplot.imshow` as a parameter + with the same name. + + Parameters + ---------- + n : int + Number of samples in input signal. + axes_seq : {'tf', 'ft'} + Return time extent first and then frequency extent or vice-versa. 
+ center_bins: bool + If set (default ``False``), the values of the time slots and + frequency bins are moved from the side the middle. This is useful, + when plotting the `~ShortTimeFFT.stft` values as step functions, + i.e., with no interpolation. + + See Also + -------- + :func:`matplotlib.pyplot.imshow`: Display data as an image. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + if axes_seq not in ('tf', 'ft'): + raise ValueError(f"Parameter {axes_seq=} not in ['tf', 'ft']!") + + if self.onesided_fft: + q0, q1 = 0, self.f_pts + elif self.fft_mode == 'centered': + q0 = -self.mfft // 2 + q1 = self.mfft // 2 - 1 if self.mfft % 2 == 0 else self.mfft // 2 + else: + raise ValueError(f"Attribute fft_mode={self.fft_mode} must be " + + "in ['centered', 'onesided', 'onesided2X']") + + p0, p1 = self.p_min, self.p_max(n) # shorthand + if center_bins: + t0, t1 = self.delta_t * (p0 - 0.5), self.delta_t * (p1 - 0.5) + f0, f1 = self.delta_f * (q0 - 0.5), self.delta_f * (q1 - 0.5) + else: + t0, t1 = self.delta_t * p0, self.delta_t * p1 + f0, f1 = self.delta_f * q0, self.delta_f * q1 + return (t0, t1, f0, f1) if axes_seq == 'tf' else (f0, f1, t0, t1) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_signaltools.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_signaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..214b35c596dd524ee2eab586b65586f8886d7538 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_signaltools.py @@ -0,0 +1,4628 @@ +# Author: Travis Oliphant +# 1999 -- 2002 + +import operator +import math +from math import prod as _prod +import timeit +import warnings + +from scipy.spatial import cKDTree +from . 
import _sigtools +from ._ltisys import dlti +from ._upfirdn import upfirdn, _output_len, _upfirdn_modes +from scipy import linalg, fft as sp_fft +from scipy import ndimage +from scipy.fft._helper import _init_nd_shape_and_axes +import numpy as np +from scipy.special import lambertw +from .windows import get_window +from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext +from ._filter_design import cheby1, _validate_sos, zpk2sos +from ._fir_filter_design import firwin +from ._sosfilt import _sosfilt + + +__all__ = ['correlate', 'correlation_lags', 'correlate2d', + 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', + 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', + 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', + 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue', + 'residuez', 'resample', 'resample_poly', 'detrend', + 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', + 'filtfilt', 'decimate', 'vectorstrength'] + + +_modedict = {'valid': 0, 'same': 1, 'full': 2} + +_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1, + 'symmetric': 1, 'reflect': 4} + + +def _valfrommode(mode): + try: + return _modedict[mode] + except KeyError as e: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") from e + + +def _bvalfromboundary(boundary): + try: + return _boundarydict[boundary] << 2 + except KeyError as e: + raise ValueError("Acceptable boundary flags are 'fill', 'circular' " + "(or 'wrap'), and 'symmetric' (or 'symm').") from e + + +def _inputs_swap_needed(mode, shape1, shape2, axes=None): + """Determine if inputs arrays need to be swapped in `"valid"` mode. + + If in `"valid"` mode, returns whether or not the input arrays need to be + swapped depending on whether `shape1` is at least as large as `shape2` in + every calculated dimension. 
+ + This is important for some of the correlation and convolution + implementations in this module, where the larger array input needs to come + before the smaller array input when operating in this mode. + + Note that if the mode provided is not 'valid', False is immediately + returned. + + """ + if mode != 'valid': + return False + + if not shape1: + return False + + if axes is None: + axes = range(len(shape1)) + + ok1 = all(shape1[i] >= shape2[i] for i in axes) + ok2 = all(shape2[i] >= shape1[i] for i in axes) + + if not (ok1 or ok2): + raise ValueError("For 'valid' mode, one must be at least " + "as large as the other in every dimension") + + return not ok1 + + +def correlate(in1, in2, mode='full', method='auto'): + r""" + Cross-correlate two N-dimensional arrays. + + Cross-correlate `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the correlation. + + ``direct`` + The correlation is determined directly from sums, the definition of + correlation. + ``fft`` + The Fast Fourier Transform is used to perform the correlation more + quickly (only available for numerical arrays.) + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). 
See `convolve` Notes for more detail. + + .. versionadded:: 0.19.0 + + Returns + ------- + correlate : array + An N-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. + + See Also + -------- + choose_conv_method : contains more documentation on `method`. + correlation_lags : calculates the lag / displacement indices array for 1D + cross-correlation. + + Notes + ----- + The correlation z of two d-dimensional arrays x and y is defined as:: + + z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) + + This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` + then + + .. math:: + + z[k] = (x * y)(k - N + 1) + = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} + + for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` + + where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, + and :math:`y_m` is 0 when m is outside the range of y. + + ``method='fft'`` only works for numerical arrays as it relies on + `fftconvolve`. In certain cases (i.e., arrays of objects or when + rounding integers can lose precision), ``method='direct'`` is always used. + + When using "same" mode with even-length inputs, the outputs of `correlate` + and `correlate2d` differ: There is a 1-index offset between them. + + Examples + -------- + Implement a matched filter using cross-correlation, to recover a signal + that has passed through a noisy channel. 
+ + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) + >>> sig_noise = sig + rng.standard_normal(len(sig)) + >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 + + >>> clock = np.arange(64, len(sig), 128) + >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) + >>> ax_orig.plot(sig) + >>> ax_orig.plot(clock, sig[clock], 'ro') + >>> ax_orig.set_title('Original signal') + >>> ax_noise.plot(sig_noise) + >>> ax_noise.set_title('Signal with noise') + >>> ax_corr.plot(corr) + >>> ax_corr.plot(clock, corr[clock], 'ro') + >>> ax_corr.axhline(0.5, ls=':') + >>> ax_corr.set_title('Cross-correlated with rectangular pulse') + >>> ax_orig.margins(0, 0.1) + >>> fig.tight_layout() + >>> plt.show() + + Compute the cross-correlation of a noisy signal with the original signal. + + >>> x = np.arange(128) / 128 + >>> sig = np.sin(2 * np.pi * x) + >>> sig_noise = sig + rng.standard_normal(len(sig)) + >>> corr = signal.correlate(sig_noise, sig) + >>> lags = signal.correlation_lags(len(sig), len(sig_noise)) + >>> corr /= np.max(corr) + + >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8)) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('Original signal') + >>> ax_orig.set_xlabel('Sample Number') + >>> ax_noise.plot(sig_noise) + >>> ax_noise.set_title('Signal with noise') + >>> ax_noise.set_xlabel('Sample Number') + >>> ax_corr.plot(lags, corr) + >>> ax_corr.set_title('Cross-correlated signal') + >>> ax_corr.set_xlabel('Lag') + >>> ax_orig.margins(0, 0.1) + >>> ax_noise.margins(0, 0.1) + >>> ax_corr.margins(0, 0.1) + >>> fig.tight_layout() + >>> plt.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if in1.ndim == in2.ndim == 0: + return in1 * in2.conj() + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + + # Don't use 
_valfrommode, since correlate should not accept numeric modes + try: + val = _modedict[mode] + except KeyError as e: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") from e + + # this either calls fftconvolve or this function with method=='direct' + if method in ('fft', 'auto'): + return convolve(in1, _reverse_and_conj(in2), mode, method) + + elif method == 'direct': + # fastpath to faster numpy.correlate for 1d inputs when possible + if _np_conv_ok(in1, in2, mode): + return np.correlate(in1, in2, mode) + + # _correlateND is far slower when in2.size > in1.size, so swap them + # and then undo the effect afterward if mode == 'full'. Also, it fails + # with 'valid' mode if in2 is larger than in1, so swap those, too. + # Don't swap inputs for 'same' mode, since shape of in1 matters. + swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or + _inputs_swap_needed(mode, in1.shape, in2.shape)) + + if swapped_inputs: + in1, in2 = in2, in1 + + if mode == 'valid': + ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] + out = np.empty(ps, in1.dtype) + + z = _sigtools._correlateND(in1, in2, out, val) + + else: + ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] + + # zero pad input + in1zpadded = np.zeros(ps, in1.dtype) + sc = tuple(slice(0, i) for i in in1.shape) + in1zpadded[sc] = in1.copy() + + if mode == 'full': + out = np.empty(ps, in1.dtype) + elif mode == 'same': + out = np.empty(in1.shape, in1.dtype) + + z = _sigtools._correlateND(in1zpadded, in2, out, val) + + if swapped_inputs: + # Reverse and conjugate to undo the effect of swapping inputs + z = _reverse_and_conj(z) + + return z + + else: + raise ValueError("Acceptable method flags are 'auto'," + " 'direct', or 'fft'.") + + +def correlation_lags(in1_len, in2_len, mode='full'): + r""" + Calculates the lag / displacement indices array for 1D cross-correlation. + + Parameters + ---------- + in1_len : int + First input size. + in2_len : int + Second input size. 
+ mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output. + See the documentation `correlate` for more information. + + Returns + ------- + lags : array + Returns an array containing cross-correlation lag/displacement indices. + Indices can be indexed with the np.argmax of the correlation to return + the lag/displacement. + + See Also + -------- + correlate : Compute the N-dimensional cross-correlation. + + Notes + ----- + Cross-correlation for continuous functions :math:`f` and :math:`g` is + defined as: + + .. math:: + + \left ( f\star g \right )\left ( \tau \right ) + \triangleq \int_{t_0}^{t_0 +T} + \overline{f\left ( t \right )}g\left ( t+\tau \right )dt + + Where :math:`\tau` is defined as the displacement, also known as the lag. + + Cross correlation for discrete functions :math:`f` and :math:`g` is + defined as: + + .. math:: + \left ( f\star g \right )\left [ n \right ] + \triangleq \sum_{-\infty}^{\infty} + \overline{f\left [ m \right ]}g\left [ m+n \right ] + + Where :math:`n` is the lag. + + Examples + -------- + Cross-correlation of a signal with its time-delayed self. + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(1000) + >>> y = np.concatenate([rng.standard_normal(100), x]) + >>> correlation = signal.correlate(x, y, mode="full") + >>> lags = signal.correlation_lags(x.size, y.size, mode="full") + >>> lag = lags[np.argmax(correlation)] + """ + + # calculate lag ranges in different modes of operation + if mode == "full": + # the output is the full discrete linear convolution + # of the inputs. (Default) + lags = np.arange(-in2_len + 1, in1_len) + elif mode == "same": + # the output is the same size as `in1`, centered + # with respect to the 'full' output. 
+ # calculate the full output + lags = np.arange(-in2_len + 1, in1_len) + # determine the midpoint in the full output + mid = lags.size // 2 + # determine lag_bound to be used with respect + # to the midpoint + lag_bound = in1_len // 2 + # calculate lag ranges for even and odd scenarios + if in1_len % 2 == 0: + lags = lags[(mid-lag_bound):(mid+lag_bound)] + else: + lags = lags[(mid-lag_bound):(mid+lag_bound)+1] + elif mode == "valid": + # the output consists only of those elements that do not + # rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + # must be at least as large as the other in every dimension. + + # the lag_bound will be either negative or positive + # this let's us infer how to present the lag range + lag_bound = in1_len - in2_len + if lag_bound >= 0: + lags = np.arange(lag_bound + 1) + else: + lags = np.arange(lag_bound, 1) + return lags + + +def _centered(arr, newshape): + # Return the center newshape portion of the array. + newshape = np.asarray(newshape) + currshape = np.array(arr.shape) + startind = (currshape - newshape) // 2 + endind = startind + newshape + myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] + return arr[tuple(myslice)] + + +def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False): + """Handle the axes argument for frequency-domain convolution. + + Returns the inputs and axes in a standard form, eliminating redundant axes, + swapping the inputs if necessary, and checking for various potential + errors. + + Parameters + ---------- + in1 : array + First input. + in2 : array + Second input. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output. + See the documentation `fftconvolve` for more information. + axes : list of ints + Axes over which to compute the FFTs. + sorted_axes : bool, optional + If `True`, sort the axes. + Default is `False`, do not sort. + + Returns + ------- + in1 : array + The first input, possible swapped with the second input. 
+ in2 : array + The second input, possible swapped with the first input. + axes : list of ints + Axes over which to compute the FFTs. + + """ + s1 = in1.shape + s2 = in2.shape + noaxes = axes is None + + _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes) + + if not noaxes and not len(axes): + raise ValueError("when provided, axes cannot be empty") + + # Axes of length 1 can rely on broadcasting rules for multiply, + # no fft needed. + axes = [a for a in axes if s1[a] != 1 and s2[a] != 1] + + if sorted_axes: + axes.sort() + + if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1 + for a in range(in1.ndim) if a not in axes): + raise ValueError("incompatible shapes for in1 and in2:" + f" {s1} and {s2}") + + # Check that input sizes are compatible with 'valid' mode. + if _inputs_swap_needed(mode, s1, s2, axes=axes): + # Convolution is commutative; order doesn't have any effect on output. + in1, in2 = in2, in1 + + return in1, in2, axes + + +def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): + """Convolve two arrays in the frequency domain. + + This function implements only base the FFT-related operations. + Specifically, it converts the signals to the frequency domain, multiplies + them, then converts them back to the time domain. Calculations of axes, + shapes, convolution mode, etc. are implemented in higher level-functions, + such as `fftconvolve` and `oaconvolve`. Those functions should be used + instead of this one. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + axes : array_like of ints + Axes over which to compute the FFTs. + shape : array_like of ints + The sizes of the FFTs. + calc_fast_len : bool, optional + If `True`, set each value of `shape` to the next fast FFT length. + Default is `False`, use `axes` as-is. 
+ + Returns + ------- + out : array + An N-dimensional array containing the discrete linear convolution of + `in1` with `in2`. + + """ + if not len(axes): + return in1 * in2 + + complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c') + + if calc_fast_len: + # Speed up FFT by padding to optimal size. + fshape = [ + sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] + else: + fshape = shape + + if not complex_result: + fft, ifft = sp_fft.rfftn, sp_fft.irfftn + else: + fft, ifft = sp_fft.fftn, sp_fft.ifftn + + sp1 = fft(in1, fshape, axes=axes) + sp2 = fft(in2, fshape, axes=axes) + + ret = ifft(sp1 * sp2, fshape, axes=axes) + + if calc_fast_len: + fslice = tuple([slice(sz) for sz in shape]) + ret = ret[fslice] + + return ret + + +def _apply_conv_mode(ret, s1, s2, mode, axes): + """Calculate the convolution result shape based on the `mode` argument. + + Returns the result sliced to the correct size for the given mode. + + Parameters + ---------- + ret : array + The result array, with the appropriate shape for the 'full' mode. + s1 : list of int + The shape of the first input. + s2 : list of int + The shape of the second input. + mode : str {'full', 'valid', 'same'} + A string indicating the size of the output. + See the documentation `fftconvolve` for more information. + axes : list of ints + Axes over which to compute the convolution. + + Returns + ------- + ret : array + A copy of `res`, sliced to the correct size for the given `mode`. + + """ + if mode == "full": + return ret.copy() + elif mode == "same": + return _centered(ret, s1).copy() + elif mode == "valid": + shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1 + for a in range(ret.ndim)] + return _centered(ret, shape_valid).copy() + else: + raise ValueError("acceptable mode flags are 'valid'," + " 'same', or 'full'") + + +def fftconvolve(in1, in2, mode="full", axes=None): + """Convolve two N-dimensional arrays using FFT. 
    Convolve `in1` and `in2` using the fast Fourier transform method, with
    the output size determined by the `mode` argument.

    This is generally much faster than `convolve` for large arrays (n > ~500),
    but can be slower when only a few output values are needed, and can only
    output float arrays (int or object array inputs will be cast to float).

    As of v0.19, `convolve` automatically chooses this method or the direct
    method based on an estimation of which is faster.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    axes : int or array_like of ints or None, optional
        Axes over which to compute the convolution.
        The default is over all axes.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    convolve : Uses the direct convolution or FFT convolution algorithm
               depending on which is faster.
    oaconvolve : Uses the overlap-add method to do convolution, which is
                 generally faster when the input arrays are large and
                 significantly different in size.

    Examples
    --------
    Autocorrelation of white noise is an impulse.

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.default_rng()
    >>> sig = rng.standard_normal(1000)
    >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('White noise')
    >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
    >>> ax_mag.set_title('Autocorrelation')
    >>> fig.tight_layout()
    >>> fig.show()

    Gaussian blur implemented using FFT convolution. Notice the dark borders
    around the image, due to the zero-padding beyond its boundaries.
    The `convolve2d` function allows for other types of image boundaries,
    but is far slower.

    >>> from scipy import datasets
    >>> face = datasets.face(gray=True)
    >>> kernel = np.outer(signal.windows.gaussian(70, 8),
    ...                   signal.windows.gaussian(70, 8))
    >>> blurred = signal.fftconvolve(face, kernel, mode='same')

    >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
    ...                                                      figsize=(6, 15))
    >>> ax_orig.imshow(face, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_kernel.imshow(kernel, cmap='gray')
    >>> ax_kernel.set_title('Gaussian kernel')
    >>> ax_kernel.set_axis_off()
    >>> ax_blurred.imshow(blurred, cmap='gray')
    >>> ax_blurred.set_title('Blurred')
    >>> ax_blurred.set_axis_off()
    >>> fig.show()

    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif in1.ndim != in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return np.array([])

    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
                                          sorted_axes=False)

    s1 = in1.shape
    s2 = in2.shape

    # Full linear convolution length (s1 + s2 - 1) along the transformed
    # axes; the broadcast (max) size along the rest.
    shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1
             for i in range(in1.ndim)]

    ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)

    return _apply_conv_mode(ret, s1, s2, mode, axes)


def _calc_oa_lens(s1, s2):
    """Calculate the optimal FFT lengths for overlap-add convolution.

    The calculation is done for a single dimension.

    Parameters
    ----------
    s1 : int
        Size of the dimension for the first array.
    s2 : int
        Size of the dimension for the second array.

    Returns
    -------
    block_size : int
        The size of the FFT blocks.
    overlap : int
        The amount of overlap between two blocks.
    in1_step : int
        The size of each step for the first array.
    in2_step : int
        The size of each step for the second array.

    """
    # Set up the arguments for the conventional FFT approach.
    fallback = (s1+s2-1, None, s1, s2)

    # Use conventional FFT convolve if sizes are same.
    if s1 == s2 or s1 == 1 or s2 == 1:
        return fallback

    if s2 > s1:
        s1, s2 = s2, s1
        swapped = True
    else:
        swapped = False

    # There cannot be a useful block size if s2 is more than half of s1.
    if s2 >= s1/2:
        return fallback

    # Derivation of optimal block length
    # For original formula see:
    # https://en.wikipedia.org/wiki/Overlap-add_method
    #
    # Formula:
    # K = overlap = s2-1
    # N = block_size
    # C = complexity
    # e = exponential, exp(1)
    #
    # C = (N*(log2(N)+1))/(N-K)
    # C = (N*log2(2N))/(N-K)
    # C = N/(N-K) * log2(2N)
    # C1 = N/(N-K)
    # C2 = log2(2N) = ln(2N)/ln(2)
    #
    # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2
    # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2))
    #
    # dC/dN = dC1/dN*C2 + dC2/dN*C1
    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K))
    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K))
    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2)
    # dC/dN = (-K*ln(2N) + (N-K)/(ln(2)*(N-K)^2)
    # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
    #
    # Solve for minimum, where dC/dN = 0
    # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
    # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K
    # 0 = N - K*ln(2N) - K
    # 0 = N - K*(ln(2N) + 1)
    # 0 = N - K*ln(2Ne)
    # N = K*ln(2Ne)
    # N/K = ln(2Ne)
    #
    # e^(N/K) = e^ln(2Ne)
    # e^(N/K) = 2Ne
    # 1/e^(N/K) = 1/(2*N*e)
    # e^(N/-K) = 1/(2*N*e)
    # e^(N/-K) = K/N*1/(2*K*e)
    # N/K*e^(N/-K) = 1/(2*e*K)
    # N/-K*e^(N/-K) = -1/(2*e*K)
    #
    # Using Lambert W function
    # https://en.wikipedia.org/wiki/Lambert_W_function
    # x = W(y) It is the solution to y = x*e^x
    # x = N/-K
    # y = -1/(2*e*K)
    #
    # N/-K = W(-1/(2*e*K))
    #
    # N = -K*W(-1/(2*e*K))
    overlap = s2-1
    # k=-1 selects the lower real branch of Lambert W, which yields the
    # larger (useful) solution N of the equation derived above.
    opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real
    block_size = sp_fft.next_fast_len(math.ceil(opt_size))

    # Use conventional FFT convolve if there is only going to be one block.
    if block_size >= s1:
        return fallback

    if not swapped:
        in1_step = block_size-s2+1
        in2_step = s2
    else:
        in1_step = s2
        in2_step = block_size-s2+1

    return block_size, overlap, in1_step, in2_step


def oaconvolve(in1, in2, mode="full", axes=None):
    """Convolve two N-dimensional arrays using the overlap-add method.

    Convolve `in1` and `in2` using the overlap-add method, with
    the output size determined by the `mode` argument.

    This is generally much faster than `convolve` for large arrays (n > ~500),
    and generally much faster than `fftconvolve` when one array is much
    larger than the other, but can be slower when only a few output values are
    needed or when the arrays are very similar in shape, and can only
    output float arrays (int or object array inputs will be cast to float).

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    axes : int or array_like of ints or None, optional
        Axes over which to compute the convolution.
        The default is over all axes.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    convolve : Uses the direct convolution or FFT convolution algorithm
               depending on which is faster.
    fftconvolve : An implementation of convolution using FFT.

    Notes
    -----
    .. versionadded:: 1.4.0

    References
    ----------
    .. [1] Wikipedia, "Overlap-add_method".
           https://en.wikipedia.org/wiki/Overlap-add_method
    .. [2] Richard G. Lyons. Understanding Digital Signal Processing,
           Third Edition, 2011. Chapter 13.10.
           ISBN 13: 978-0137-02741-5

    Examples
    --------
    Convolve a 100,000 sample signal with a 512-sample filter.

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.default_rng()
    >>> sig = rng.standard_normal(100000)
    >>> filt = signal.firwin(512, 0.01)
    >>> fsig = signal.oaconvolve(sig, filt)

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('White noise')
    >>> ax_mag.plot(fsig)
    >>> ax_mag.set_title('Filtered noise')
    >>> fig.tight_layout()
    >>> fig.show()

    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif in1.ndim != in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return np.array([])
    elif in1.shape == in2.shape:  # Equivalent to fftconvolve
        return fftconvolve(in1, in2, mode=mode, axes=axes)

    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
                                          sorted_axes=True)

    s1 = in1.shape
    s2 = in2.shape

    if not axes:
        # All convolved axes were length 1; plain broadcasting suffices.
        ret = in1 * in2
        return _apply_conv_mode(ret, s1, s2, mode, axes)

    # Calculate this now since in1 is changed later
    shape_final = [None if i not in axes else
                   s1[i] + s2[i] - 1 for i in range(in1.ndim)]

    # Calculate the block sizes for the output, steps, first and second inputs.
    # It is simpler to calculate them all together than doing them in separate
    # loops due to all the special cases that need to be handled.
    optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else
                     _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim))
    block_size, overlaps, \
        in1_step, in2_step = zip(*optimal_sizes)

    # Fall back to fftconvolve if there is only one block in every dimension.
    if in1_step == s1 and in2_step == s2:
        return fftconvolve(in1, in2, mode=mode, axes=axes)

    # Figure out the number of steps and padding.
    # This would get too complicated in a list comprehension.
    nsteps1 = []
    nsteps2 = []
    pad_size1 = []
    pad_size2 = []
    for i in range(in1.ndim):
        if i not in axes:
            pad_size1 += [(0, 0)]
            pad_size2 += [(0, 0)]
            continue

        if s1[i] > in1_step[i]:
            curnstep1 = math.ceil((s1[i]+1)/in1_step[i])
            if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:
                curnstep1 += 1

            curpad1 = curnstep1*in1_step[i] - s1[i]
        else:
            curnstep1 = 1
            curpad1 = 0

        if s2[i] > in2_step[i]:
            curnstep2 = math.ceil((s2[i]+1)/in2_step[i])
            if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:
                curnstep2 += 1

            curpad2 = curnstep2*in2_step[i] - s2[i]
        else:
            curnstep2 = 1
            curpad2 = 0

        nsteps1 += [curnstep1]
        nsteps2 += [curnstep2]
        pad_size1 += [(0, curpad1)]
        pad_size2 += [(0, curpad2)]

    # Pad the array to a size that can be reshaped to the desired shape
    # if necessary.
    if not all(curpad == (0, 0) for curpad in pad_size1):
        in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0)

    if not all(curpad == (0, 0) for curpad in pad_size2):
        in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0)

    # Reshape the overlap-add parts to input block sizes.
    split_axes = [iax+i for i, iax in enumerate(axes)]
    fft_axes = [iax+1 for iax in split_axes]

    # We need to put each new dimension before the corresponding dimension
    # being reshaped in order to get the data in the right layout at the end.
    reshape_size1 = list(in1_step)
    reshape_size2 = list(in2_step)
    for i, iax in enumerate(split_axes):
        reshape_size1.insert(iax, nsteps1[i])
        reshape_size2.insert(iax, nsteps2[i])

    in1 = in1.reshape(*reshape_size1)
    in2 = in2.reshape(*reshape_size2)

    # Do the convolution.
    fft_shape = [block_size[i] for i in axes]
    ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False)

    # Do the overlap-add.
    for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes):
        overlap = overlaps[ax]
        if overlap is None:
            continue

        ret, overpart = np.split(ret, [-overlap], ax_fft)
        overpart = np.split(overpart, [-1], ax_split)[0]

        # Add each block's trailing overlap into the head of the next block
        # (in-place via the ret_overpart view).
        ret_overpart = np.split(ret, [overlap], ax_fft)[0]
        ret_overpart = np.split(ret_overpart, [1], ax_split)[1]
        ret_overpart += overpart

    # Reshape back to the correct dimensionality.
    shape_ret = [ret.shape[i] if i not in fft_axes else
                 ret.shape[i]*ret.shape[i-1]
                 for i in range(ret.ndim) if i not in split_axes]
    ret = ret.reshape(*shape_ret)

    # Slice to the correct size.
    slice_final = tuple([slice(islice) for islice in shape_final])
    ret = ret[slice_final]

    return _apply_conv_mode(ret, s1, s2, mode, axes)


def _numeric_arrays(arrays, kinds='buifc'):
    """
    See if a list of arrays are all numeric.

    Parameters
    ----------
    arrays : array or list of arrays
        arrays to check if numeric.
    kinds : string-like
        The dtypes of the arrays to be checked. If the dtype.kind of
        the ndarrays are not in this string the function returns False and
        otherwise returns True.
    """
    if type(arrays) == np.ndarray:
        return arrays.dtype.kind in kinds
    for array_ in arrays:
        if array_.dtype.kind not in kinds:
            return False
    return True


def _conv_ops(x_shape, h_shape, mode):
    """
    Find the number of operations required for direct/fft methods of
    convolution. The direct operations were recorded by making a dummy class to
    record the number of operations by overriding ``__mul__`` and ``__add__``.
    The FFT operations rely on the (well-known) computational complexity of the
    FFT (and the implementation of ``_freq_domain_conv``).
+ + """ + if mode == "full": + out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] + elif mode == "valid": + out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)] + elif mode == "same": + out_shape = x_shape + else: + raise ValueError("Acceptable mode flags are 'valid'," + f" 'same', or 'full', not mode={mode}") + + s1, s2 = x_shape, h_shape + if len(x_shape) == 1: + s1, s2 = s1[0], s2[0] + if mode == "full": + direct_ops = s1 * s2 + elif mode == "valid": + direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2 + elif mode == "same": + direct_ops = (s1 * s2 if s1 < s2 else + s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)) + else: + if mode == "full": + direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) + elif mode == "valid": + direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) + elif mode == "same": + direct_ops = _prod(s1) * _prod(s2) + + full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] + N = _prod(full_out_shape) + fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape + return fft_ops, direct_ops + + +def _fftconv_faster(x, h, mode): + """ + See if using fftconvolve or convolve is faster. + + Parameters + ---------- + x : np.ndarray + Signal + h : np.ndarray + Kernel + mode : str + Mode passed to convolve + + Returns + ------- + fft_faster : bool + + Notes + ----- + See docstring of `choose_conv_method` for details on tuning hardware. + + See pull request 11031 for more detail: + https://github.com/scipy/scipy/pull/11031. 
+ + """ + fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode) + offset = -1e-3 if x.ndim == 1 else -1e-4 + constants = { + "valid": (1.89095737e-9, 2.1364985e-10, offset), + "full": (1.7649070e-9, 2.1414831e-10, offset), + "same": (3.2646654e-9, 2.8478277e-10, offset) + if h.size <= x.size + else (3.21635404e-9, 1.1773253e-8, -1e-5), + } if x.ndim == 1 else { + "valid": (1.85927e-9, 2.11242e-8, offset), + "full": (1.99817e-9, 1.66174e-8, offset), + "same": (2.04735e-9, 1.55367e-8, offset), + } + O_fft, O_direct, O_offset = constants[mode] + return O_fft * fft_ops < O_direct * direct_ops + O_offset + + +def _reverse_and_conj(x): + """ + Reverse array `x` in all dimensions and perform the complex conjugate + """ + reverse = (slice(None, None, -1),) * x.ndim + return x[reverse].conj() + + +def _np_conv_ok(volume, kernel, mode): + """ + See if numpy supports convolution of `volume` and `kernel` (i.e. both are + 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the + size of the larger input, while SciPy's uses the size of the first input. + + Invalid mode strings will return False and be caught by the calling func. + """ + if volume.ndim == kernel.ndim == 1: + if mode in ('full', 'valid'): + return True + elif mode == 'same': + return volume.size >= kernel.size + else: + return False + + +def _timeit_fast(stmt="pass", setup="pass", repeat=3): + """ + Returns the time the statement/function took, in seconds. + + Faster, less precise version of IPython's timeit. `stmt` can be a statement + written as a string or a callable. + + Will do only 1 loop (like IPython's timeit) with no repetitions + (unlike IPython) for very slow functions. For fast functions, only does + enough loops to take 5 ms, which seems to produce similar results (on + Windows at least), and avoids doing an extraneous cycle that isn't + measured. 
+ + """ + timer = timeit.Timer(stmt, setup) + + # determine number of calls per rep so total time for 1 rep >= 5 ms + x = 0 + for p in range(0, 10): + number = 10**p + x = timer.timeit(number) # seconds + if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one + break + if x > 1: # second + # If it's macroscopic, don't bother with repetitions + best = x + else: + number *= 10 + r = timer.repeat(repeat, number) + best = min(r) + + sec = best / number + return sec + + +def choose_conv_method(in1, in2, mode='full', measure=False): + """ + Find the fastest convolution/correlation method. + + This primarily exists to be called during the ``method='auto'`` option in + `convolve` and `correlate`. It can also be used to determine the value of + ``method`` for many different convolutions of the same dtype/shape. + In addition, it supports timing the convolution to adapt the value of + ``method`` to a particular set of inputs and/or hardware. + + Parameters + ---------- + in1 : array_like + The first argument passed into the convolution function. + in2 : array_like + The second argument passed into the convolution function. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + measure : bool, optional + If True, run and time the convolution of `in1` and `in2` with both + methods and return the fastest. If False (default), predict the fastest + method using precomputed values. + + Returns + ------- + method : str + A string indicating which convolution method is fastest, either + 'direct' or 'fft' + times : dict, optional + A dictionary containing the times (in seconds) needed for each method. 
+ This value is only returned if ``measure=True``. + + See Also + -------- + convolve + correlate + + Notes + ----- + Generally, this method is 99% accurate for 2D signals and 85% accurate + for 1D signals for randomly chosen input sizes. For precision, use + ``measure=True`` to find the fastest method by timing the convolution. + This can be used to avoid the minimal overhead of finding the fastest + ``method`` later, or to adapt the value of ``method`` to a particular set + of inputs. + + Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this + function. These experiments measured the ratio between the time required + when using ``method='auto'`` and the time required for the fastest method + (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these + experiments, we found: + + * There is a 95% chance of this ratio being less than 1.5 for 1D signals + and a 99% chance of being less than 2.5 for 2D signals. + * The ratio was always less than 2.5/5 for 1D/2D signals respectively. + * This function is most inaccurate for 1D convolutions that take between 1 + and 10 milliseconds with ``method='direct'``. A good proxy for this + (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``. + + The 2D results almost certainly generalize to 3D/4D/etc because the + implementation is the same (the 1D implementation is different). + + All the numbers above are specific to the EC2 machine. However, we did find + that this function generalizes fairly decently across hardware. The speed + tests were of similar quality (and even slightly better) than the same + tests performed on the machine to tune this function's numbers (a mid-2014 + 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor). + + There are cases when `fftconvolve` supports the inputs but this function + returns `direct` (e.g., to protect against floating point integer + precision). + + .. 
versionadded:: 0.19 + + Examples + -------- + Estimate the fastest method for a given input: + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> img = rng.random((32, 32)) + >>> filter = rng.random((8, 8)) + >>> method = signal.choose_conv_method(img, filter, mode='same') + >>> method + 'fft' + + This can then be applied to other arrays of the same dtype and shape: + + >>> img2 = rng.random((32, 32)) + >>> filter2 = rng.random((8, 8)) + >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method) + >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method) + + The output of this function (``method``) works with `correlate` and + `convolve`. + + """ + volume = np.asarray(in1) + kernel = np.asarray(in2) + + if measure: + times = {} + for method in ['fft', 'direct']: + times[method] = _timeit_fast(lambda: convolve(volume, kernel, + mode=mode, method=method)) + + chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct' + return chosen_method, times + + # for integer input, + # catch when more precision required than float provides (representing an + # integer as float can lose precision in fftconvolve if larger than 2**52) + if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]): + max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max()) + max_value *= int(min(volume.size, kernel.size)) + if max_value > 2**np.finfo('float').nmant - 1: + return 'direct' + + if _numeric_arrays([volume, kernel], kinds='b'): + return 'direct' + + if _numeric_arrays([volume, kernel]): + if _fftconv_faster(volume, kernel, mode): + return 'fft' + + return 'direct' + + +def convolve(in1, in2, mode='full', method='auto'): + """ + Convolve two N-dimensional arrays. + + Convolve `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. 
        Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the convolution.

        ``direct``
           The convolution is determined directly from sums, the definition of
           convolution.
        ``fft``
           The Fourier Transform is used to perform the convolution by calling
           `fftconvolve`.
        ``auto``
           Automatically chooses direct or Fourier method based on an estimate
           of which is faster (default).  See Notes for more detail.

           .. versionadded:: 0.19.0

    Returns
    -------
    convolve : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    Warns
    -----
    RuntimeWarning
        Use of the FFT convolution on input containing NAN or INF will lead
        to the entire output being NAN or INF. Use method='direct' when your
        input contains NAN or INF values.

    See Also
    --------
    numpy.polymul : performs polynomial multiplication (same operation, but
                    also accepts poly1d objects)
    choose_conv_method : chooses the fastest appropriate convolution method
    fftconvolve : Always uses the FFT method.
    oaconvolve : Uses the overlap-add method to do convolution, which is
                 generally faster when the input arrays are large and
                 significantly different in size.

    Notes
    -----
    By default, `convolve` and `correlate` use ``method='auto'``, which calls
    `choose_conv_method` to choose the fastest method using pre-computed
    values (`choose_conv_method` can also measure real-world timing with a
    keyword argument). Because `fftconvolve` relies on floating point numbers,
    there are certain constraints that may force `method=direct` (more detail
    in `choose_conv_method` docstring).

    Examples
    --------
    Smooth a square pulse using a Hann window:

    >>> import numpy as np
    >>> from scipy import signal
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> win = signal.windows.hann(50)
    >>> filtered = signal.convolve(sig, win, mode='same') / sum(win)

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('Original pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> ax_win.plot(win)
    >>> ax_win.set_title('Filter impulse response')
    >>> ax_win.margins(0, 0.1)
    >>> ax_filt.plot(filtered)
    >>> ax_filt.set_title('Filtered signal')
    >>> ax_filt.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> fig.show()

    """
    volume = np.asarray(in1)
    kernel = np.asarray(in2)

    if volume.ndim == kernel.ndim == 0:
        return volume * kernel
    elif volume.ndim != kernel.ndim:
        raise ValueError("volume and kernel should have the same "
                         "dimensionality")

    if _inputs_swap_needed(mode, volume.shape, kernel.shape):
        # Convolution is commutative; order doesn't have any effect on output
        volume, kernel = kernel, volume

    if method == 'auto':
        method = choose_conv_method(volume, kernel, mode=mode)

    if method == 'fft':
        out = fftconvolve(volume, kernel, mode=mode)
        result_type = np.result_type(volume, kernel)
        if result_type.kind in {'u', 'i'}:
            # Integer inputs: round the float FFT result back to integers.
            out = np.around(out)

        # Only the first output element is probed: per the warning text, a
        # NAN/inf anywhere in the input makes the whole FFT output NAN/inf.
        if np.isnan(out.flat[0]) or np.isinf(out.flat[0]):
            warnings.warn("Use of fft convolution on input with NAN or inf"
                          " results in NAN or inf output. Consider using"
                          " method='direct' instead.",
                          category=RuntimeWarning, stacklevel=2)

        return out.astype(result_type)
    elif method == 'direct':
        # fastpath to faster numpy.convolve for 1d inputs when possible
        if _np_conv_ok(volume, kernel, mode):
            return np.convolve(volume, kernel, mode)

        return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
    else:
        raise ValueError("Acceptable method flags are 'auto',"
                         " 'direct', or 'fft'.")


def order_filter(a, domain, rank):
    """
    Perform an order filter on an N-D array.

    Perform an order filter on the array `a`. The domain argument acts as a
    mask centered over each pixel. The non-zero elements of domain are
    used to select elements surrounding each input pixel which are placed
    in a list. The list is sorted, and the output for that pixel is the
    element corresponding to rank in the sorted list.

    Parameters
    ----------
    a : ndarray
        The N-dimensional input array.
    domain : array_like
        A mask array with the same number of dimensions as `a`.
        Each dimension should have an odd number of elements.
    rank : int
        A non-negative integer which selects the element from the
        sorted list (0 corresponds to the smallest element, 1 is the
        next smallest element, etc.).

    Returns
    -------
    out : ndarray
        The results of the order filter in an array with the same
        shape as `a`.
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> x = np.arange(25).reshape(5, 5) + >>> domain = np.identity(3) + >>> x + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [20, 21, 22, 23, 24]]) + >>> signal.order_filter(x, domain, 0) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 2., 0.], + [ 0., 5., 6., 7., 0.], + [ 0., 10., 11., 12., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> signal.order_filter(x, domain, 2) + array([[ 6., 7., 8., 9., 4.], + [ 11., 12., 13., 14., 9.], + [ 16., 17., 18., 19., 14.], + [ 21., 22., 23., 24., 19.], + [ 20., 21., 22., 23., 24.]]) + + """ + domain = np.asarray(domain) + for dimsize in domain.shape: + if (dimsize % 2) != 1: + raise ValueError("Each dimension of domain argument " + "should have an odd number of elements.") + + a = np.asarray(a) + if a.dtype in [object, 'float128']: + mesg = (f"Using order_filter with arrays of dtype {a.dtype} is " + f"deprecated in SciPy 1.11 and will be removed in SciPy 1.14") + warnings.warn(mesg, DeprecationWarning, stacklevel=2) + + result = _sigtools._order_filterND(a, domain, rank) + else: + result = ndimage.rank_filter(a, rank, footprint=domain, mode='constant') + + return result + + +def medfilt(volume, kernel_size=None): + """ + Perform a median filter on an N-dimensional array. + + Apply a median filter to the input array using a local window-size + given by `kernel_size`. The array will automatically be zero-padded. + + Parameters + ---------- + volume : array_like + An N-dimensional input array. + kernel_size : array_like, optional + A scalar or an N-length list giving the size of the median filter + window in each dimension. Elements of `kernel_size` should be odd. + If `kernel_size` is a scalar, then this scalar is used as the size in + each dimension. Default size is 3 for each dimension. + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. 
+ + Warns + ----- + UserWarning + If array size is smaller than kernel size along any dimension + + See Also + -------- + scipy.ndimage.median_filter + scipy.signal.medfilt2d + + Notes + ----- + The more general function `scipy.ndimage.median_filter` has a more + efficient implementation of a median filter and therefore runs much faster. + + For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes, + the specialised function `scipy.signal.medfilt2d` may be faster. + + """ + volume = np.atleast_1d(volume) + if kernel_size is None: + kernel_size = [3] * volume.ndim + kernel_size = np.asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), volume.ndim) + + for k in range(volume.ndim): + if (kernel_size[k] % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + if any(k > s for k, s in zip(kernel_size, volume.shape)): + warnings.warn('kernel_size exceeds volume extent: the volume will be ' + 'zero-padded.', + stacklevel=2) + + domain = np.ones(kernel_size, dtype=volume.dtype) + + numels = np.prod(kernel_size, axis=0) + order = numels // 2 + + if volume.dtype in [np.bool_, np.complex64, np.complex128, np.clongdouble, + np.float16]: + raise ValueError(f"dtype={volume.dtype} is not supported by medfilt") + + if volume.dtype.char in ['O', 'g']: + mesg = (f"Using medfilt with arrays of dtype {volume.dtype} is " + f"deprecated in SciPy 1.11 and will be removed in SciPy 1.14") + warnings.warn(mesg, DeprecationWarning, stacklevel=2) + + result = _sigtools._order_filterND(volume, domain, order) + else: + size = math.prod(kernel_size) + result = ndimage.rank_filter(volume, size // 2, size=kernel_size, + mode='constant') + + return result + + +def wiener(im, mysize=None, noise=None): + """ + Perform a Wiener filter on an N-dimensional array. + + Apply a Wiener filter to the N-dimensional array `im`. + + Parameters + ---------- + im : ndarray + An N-dimensional array. 
+ mysize : int or array_like, optional + A scalar or an N-length list giving the size of the Wiener filter + window in each dimension. Elements of mysize should be odd. + If mysize is a scalar, then this scalar is used as the size + in each dimension. + noise : float, optional + The noise-power to use. If None, then noise is estimated as the + average of the local variance of the input. + + Returns + ------- + out : ndarray + Wiener filtered result with the same shape as `im`. + + Notes + ----- + This implementation is similar to wiener2 in Matlab/Octave. + For more details see [1]_ + + References + ---------- + .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing, + Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548. + + Examples + -------- + >>> from scipy.datasets import face + >>> from scipy.signal import wiener + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> img = rng.random((40, 40)) #Create a random image + >>> filtered_img = wiener(img, (5, 5)) #Filter the image + >>> f, (plot1, plot2) = plt.subplots(1, 2) + >>> plot1.imshow(img) + >>> plot2.imshow(filtered_img) + >>> plt.show() + + """ + im = np.asarray(im) + if mysize is None: + mysize = [3] * im.ndim + mysize = np.asarray(mysize) + if mysize.shape == (): + mysize = np.repeat(mysize.item(), im.ndim) + + # Estimate the local mean + lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0) + + # Estimate the local variance + lVar = (correlate(im ** 2, np.ones(mysize), 'same') / + np.prod(mysize, axis=0) - lMean ** 2) + + # Estimate the noise power if needed. + if noise is None: + noise = np.mean(np.ravel(lVar), axis=0) + + res = (im - lMean) + res *= (1 - noise / lVar) + res += lMean + out = np.where(lVar < noise, lMean, res) + + return out + + +def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Convolve two 2-dimensional arrays. 
+ + Convolve `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + out : ndarray + A 2-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + Examples + -------- + Compute the gradient of an image by 2D convolution with a complex Scharr + operator. (Horizontal operator is real, vertical is imaginary.) Use + symmetric boundary condition to avoid creating edges at the image + boundaries. + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy import datasets + >>> ascent = datasets.ascent() + >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j], + ... [-10+0j, 0+ 0j, +10 +0j], + ... 
[ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy + >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same') + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15)) + >>> ax_orig.imshow(ascent, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_mag.imshow(np.absolute(grad), cmap='gray') + >>> ax_mag.set_title('Gradient magnitude') + >>> ax_mag.set_axis_off() + >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles + >>> ax_ang.set_title('Gradient orientation') + >>> ax_ang.set_axis_off() + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('convolve2d inputs must both be 2-D arrays') + + if _inputs_swap_needed(mode, in1.shape, in2.shape): + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = _sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue) + return out + + +def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Cross-correlate two 2-dimensional arrays. + + Cross correlate `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. 
+ boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + correlate2d : ndarray + A 2-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. + + Notes + ----- + When using "same" mode with even-length inputs, the outputs of `correlate` + and `correlate2d` differ: There is a 1-index offset between them. + + Examples + -------- + Use 2D cross-correlation to find the location of a template in a noisy + image: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy import datasets + >>> rng = np.random.default_rng() + >>> face = datasets.face(gray=True) - datasets.face(gray=True).mean() + >>> template = np.copy(face[300:365, 670:750]) # right eye + >>> template -= template.mean() + >>> face = face + rng.standard_normal(face.shape) * 50 # add noise + >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same') + >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1, + ... 
figsize=(6, 15)) + >>> ax_orig.imshow(face, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_template.imshow(template, cmap='gray') + >>> ax_template.set_title('Template') + >>> ax_template.set_axis_off() + >>> ax_corr.imshow(corr, cmap='gray') + >>> ax_corr.set_title('Cross-correlation') + >>> ax_corr.set_axis_off() + >>> ax_orig.plot(x, y, 'ro') + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('correlate2d inputs must both be 2-D arrays') + + swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape) + if swapped_inputs: + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = _sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue) + + if swapped_inputs: + out = out[::-1, ::-1] + + return out + + +def medfilt2d(input, kernel_size=3): + """ + Median filter a 2-dimensional array. + + Apply a median filter to the `input` array using a local window-size + given by `kernel_size` (must be odd). The array is zero-padded + automatically. + + Parameters + ---------- + input : array_like + A 2-dimensional input array. + kernel_size : array_like, optional + A scalar or a list of length 2, giving the size of the + median filter window in each dimension. Elements of + `kernel_size` should be odd. If `kernel_size` is a scalar, + then this scalar is used as the size in each dimension. + Default is a kernel of size (3, 3). + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. + + See Also + -------- + scipy.ndimage.median_filter + + Notes + ----- + This is faster than `medfilt` when the input dtype is ``uint8``, + ``float32``, or ``float64``; for other types, this falls back to + `medfilt`. In some situations, `scipy.ndimage.median_filter` may be + faster than this function. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> x = np.arange(25).reshape(5, 5) + >>> x + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [20, 21, 22, 23, 24]]) + + # Replaces i,j with the median out of 5*5 window + + >>> signal.medfilt2d(x, kernel_size=5) + array([[ 0, 0, 2, 0, 0], + [ 0, 3, 7, 4, 0], + [ 2, 8, 12, 9, 4], + [ 0, 8, 12, 9, 0], + [ 0, 0, 12, 0, 0]]) + + # Replaces i,j with the median out of default 3*3 window + + >>> signal.medfilt2d(x) + array([[ 0, 1, 2, 3, 0], + [ 1, 6, 7, 8, 4], + [ 6, 11, 12, 13, 9], + [11, 16, 17, 18, 14], + [ 0, 16, 17, 18, 0]]) + + # Replaces i,j with the median out of default 5*3 window + + >>> signal.medfilt2d(x, kernel_size=[5,3]) + array([[ 0, 1, 2, 3, 0], + [ 0, 6, 7, 8, 3], + [ 5, 11, 12, 13, 8], + [ 5, 11, 12, 13, 8], + [ 0, 11, 12, 13, 0]]) + + # Replaces i,j with the median out of default 3*5 window + + >>> signal.medfilt2d(x, kernel_size=[3,5]) + array([[ 0, 0, 2, 1, 0], + [ 1, 5, 7, 6, 3], + [ 6, 10, 12, 11, 8], + [11, 15, 17, 16, 13], + [ 0, 15, 17, 16, 0]]) + + # As seen in the examples, + # kernel numbers must be odd and not exceed original array dim + + """ + image = np.asarray(input) + + # checking dtype.type, rather than just dtype, is necessary for + # excluding np.longdouble with MS Visual C. + if image.dtype.type not in (np.ubyte, np.float32, np.float64): + return medfilt(image, kernel_size) + + if kernel_size is None: + kernel_size = [3] * 2 + kernel_size = np.asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), 2) + + for size in kernel_size: + if (size % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + + return _sigtools._medfilt2d(image, kernel_size) + + +def lfilter(b, a, x, axis=-1, zi=None): + """ + Filter data along one-dimension with an IIR or FIR filter. + + Filter a data sequence, `x`, using a digital filter. 
This works for many + fundamental data types (including Object type). The filter is a direct + form II transposed implementation of the standard difference equation + (see Notes). + + The function `sosfilt` (and filter design using ``output='sos'``) should be + preferred over `lfilter` for most filtering tasks, as second-order sections + have fewer numerical problems. + + Parameters + ---------- + b : array_like + The numerator coefficient vector in a 1-D sequence. + a : array_like + The denominator coefficient vector in a 1-D sequence. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the filter delays. It is a vector + (or array of vectors for an N-dimensional input) of length + ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then + initial rest is assumed. See `lfiltic` for more information. + + Returns + ------- + y : array + The output of the digital filter. + zf : array, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + lfiltic : Construct initial conditions for `lfilter`. + lfilter_zi : Compute initial state (steady state of step response) for + `lfilter`. + filtfilt : A forward-backward filter, to obtain a filter with zero phase. + savgol_filter : A Savitzky-Golay filter. + sosfilt: Filter data using cascaded second-order sections. + sosfiltfilt: A forward-backward filter using second-order sections. + + Notes + ----- + The filter function is implemented as a direct II transposed structure. + This means that the filter implements:: + + a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M] + - a[1]*y[n-1] - ... 
- a[N]*y[n-N] + + where `M` is the degree of the numerator, `N` is the degree of the + denominator, and `n` is the sample number. It is implemented using + the following difference equations (assuming M = N):: + + a[0]*y[n] = b[0] * x[n] + d[0][n-1] + d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1] + d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1] + ... + d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1] + d[N-1][n] = b[N] * x[n] - a[N] * y[n] + + where `d` are the state variables. + + The rational transfer function describing this filter in the + z-transform domain is:: + + -1 -M + b[0] + b[1]z + ... + b[M] z + Y(z) = -------------------------------- X(z) + -1 -N + a[0] + a[1]z + ... + a[N] z + + Examples + -------- + Generate a noisy signal to be filtered: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> t = np.linspace(-1, 1, 201) + >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) + + ... 0.1*np.sin(2*np.pi*1.25*t + 1) + + ... 0.18*np.cos(2*np.pi*3.85*t)) + >>> xn = x + rng.standard_normal(len(t)) * 0.08 + + Create an order 3 lowpass butterworth filter: + + >>> b, a = signal.butter(3, 0.05) + + Apply the filter to xn. Use lfilter_zi to choose the initial condition of + the filter: + + >>> zi = signal.lfilter_zi(b, a) + >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0]) + + Apply the filter again, to have a result filtered at an order the same as + filtfilt: + + >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0]) + + Use filtfilt to apply the filter: + + >>> y = signal.filtfilt(b, a, xn) + + Plot the original signal and the various filtered versions: + + >>> plt.figure + >>> plt.plot(t, xn, 'b', alpha=0.75) + >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k') + >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice', + ... 
'filtfilt'), loc='best') + >>> plt.grid(True) + >>> plt.show() + + """ + b = np.atleast_1d(b) + a = np.atleast_1d(a) + if len(a) == 1: + # This path only supports types fdgFDGO to mirror _linear_filter below. + # Any of b, a, x, or zi can set the dtype, but there is no default + # casting of other types; instead a NotImplementedError is raised. + b = np.asarray(b) + a = np.asarray(a) + if b.ndim != 1 and a.ndim != 1: + raise ValueError('object of too small depth for desired array') + x = _validate_x(x) + inputs = [b, a, x] + if zi is not None: + # _linear_filter does not broadcast zi, but does do expansion of + # singleton dims. + zi = np.asarray(zi) + if zi.ndim != x.ndim: + raise ValueError('object of too small depth for desired array') + expected_shape = list(x.shape) + expected_shape[axis] = b.shape[0] - 1 + expected_shape = tuple(expected_shape) + # check the trivial case where zi is the right shape first + if zi.shape != expected_shape: + strides = zi.ndim * [None] + if axis < 0: + axis += zi.ndim + for k in range(zi.ndim): + if k == axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == 1: + strides[k] = 0 + else: + raise ValueError('Unexpected shape for zi: expected ' + f'{expected_shape}, found {zi.shape}.') + zi = np.lib.stride_tricks.as_strided(zi, expected_shape, + strides) + inputs.append(zi) + dtype = np.result_type(*inputs) + + if dtype.char not in 'fdgFDGO': + raise NotImplementedError("input type '%s' not supported" % dtype) + + b = np.array(b, dtype=dtype) + a = np.asarray(a, dtype=dtype) + b /= a[0] + x = np.asarray(x, dtype=dtype) + + out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x) + ind = out_full.ndim * [slice(None)] + if zi is not None: + ind[axis] = slice(zi.shape[axis]) + out_full[tuple(ind)] += zi + + ind[axis] = slice(out_full.shape[axis] - len(b) + 1) + out = out_full[tuple(ind)] + 
+ if zi is None: + return out + else: + ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None) + zf = out_full[tuple(ind)] + return out, zf + else: + if zi is None: + return _sigtools._linear_filter(b, a, x, axis) + else: + return _sigtools._linear_filter(b, a, x, axis, zi) + + +def lfiltic(b, a, y, x=None): + """ + Construct initial conditions for lfilter given input and output vectors. + + Given a linear filter (b, a) and initial conditions on the output `y` + and the input `x`, return the initial conditions on the state vector zi + which is used by `lfilter` to generate the output given the input. + + Parameters + ---------- + b : array_like + Linear filter term. + a : array_like + Linear filter term. + y : array_like + Initial conditions. + + If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``. + + If `y` is too short, it is padded with zeros. + x : array_like, optional + Initial conditions. + + If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``. + + If `x` is not given, its initial conditions are assumed zero. + + If `x` is too short, it is padded with zeros. + + Returns + ------- + zi : ndarray + The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, + where ``K = max(M, N)``. 
+ + See Also + -------- + lfilter, lfilter_zi + + """ + N = np.size(a) - 1 + M = np.size(b) - 1 + K = max(M, N) + y = np.asarray(y) + + if x is None: + result_type = np.result_type(np.asarray(b), np.asarray(a), y) + if result_type.kind in 'bui': + result_type = np.float64 + x = np.zeros(M, dtype=result_type) + else: + x = np.asarray(x) + + result_type = np.result_type(np.asarray(b), np.asarray(a), y, x) + if result_type.kind in 'bui': + result_type = np.float64 + x = x.astype(result_type) + + L = np.size(x) + if L < M: + x = np.r_[x, np.zeros(M - L)] + + y = y.astype(result_type) + zi = np.zeros(K, result_type) + + L = np.size(y) + if L < N: + y = np.r_[y, np.zeros(N - L)] + + for m in range(M): + zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0) + + for m in range(N): + zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0) + + return zi + + +def deconvolve(signal, divisor): + """Deconvolves ``divisor`` out of ``signal`` using inverse filtering. + + Returns the quotient and remainder such that + ``signal = convolve(divisor, quotient) + remainder`` + + Parameters + ---------- + signal : (N,) array_like + Signal data, typically a recorded signal + divisor : (N,) array_like + Divisor data, typically an impulse response or filter that was + applied to the original signal + + Returns + ------- + quotient : ndarray + Quotient, typically the recovered original signal + remainder : ndarray + Remainder + + See Also + -------- + numpy.polydiv : performs polynomial division (same operation, but + also accepts poly1d objects) + + Examples + -------- + Deconvolve a signal that's been filtered: + + >>> from scipy import signal + >>> original = [0, 1, 0, 0, 1, 1, 0, 0] + >>> impulse_response = [2, 1] + >>> recorded = signal.convolve(impulse_response, original) + >>> recorded + array([0, 2, 1, 0, 2, 3, 1, 0, 0]) + >>> recovered, remainder = signal.deconvolve(recorded, impulse_response) + >>> recovered + array([ 0., 1., 0., 0., 1., 1., 0., 0.]) + + """ + num = np.atleast_1d(signal) + den = 
np.atleast_1d(divisor) + if num.ndim > 1: + raise ValueError("signal must be 1-D.") + if den.ndim > 1: + raise ValueError("divisor must be 1-D.") + N = len(num) + D = len(den) + if D > N: + quot = [] + rem = num + else: + input = np.zeros(N - D + 1, float) + input[0] = 1 + quot = lfilter(num, den, input) + rem = num - convolve(den, quot, mode='full') + return quot, rem + + +def hilbert(x, N=None, axis=-1): + """ + Compute the analytic signal, using the Hilbert transform. + + The transformation is done along the last axis by default. + + Parameters + ---------- + x : array_like + Signal data. Must be real. + N : int, optional + Number of Fourier components. Default: ``x.shape[axis]`` + axis : int, optional + Axis along which to do the transformation. Default: -1. + + Returns + ------- + xa : ndarray + Analytic signal of `x`, of each 1-D array along `axis` + + Notes + ----- + The analytic signal ``x_a(t)`` of signal ``x(t)`` is: + + .. math:: x_a = F^{-1}(F(x) 2U) = x + i y + + where `F` is the Fourier transform, `U` the unit step function, + and `y` the Hilbert transform of `x`. [1]_ + + In other words, the negative half of the frequency spectrum is zeroed + out, turning the real-valued signal into a complex signal. The Hilbert + transformed signal can be obtained from ``np.imag(hilbert(x))``, and the + original signal from ``np.real(hilbert(x))``. + + References + ---------- + .. [1] Wikipedia, "Analytic signal". + https://en.wikipedia.org/wiki/Analytic_signal + .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2. + .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal + Processing, Third Edition, 2009. Chapter 12. + ISBN 13: 978-1292-02572-8 + + Examples + -------- + In this example we use the Hilbert transform to determine the amplitude + envelope and instantaneous frequency of an amplitude-modulated signal. 
+ + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import hilbert, chirp + + >>> duration = 1.0 + >>> fs = 400.0 + >>> samples = int(fs*duration) + >>> t = np.arange(samples) / fs + + We create a chirp of which the frequency increases from 20 Hz to 100 Hz and + apply an amplitude modulation. + + >>> signal = chirp(t, 20.0, t[-1], 100.0) + >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) ) + + The amplitude envelope is given by magnitude of the analytic signal. The + instantaneous frequency can be obtained by differentiating the + instantaneous phase in respect to time. The instantaneous phase corresponds + to the phase angle of the analytic signal. + + >>> analytic_signal = hilbert(signal) + >>> amplitude_envelope = np.abs(analytic_signal) + >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal)) + >>> instantaneous_frequency = (np.diff(instantaneous_phase) / + ... (2.0*np.pi) * fs) + + >>> fig, (ax0, ax1) = plt.subplots(nrows=2) + >>> ax0.plot(t, signal, label='signal') + >>> ax0.plot(t, amplitude_envelope, label='envelope') + >>> ax0.set_xlabel("time in seconds") + >>> ax0.legend() + >>> ax1.plot(t[1:], instantaneous_frequency) + >>> ax1.set_xlabel("time in seconds") + >>> ax1.set_ylim(0.0, 120.0) + >>> fig.tight_layout() + + """ + x = np.asarray(x) + if np.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape[axis] + if N <= 0: + raise ValueError("N must be positive.") + + Xf = sp_fft.fft(x, N, axis=axis) + h = np.zeros(N, dtype=Xf.dtype) + if N % 2 == 0: + h[0] = h[N // 2] = 1 + h[1:N // 2] = 2 + else: + h[0] = 1 + h[1:(N + 1) // 2] = 2 + + if x.ndim > 1: + ind = [np.newaxis] * x.ndim + ind[axis] = slice(None) + h = h[tuple(ind)] + x = sp_fft.ifft(Xf * h, axis=axis) + return x + + +def hilbert2(x, N=None): + """ + Compute the '2-D' analytic signal of `x` + + Parameters + ---------- + x : array_like + 2-D signal data. 
+ N : int or tuple of two ints, optional + Number of Fourier components. Default is ``x.shape`` + + Returns + ------- + xa : ndarray + Analytic signal of `x` taken along axes (0,1). + + References + ---------- + .. [1] Wikipedia, "Analytic signal", + https://en.wikipedia.org/wiki/Analytic_signal + + """ + x = np.atleast_2d(x) + if x.ndim > 2: + raise ValueError("x must be 2-D.") + if np.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape + elif isinstance(N, int): + if N <= 0: + raise ValueError("N must be positive.") + N = (N, N) + elif len(N) != 2 or np.any(np.asarray(N) <= 0): + raise ValueError("When given as a tuple, N must hold exactly " + "two positive integers") + + Xf = sp_fft.fft2(x, N, axes=(0, 1)) + h1 = np.zeros(N[0], dtype=Xf.dtype) + h2 = np.zeros(N[1], dtype=Xf.dtype) + for h in (h1, h2): + N1 = h.shape[0] + if N1 % 2 == 0: + h[0] = h[N1 // 2] = 1 + h[1:N1 // 2] = 2 + else: + h[0] = 1 + h[1:(N1 + 1) // 2] = 2 + + h = h1[:, np.newaxis] * h2[np.newaxis, :] + k = x.ndim + while k > 2: + h = h[:, np.newaxis] + k -= 1 + x = sp_fft.ifft2(Xf * h, axes=(0, 1)) + return x + + +_msg_cplx_sort="""cmplx_sort was deprecated in SciPy 1.12 and will be removed +in SciPy 1.15. The exact equivalent for a numpy array argument is +>>> def cmplx_sort(p): +... idx = np.argsort(abs(p)) +... return np.take(p, idx, 0), idx +""" + +def cmplx_sort(p): + warnings.warn(_msg_cplx_sort, DeprecationWarning, stacklevel=2) + return _cmplx_sort(p) + + +def _cmplx_sort(p): + """Sort roots based on magnitude. + + Parameters + ---------- + p : array_like + The roots to sort, as a 1-D array. + + Returns + ------- + p_sorted : ndarray + Sorted roots. + indx : ndarray + Array of indices needed to sort the input `p`. 
+ + Examples + -------- + >>> from scipy import signal + >>> vals = [1, 4, 1+1.j, 3] + >>> p_sorted, indx = signal.cmplx_sort(vals) + >>> p_sorted + array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j]) + >>> indx + array([0, 2, 3, 1]) + """ + p = np.asarray(p) + indx = np.argsort(abs(p)) + return np.take(p, indx, 0), indx + + +def unique_roots(p, tol=1e-3, rtype='min'): + """Determine unique roots and their multiplicities from a list of roots. + + Parameters + ---------- + p : array_like + The list of roots. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. Refer to Notes about + the details on roots grouping. + rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional + How to determine the returned root if multiple roots are within + `tol` of each other. + + - 'max', 'maximum': pick the maximum of those roots + - 'min', 'minimum': pick the minimum of those roots + - 'avg', 'mean': take the average of those roots + + When finding minimum or maximum among complex roots they are compared + first by the real part and then by the imaginary part. + + Returns + ------- + unique : ndarray + The list of unique roots. + multiplicity : ndarray + The multiplicity of each root. + + Notes + ----- + If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to + ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it + doesn't necessarily mean that ``a`` is close to ``c``. It means that roots + grouping is not unique. In this function we use "greedy" grouping going + through the roots in the order they are given in the input `p`. + + This utility function is not specific to roots but can be used for any + sequence of values for which uniqueness and multiplicity has to be + determined. For a more general routine, see `numpy.unique`. 
+ + Examples + -------- + >>> from scipy import signal + >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3] + >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg') + + Check which roots have multiplicity larger than 1: + + >>> uniq[mult > 1] + array([ 1.305]) + """ + if rtype in ['max', 'maximum']: + reduce = np.max + elif rtype in ['min', 'minimum']: + reduce = np.min + elif rtype in ['avg', 'mean']: + reduce = np.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + + p = np.asarray(p) + + points = np.empty((len(p), 2)) + points[:, 0] = np.real(p) + points[:, 1] = np.imag(p) + tree = cKDTree(points) + + p_unique = [] + p_multiplicity = [] + used = np.zeros(len(p), dtype=bool) + for i in range(len(p)): + if used[i]: + continue + + group = tree.query_ball_point(points[i], tol) + group = [x for x in group if not used[x]] + + p_unique.append(reduce(p[group])) + p_multiplicity.append(len(group)) + + used[group] = True + + return np.asarray(p_unique), np.asarray(p_multiplicity) + + +def invres(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(s) and a(s) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... + --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `invresz`. 
+ + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residue, invresz, unique_roots + + """ + r = np.atleast_1d(r) + p = np.atleast_1d(p) + k = np.trim_zeros(np.atleast_1d(k), 'f') + + unique_poles, multiplicity = _group_poles(p, tol, rtype) + factors, denominator = _compute_factors(unique_poles, multiplicity, + include_powers=True) + + if len(k) == 0: + numerator = 0 + else: + numerator = np.polymul(k, denominator) + + for residue, factor in zip(r, factors): + numerator = np.polyadd(numerator, residue * factor) + + return numerator, denominator + + +def _compute_factors(roots, multiplicity, include_powers=False): + """Compute the total polynomial divided by factors for each root.""" + current = np.array([1]) + suffixes = [current] + for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]): + monomial = np.array([1, -pole]) + for _ in range(mult): + current = np.polymul(current, monomial) + suffixes.append(current) + suffixes = suffixes[::-1] + + factors = [] + current = np.array([1]) + for pole, mult, suffix in zip(roots, multiplicity, suffixes): + monomial = np.array([1, -pole]) + block = [] + for i in range(mult): + if i == 0 or include_powers: + block.append(np.polymul(current, suffix)) + current = np.polymul(current, 
monomial) + factors.extend(reversed(block)) + + return factors, current + + +def _compute_residues(poles, multiplicity, numerator): + denominator_factors, _ = _compute_factors(poles, multiplicity) + numerator = numerator.astype(poles.dtype) + + residues = [] + for pole, mult, factor in zip(poles, multiplicity, + denominator_factors): + if mult == 1: + residues.append(np.polyval(numerator, pole) / + np.polyval(factor, pole)) + else: + numer = numerator.copy() + monomial = np.array([1, -pole]) + factor, d = np.polydiv(factor, monomial) + + block = [] + for _ in range(mult): + numer, n = np.polydiv(numer, monomial) + r = n[0] / d[0] + numer = np.polysub(numer, r * factor) + block.append(r) + + residues.extend(reversed(block)) + + return np.asarray(residues) + + +def residue(b, a, tol=1e-3, rtype='avg'): + """Compute partial-fraction expansion of b(s) / a(s). + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... + --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `residuez`. + + See Notes for details about the algorithm. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. 
Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + r : ndarray + Residues corresponding to the poles. For repeated poles, the residues + are ordered to correspond to ascending by power fractions. + p : ndarray + Poles ordered by magnitude in ascending order. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invres, residuez, numpy.poly, unique_roots + + Notes + ----- + The "deflation through subtraction" algorithm is used for + computations --- method 6 in [1]_. + + The form of partial fraction expansion depends on poles multiplicity in + the exact mathematical sense. However there is no way to exactly + determine multiplicity of roots of a polynomial in numerical computing. + Thus you should think of the result of `residue` with given `tol` as + partial fraction expansion computed for the denominator composed of the + computed poles with empirically determined multiplicity. The choice of + `tol` can drastically change the result if there are close poles. + + References + ---------- + .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a + review of computational methodology and efficiency", Journal of + Computational and Applied Mathematics, Vol. 9, 1983. 
+ """ + b = np.asarray(b) + a = np.asarray(a) + if (np.issubdtype(b.dtype, np.complexfloating) + or np.issubdtype(a.dtype, np.complexfloating)): + b = b.astype(complex) + a = a.astype(complex) + else: + b = b.astype(float) + a = a.astype(float) + + b = np.trim_zeros(np.atleast_1d(b), 'f') + a = np.trim_zeros(np.atleast_1d(a), 'f') + + if a.size == 0: + raise ValueError("Denominator `a` is zero.") + + poles = np.roots(a) + if b.size == 0: + return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) + + if len(b) < len(a): + k = np.empty(0) + else: + k, b = np.polydiv(b, a) + + unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) + unique_poles, order = _cmplx_sort(unique_poles) + multiplicity = multiplicity[order] + + residues = _compute_residues(unique_poles, multiplicity, b) + + index = 0 + for pole, mult in zip(unique_poles, multiplicity): + poles[index:index + mult] = pole + index += mult + + return residues / a[0], poles, k + + +def residuez(b, a, tol=1e-3, rtype='avg'): + """Compute partial-fraction expansion of b(z) / a(z). + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `residue`. + + See Notes of `residue` for details about the algorithm. 
+ + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + r : ndarray + Residues corresponding to the poles. For repeated poles, the residues + are ordered to correspond to ascending by power fractions. + p : ndarray + Poles ordered by magnitude in ascending order. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invresz, residue, unique_roots + """ + b = np.asarray(b) + a = np.asarray(a) + if (np.issubdtype(b.dtype, np.complexfloating) + or np.issubdtype(a.dtype, np.complexfloating)): + b = b.astype(complex) + a = a.astype(complex) + else: + b = b.astype(float) + a = a.astype(float) + + b = np.trim_zeros(np.atleast_1d(b), 'b') + a = np.trim_zeros(np.atleast_1d(a), 'b') + + if a.size == 0: + raise ValueError("Denominator `a` is zero.") + elif a[0] == 0: + raise ValueError("First coefficient of determinant `a` must be " + "non-zero.") + + poles = np.roots(a) + if b.size == 0: + return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) + + b_rev = b[::-1] + a_rev = a[::-1] + + if len(b_rev) < len(a_rev): + k_rev = np.empty(0) + else: + k_rev, b_rev = np.polydiv(b_rev, a_rev) + + unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) + unique_poles, order = _cmplx_sort(unique_poles) + multiplicity = multiplicity[order] + + residues = _compute_residues(1 / unique_poles, multiplicity, b_rev) + + index = 0 + powers = np.empty(len(residues), dtype=int) + for pole, mult in zip(unique_poles, multiplicity): + poles[index:index + mult] = pole + powers[index:index + mult] 
= 1 + np.arange(mult) + index += mult + + residues *= (-poles) ** powers / a_rev[0] + + return residues, poles, k_rev[::-1] + + +def _group_poles(poles, tol, rtype): + if rtype in ['max', 'maximum']: + reduce = np.max + elif rtype in ['min', 'minimum']: + reduce = np.min + elif rtype in ['avg', 'mean']: + reduce = np.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + + unique = [] + multiplicity = [] + + pole = poles[0] + block = [pole] + for i in range(1, len(poles)): + if abs(poles[i] - pole) <= tol: + block.append(pole) + else: + unique.append(reduce(block)) + multiplicity.append(len(block)) + pole = poles[i] + block = [pole] + + unique.append(reduce(block)) + multiplicity.append(len(block)) + + return np.asarray(unique), np.asarray(multiplicity) + + +def invresz(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(z) and a(z) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `invres`. + + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. 
+ k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residuez, unique_roots, invres + + """ + r = np.atleast_1d(r) + p = np.atleast_1d(p) + k = np.trim_zeros(np.atleast_1d(k), 'b') + + unique_poles, multiplicity = _group_poles(p, tol, rtype) + factors, denominator = _compute_factors(unique_poles, multiplicity, + include_powers=True) + + if len(k) == 0: + numerator = 0 + else: + numerator = np.polymul(k[::-1], denominator[::-1]) + + for residue, factor in zip(r, factors): + numerator = np.polyadd(numerator, residue * factor[::-1]) + + return numerator[::-1], denominator + + +def resample(x, num, t=None, axis=0, window=None, domain='time'): + """ + Resample `x` to `num` samples using Fourier method along the given axis. + + The resampled signal starts at the same value as `x` but is sampled + with a spacing of ``len(x) / num * (spacing of x)``. Because a + Fourier method is used, the signal is assumed to be periodic. + + Parameters + ---------- + x : array_like + The data to be resampled. + num : int + The number of samples in the resampled signal. + t : array_like, optional + If `t` is given, it is assumed to be the equally spaced sample + positions associated with the signal data in `x`. + axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : array_like, callable, string, float, or tuple, optional + Specifies the window applied to the signal in the Fourier + domain. See below for details. 
+ domain : string, optional + A string indicating the domain of the input `x`: + ``time`` Consider the input `x` as time-domain (Default), + ``freq`` Consider the input `x` as frequency-domain. + + Returns + ------- + resampled_x or (resampled_x, resampled_t) + Either the resampled array, or, if `t` was given, a tuple + containing the resampled array and the corresponding resampled + positions. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The argument `window` controls a Fourier-domain window that tapers + the Fourier spectrum before zero-padding to alleviate ringing in + the resampled values for sampled signals you didn't intend to be + interpreted as band-limited. + + If `window` is a function, then it is called with a vector of inputs + indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ). + + If `window` is an array of the same length as `x.shape[axis]` it is + assumed to be the window to be applied directly in the Fourier + domain (with dc and low-frequency first). + + For any other type of `window`, the function `scipy.signal.get_window` + is called to generate the window. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * len(x) / num``. + + If `t` is not None, then it is used solely to calculate the resampled + positions `resampled_t` + + As noted, `resample` uses FFT transformations, which can be very + slow if the number of input or output samples is large and prime; + see `scipy.fft.fft`. 
+ + Examples + -------- + Note that the end of the resampled data rises to meet the first + sample of the next cycle: + + >>> import numpy as np + >>> from scipy import signal + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f = signal.resample(y, 100) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro') + >>> plt.legend(['data', 'resampled'], loc='best') + >>> plt.show() + """ + + if domain not in ('time', 'freq'): + raise ValueError("Acceptable domain flags are 'time' or" + f" 'freq', not domain={domain}") + + x = np.asarray(x) + Nx = x.shape[axis] + + # Check if we can use faster real FFT + real_input = np.isrealobj(x) + + if domain == 'time': + # Forward transform + if real_input: + X = sp_fft.rfft(x, axis=axis) + else: # Full complex FFT + X = sp_fft.fft(x, axis=axis) + else: # domain == 'freq' + X = x + + # Apply window to spectrum + if window is not None: + if callable(window): + W = window(sp_fft.fftfreq(Nx)) + elif isinstance(window, np.ndarray): + if window.shape != (Nx,): + raise ValueError('window must have the same length as data') + W = window + else: + W = sp_fft.ifftshift(get_window(window, Nx)) + + newshape_W = [1] * x.ndim + newshape_W[axis] = X.shape[axis] + if real_input: + # Fold the window back on itself to mimic complex behavior + W_real = W.copy() + W_real[1:] += W_real[-1:0:-1] + W_real[1:] *= 0.5 + X *= W_real[:newshape_W[axis]].reshape(newshape_W) + else: + X *= W.reshape(newshape_W) + + # Copy each half of the original spectrum to the output spectrum, either + # truncating high frequencies (downsampling) or zero-padding them + # (upsampling) + + # Placeholder array for output spectrum + newshape = list(x.shape) + if real_input: + newshape[axis] = num // 2 + 1 + else: + newshape[axis] = num + Y = np.zeros(newshape, X.dtype) + + # Copy positive frequency components (and Nyquist, if present) + N = min(num, 
Nx) + nyq = N // 2 + 1 # Slice index that includes Nyquist if present + sl = [slice(None)] * x.ndim + sl[axis] = slice(0, nyq) + Y[tuple(sl)] = X[tuple(sl)] + if not real_input: + # Copy negative frequency components + if N > 2: # (slice expression doesn't collapse to empty array) + sl[axis] = slice(nyq - N, None) + Y[tuple(sl)] = X[tuple(sl)] + + # Split/join Nyquist component(s) if present + # So far we have set Y[+N/2]=X[+N/2] + if N % 2 == 0: + if num < Nx: # downsampling + if real_input: + sl[axis] = slice(N//2, N//2 + 1) + Y[tuple(sl)] *= 2. + else: + # select the component of Y at frequency +N/2, + # add the component of X at -N/2 + sl[axis] = slice(-N//2, -N//2 + 1) + Y[tuple(sl)] += X[tuple(sl)] + elif Nx < num: # upsampling + # select the component at frequency +N/2 and halve it + sl[axis] = slice(N//2, N//2 + 1) + Y[tuple(sl)] *= 0.5 + if not real_input: + temp = Y[tuple(sl)] + # set the component at -N/2 equal to the component at +N/2 + sl[axis] = slice(num-N//2, num-N//2 + 1) + Y[tuple(sl)] = temp + + # Inverse transform + if real_input: + y = sp_fft.irfft(Y, num, axis=axis) + else: + y = sp_fft.ifft(Y, axis=axis, overwrite_x=True) + + y *= (float(num) / float(Nx)) + + if t is None: + return y + else: + new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] + return y, new_t + + +def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0), + padtype='constant', cval=None): + """ + Resample `x` along the given axis using polyphase filtering. + + The signal `x` is upsampled by the factor `up`, a zero-phase low-pass + FIR filter is applied, and then it is downsampled by the factor `down`. + The resulting sample rate is ``up / down`` times the original sample + rate. By default, values beyond the boundary of the signal are assumed + to be zero during the filtering step. + + Parameters + ---------- + x : array_like + The data to be resampled. + up : int + The upsampling factor. + down : int + The downsampling factor. 
+ axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : string, tuple, or array_like, optional + Desired window to use to design the low-pass filter, or the FIR filter + coefficients to employ. See below for details. + padtype : string, optional + `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of + the other signal extension modes supported by `scipy.signal.upfirdn`. + Changes assumptions on values beyond the boundary. If `constant`, + assumed to be `cval` (default zero). If `line` assumed to continue a + linear trend defined by the first and last points. `mean`, `median`, + `maximum` and `minimum` work as in `np.pad` and assume that the values + beyond the boundary are the mean, median, maximum or minimum + respectively of the array along the axis. + + .. versionadded:: 1.4.0 + cval : float, optional + Value to use if `padtype='constant'`. Default is zero. + + .. versionadded:: 1.4.0 + + Returns + ------- + resampled_x : array + The resampled array. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample : Resample up or down using the FFT method. + + Notes + ----- + This polyphase method will likely be faster than the Fourier method + in `scipy.signal.resample` when the number of samples is large and + prime, or when the number of samples is large and `up` and `down` + share a large greatest common denominator. The length of the FIR + filter used will depend on ``max(up, down) // gcd(up, down)``, and + the number of operations during polyphase filtering will depend on + the filter length and `down` (see `scipy.signal.upfirdn` for details). + + The argument `window` specifies the FIR low-pass filter design. + + If `window` is an array_like it is assumed to be the FIR filter + coefficients. 
Note that the FIR filter is applied after the upsampling + step, so it should be designed to operate on a signal at a sampling + frequency higher than the original by a factor of `up//gcd(up, down)`. + This function's output will be centered with respect to this array, so it + is best to pass a symmetric filter with an odd number of samples if, as + is usually the case, a zero-phase filter is desired. + + For any other type of `window`, the functions `scipy.signal.get_window` + and `scipy.signal.firwin` are called to generate the appropriate filter + coefficients. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * down / float(up)``. + + Examples + -------- + By default, the end of the resampled data rises to meet the first + sample of the next cycle for the FFT method, and gets closer to zero + for the polyphase method: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f_fft = signal.resample(y, 100) + >>> f_poly = signal.resample_poly(y, 100, 20) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-') + >>> plt.plot(x, y, 'ko-') + >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries + >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best') + >>> plt.show() + + This default behaviour can be changed by using the padtype option: + + >>> N = 5 + >>> x = np.linspace(0, 1, N, endpoint=False) + >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x) + >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x) + >>> Y = np.stack([y, y2], axis=-1) + >>> up = 4 + >>> xr = np.linspace(0, 1, N*up, endpoint=False) + + >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant') + >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean') + >>> y4 = signal.resample_poly(Y, up, 1, 
padtype='line') + + >>> for i in [0,1]: + ... plt.figure() + ... plt.plot(xr, y4[:,i], 'g.', label='line') + ... plt.plot(xr, y3[:,i], 'y.', label='mean') + ... plt.plot(xr, y2[:,i], 'r.', label='constant') + ... plt.plot(x, Y[:,i], 'k-') + ... plt.legend() + >>> plt.show() + + """ + x = np.asarray(x) + if up != int(up): + raise ValueError("up must be an integer") + if down != int(down): + raise ValueError("down must be an integer") + up = int(up) + down = int(down) + if up < 1 or down < 1: + raise ValueError('up and down must be >= 1') + if cval is not None and padtype != 'constant': + raise ValueError('cval has no effect when padtype is ', padtype) + + # Determine our up and down factors + # Use a rational approximation to save computation time on really long + # signals + g_ = math.gcd(up, down) + up //= g_ + down //= g_ + if up == down == 1: + return x.copy() + n_in = x.shape[axis] + n_out = n_in * up + n_out = n_out // down + bool(n_out % down) + + if isinstance(window, (list, np.ndarray)): + window = np.array(window) # use array to force a copy (we modify it) + if window.ndim > 1: + raise ValueError('window must be 1-D') + half_len = (window.size - 1) // 2 + h = window + else: + # Design a linear-phase low-pass FIR filter + max_rate = max(up, down) + f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist) + half_len = 10 * max_rate # reasonable cutoff for sinc-like function + h = firwin(2 * half_len + 1, f_c, + window=window).astype(x.dtype) # match dtype of x + h *= up + + # Zero-pad our filter to put the output samples at the center + n_pre_pad = (down - half_len % down) + n_post_pad = 0 + n_pre_remove = (half_len + n_pre_pad) // down + # We should rarely need to do this given our filter lengths... 
+ while _output_len(len(h) + n_pre_pad + n_post_pad, n_in, + up, down) < n_out + n_pre_remove: + n_post_pad += 1 + h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h, + np.zeros(n_post_pad, dtype=h.dtype))) + n_pre_remove_end = n_pre_remove + n_out + + # Remove background depending on the padtype option + funcs = {'mean': np.mean, 'median': np.median, + 'minimum': np.amin, 'maximum': np.amax} + upfirdn_kwargs = {'mode': 'constant', 'cval': 0} + if padtype in funcs: + background_values = funcs[padtype](x, axis=axis, keepdims=True) + elif padtype in _upfirdn_modes: + upfirdn_kwargs = {'mode': padtype} + if padtype == 'constant': + if cval is None: + cval = 0 + upfirdn_kwargs['cval'] = cval + else: + raise ValueError( + 'padtype must be one of: maximum, mean, median, minimum, ' + + ', '.join(_upfirdn_modes)) + + if padtype in funcs: + x = x - background_values + + # filter then remove excess + y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs) + keep = [slice(None), ]*x.ndim + keep[axis] = slice(n_pre_remove, n_pre_remove_end) + y_keep = y[tuple(keep)] + + # Add background back + if padtype in funcs: + y_keep += background_values + + return y_keep + + +def vectorstrength(events, period): + ''' + Determine the vector strength of the events corresponding to the given + period. + + The vector strength is a measure of phase synchrony, how well the + timing of the events is synchronized to a single period of a periodic + signal. + + If multiple periods are used, calculate the vector strength of each. + This is called the "resonating vector strength". + + Parameters + ---------- + events : 1D array_like + An array of time points containing the timing of the events. + period : float or array_like + The period of the signal that the events should synchronize to. + The period is in the same units as `events`. It can also be an array + of periods, in which case the outputs are arrays of the same length. 
+ + Returns + ------- + strength : float or 1D array + The strength of the synchronization. 1.0 is perfect synchronization + and 0.0 is no synchronization. If `period` is an array, this is also + an array with each element containing the vector strength at the + corresponding period. + phase : float or array + The phase that the events are most strongly synchronized to in radians. + If `period` is an array, this is also an array with each element + containing the phase for the corresponding period. + + References + ---------- + van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector + strength: Auditory system, electric fish, and noise. + Chaos 21, 047508 (2011); + :doi:`10.1063/1.3670512`. + van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises: + biological and mathematical perspectives. Biol Cybern. + 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`. + van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens + when we vary the "probing" frequency while keeping the spike times + fixed. Biol Cybern. 2013 Aug;107(4):491-94. + :doi:`10.1007/s00422-013-0560-8`. 
+ ''' + events = np.asarray(events) + period = np.asarray(period) + if events.ndim > 1: + raise ValueError('events cannot have dimensions more than 1') + if period.ndim > 1: + raise ValueError('period cannot have dimensions more than 1') + + # we need to know later if period was originally a scalar + scalarperiod = not period.ndim + + events = np.atleast_2d(events) + period = np.atleast_2d(period) + if (period <= 0).any(): + raise ValueError('periods must be positive') + + # this converts the times to vectors + vectors = np.exp(np.dot(2j*np.pi/period.T, events)) + + # the vector strength is just the magnitude of the mean of the vectors + # the vector phase is the angle of the mean of the vectors + vectormean = np.mean(vectors, axis=1) + strength = abs(vectormean) + phase = np.angle(vectormean) + + # if the original period was a scalar, return scalars + if scalarperiod: + strength = strength[0] + phase = phase[0] + return strength, phase + + +def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False): + """ + Remove linear trend along axis from data. + + Parameters + ---------- + data : array_like + The input data. + axis : int, optional + The axis along which to detrend the data. By default this is the + last axis (-1). + type : {'linear', 'constant'}, optional + The type of detrending. If ``type == 'linear'`` (default), + the result of a linear least-squares fit to `data` is subtracted + from `data`. + If ``type == 'constant'``, only the mean of `data` is subtracted. + bp : array_like of ints, optional + A sequence of break points. If given, an individual linear fit is + performed for each part of `data` between two break points. + Break points are specified as indices into `data`. This parameter + only has an effect when ``type == 'linear'``. + overwrite_data : bool, optional + If True, perform in place detrending and avoid a copy. Default is False + + Returns + ------- + ret : ndarray + The detrended input data. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> npoints = 1000 + >>> noise = rng.standard_normal(npoints) + >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise + >>> (signal.detrend(x) - noise).max() + 0.06 # random + + """ + if type not in ['linear', 'l', 'constant', 'c']: + raise ValueError("Trend type must be 'linear' or 'constant'.") + data = np.asarray(data) + dtype = data.dtype.char + if dtype not in 'dfDF': + dtype = 'd' + if type in ['constant', 'c']: + ret = data - np.mean(data, axis, keepdims=True) + return ret + else: + dshape = data.shape + N = dshape[axis] + bp = np.sort(np.unique(np.concatenate(np.atleast_1d(0, bp, N)))) + if np.any(bp > N): + raise ValueError("Breakpoints must be less than length " + "of data along given axis.") + + # Restructure data so that axis is along first dimension and + # all other dimensions are collapsed into second dimension + rnk = len(dshape) + if axis < 0: + axis = axis + rnk + newdata = np.moveaxis(data, axis, 0) + newdata_shape = newdata.shape + newdata = newdata.reshape(N, -1) + + if not overwrite_data: + newdata = newdata.copy() # make sure we have a copy + if newdata.dtype.char not in 'dfDF': + newdata = newdata.astype(dtype) + +# Nreg = len(bp) - 1 + # Find leastsq fit and remove it for each piece + for m in range(len(bp) - 1): + Npts = bp[m + 1] - bp[m] + A = np.ones((Npts, 2), dtype) + A[:, 0] = np.arange(1, Npts + 1, dtype=dtype) / Npts + sl = slice(bp[m], bp[m + 1]) + coef, resids, rank, s = linalg.lstsq(A, newdata[sl]) + newdata[sl] = newdata[sl] - A @ coef + + # Put data back in original shape. + newdata = newdata.reshape(newdata_shape) + ret = np.moveaxis(newdata, 0, axis) + return ret + + +def lfilter_zi(b, a): + """ + Construct initial conditions for lfilter for step response steady-state. + + Compute an initial state `zi` for the `lfilter` function that corresponds + to the steady state of the step response. 
+ + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + b, a : array_like (1-D) + The IIR filter coefficients. See `lfilter` for more + information. + + Returns + ------- + zi : 1-D ndarray + The initial state for the filter. + + See Also + -------- + lfilter, lfiltic, filtfilt + + Notes + ----- + A linear filter with order m has a state space representation (A, B, C, D), + for which the output y of the filter can be expressed as:: + + z(n+1) = A*z(n) + B*x(n) + y(n) = C*z(n) + D*x(n) + + where z(n) is a vector of length m, A has shape (m, m), B has shape + (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is + a scalar). lfilter_zi solves:: + + zi = A*zi + B + + In other words, it finds the initial condition for which the response + to an input of all ones is a constant. + + Given the filter coefficients `a` and `b`, the state space matrices + for the transposed direct form II implementation of the linear filter, + which is the implementation used by scipy.signal.lfilter, are:: + + A = scipy.linalg.companion(a).T + B = b[1:] - a[1:]*b[0] + + assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first + divided by a[0]. + + Examples + -------- + The following code creates a lowpass Butterworth filter. Then it + applies that filter to an array whose values are all 1.0; the + output is also all 1.0, as expected for a lowpass filter. If the + `zi` argument of `lfilter` had not been given, the output would have + shown the transient signal. 
+ + >>> from numpy import array, ones + >>> from scipy.signal import lfilter, lfilter_zi, butter + >>> b, a = butter(5, 0.25) + >>> zi = lfilter_zi(b, a) + >>> y, zo = lfilter(b, a, ones(10), zi=zi) + >>> y + array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) + + Another example: + + >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]) + >>> y, zf = lfilter(b, a, x, zi=zi*x[0]) + >>> y + array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528, + 0.44399389, 0.35505241]) + + Note that the `zi` argument to `lfilter` was computed using + `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no + transient until the input drops from 0.5 to 0.0. + + """ + + # FIXME: Can this function be replaced with an appropriate + # use of lfiltic? For example, when b,a = butter(N,Wn), + # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)). + # + + # We could use scipy.signal.normalize, but it uses warnings in + # cases where a ValueError is more appropriate, and it allows + # b to be 2D. + b = np.atleast_1d(b) + if b.ndim != 1: + raise ValueError("Numerator b must be 1-D.") + a = np.atleast_1d(a) + if a.ndim != 1: + raise ValueError("Denominator a must be 1-D.") + + while len(a) > 1 and a[0] == 0.0: + a = a[1:] + if a.size < 1: + raise ValueError("There must be at least one nonzero `a` coefficient.") + + if a[0] != 1.0: + # Normalize the coefficients so a[0] == 1. + b = b / a[0] + a = a / a[0] + + n = max(len(a), len(b)) + + # Pad a or b with zeros so they are the same length. 
+ if len(a) < n: + a = np.r_[a, np.zeros(n - len(a), dtype=a.dtype)] + elif len(b) < n: + b = np.r_[b, np.zeros(n - len(b), dtype=b.dtype)] + + IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T + B = b[1:] - a[1:] * b[0] + # Solve zi = A*zi + B + zi = np.linalg.solve(IminusA, B) + + # For future reference: we could also use the following + # explicit formulas to solve the linear system: + # + # zi = np.zeros(n - 1) + # zi[0] = B.sum() / IminusA[:,0].sum() + # asum = 1.0 + # csum = 0.0 + # for k in range(1,n-1): + # asum += a[k] + # csum += b[k] - a[k]*b[0] + # zi[k] = asum*zi[0] - csum + + return zi + + +def sosfilt_zi(sos): + """ + Construct initial conditions for sosfilt for step response steady-state. + + Compute an initial state `zi` for the `sosfilt` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + zi : ndarray + Initial conditions suitable for use with ``sosfilt``, shape + ``(n_sections, 2)``. + + See Also + -------- + sosfilt, zpk2sos + + Notes + ----- + .. versionadded:: 0.16.0 + + Examples + -------- + Filter a rectangular pulse that begins at time 0, with and without + the use of the `zi` argument of `scipy.signal.sosfilt`. 
+ + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sos = signal.butter(9, 0.125, output='sos') + >>> zi = signal.sosfilt_zi(sos) + >>> x = (np.arange(250) < 100).astype(int) + >>> f1 = signal.sosfilt(sos, x) + >>> f2, zo = signal.sosfilt(sos, x, zi=zi) + + >>> plt.plot(x, 'k--', label='x') + >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered') + >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + sos = np.asarray(sos) + if sos.ndim != 2 or sos.shape[1] != 6: + raise ValueError('sos must be shape (n_sections, 6)') + + if sos.dtype.kind in 'bui': + sos = sos.astype(np.float64) + + n_sections = sos.shape[0] + zi = np.empty((n_sections, 2), dtype=sos.dtype) + scale = 1.0 + for section in range(n_sections): + b = sos[section, :3] + a = sos[section, 3:] + zi[section] = scale * lfilter_zi(b, a) + # If H(z) = B(z)/A(z) is this section's transfer function, then + # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady + # state value of this section's step response. + scale *= b.sum() / a.sum() + + return zi + + +def _filtfilt_gust(b, a, x, axis=-1, irlen=None): + """Forward-backward IIR filter that uses Gustafsson's method. + + Apply the IIR filter defined by `(b,a)` to `x` twice, first forward + then backward, using Gustafsson's initial conditions [1]_. + + Let ``y_fb`` be the result of filtering first forward and then backward, + and let ``y_bf`` be the result of filtering first backward then forward. + Gustafsson's method is to compute initial conditions for the forward + pass and the backward pass such that ``y_fb == y_bf``. + + Parameters + ---------- + b : scalar or 1-D ndarray + Numerator coefficients of the filter. + a : scalar or 1-D ndarray + Denominator coefficients of the filter. + x : ndarray + Data to be filtered. + axis : int, optional + Axis of `x` to be filtered. Default is -1. 
+ irlen : int or None, optional + The length of the nonnegligible part of the impulse response. + If `irlen` is None, or if the length of the signal is less than + ``2 * irlen``, then no part of the impulse response is ignored. + + Returns + ------- + y : ndarray + The filtered data. + x0 : ndarray + Initial condition for the forward filter. + x1 : ndarray + Initial condition for the backward filter. + + Notes + ----- + Typically the return values `x0` and `x1` are not needed by the + caller. The intended use of these return values is in unit tests. + + References + ---------- + .. [1] F. Gustaffson. Determining the initial states in forward-backward + filtering. Transactions on Signal Processing, 46(4):988-992, 1996. + + """ + # In the comments, "Gustafsson's paper" and [1] refer to the + # paper referenced in the docstring. + + b = np.atleast_1d(b) + a = np.atleast_1d(a) + + order = max(len(b), len(a)) - 1 + if order == 0: + # The filter is just scalar multiplication, with no state. + scale = (b[0] / a[0])**2 + y = scale * x + return y, np.array([]), np.array([]) + + if axis != -1 or axis != x.ndim - 1: + # Move the axis containing the data to the end. + x = np.swapaxes(x, axis, x.ndim - 1) + + # n is the number of samples in the data to be filtered. + n = x.shape[-1] + + if irlen is None or n <= 2*irlen: + m = n + else: + m = irlen + + # Create Obs, the observability matrix (called O in the paper). + # This matrix can be interpreted as the operator that propagates + # an arbitrary initial state to the output, assuming the input is + # zero. + # In Gustafsson's paper, the forward and backward filters are not + # necessarily the same, so he has both O_f and O_b. We use the same + # filter in both directions, so we only need O. The same comment + # applies to S below. 
+ Obs = np.zeros((m, order)) + zi = np.zeros(order) + zi[0] = 1 + Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0] + for k in range(1, order): + Obs[k:, k] = Obs[:-k, 0] + + # Obsr is O^R (Gustafsson's notation for row-reversed O) + Obsr = Obs[::-1] + + # Create S. S is the matrix that applies the filter to the reversed + # propagated initial conditions. That is, + # out = S.dot(zi) + # is the same as + # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs. + # out = lfilter(b, a, tmp[::-1]) # Reverse and filter. + + # Equations (5) & (6) of [1] + S = lfilter(b, a, Obs[::-1], axis=0) + + # Sr is S^R (row-reversed S) + Sr = S[::-1] + + # M is [(S^R - O), (O^R - S)] + if m == n: + M = np.hstack((Sr - Obs, Obsr - S)) + else: + # Matrix described in section IV of [1]. + M = np.zeros((2*m, 2*order)) + M[:m, :order] = Sr - Obs + M[m:, order:] = Obsr - S + + # Naive forward-backward and backward-forward filters. + # These have large transients because the filters use zero initial + # conditions. + y_f = lfilter(b, a, x) + y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1] + + y_b = lfilter(b, a, x[..., ::-1])[..., ::-1] + y_bf = lfilter(b, a, y_b) + + delta_y_bf_fb = y_bf - y_fb + if m == n: + delta = delta_y_bf_fb + else: + start_m = delta_y_bf_fb[..., :m] + end_m = delta_y_bf_fb[..., -m:] + delta = np.concatenate((start_m, end_m), axis=-1) + + # ic_opt holds the "optimal" initial conditions. + # The following code computes the result shown in the formula + # of the paper between equations (6) and (7). + if delta.ndim == 1: + ic_opt = linalg.lstsq(M, delta)[0] + else: + # Reshape delta so it can be used as an array of multiple + # right-hand-sides in linalg.lstsq. + delta2d = delta.reshape(-1, delta.shape[-1]).T + ic_opt0 = linalg.lstsq(M, delta2d)[0].T + ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],)) + + # Now compute the filtered signal using equation (7) of [1]. + # First, form [S^R, O^R] and call it W. 
+ if m == n: + W = np.hstack((Sr, Obsr)) + else: + W = np.zeros((2*m, 2*order)) + W[:m, :order] = Sr + W[m:, order:] = Obsr + + # Equation (7) of [1] says + # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt] + # `wic` is (almost) the product on the right. + # W has shape (m, 2*order), and ic_opt has shape (..., 2*order), + # so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T, + # so wic has shape (..., m). + wic = ic_opt.dot(W.T) + + # `wic` is "almost" the product of W and the optimal ICs in equation + # (7)--if we're using a truncated impulse response (m < n), `wic` + # contains only the adjustments required for the ends of the signal. + # Here we form y_opt, taking this into account if necessary. + y_opt = y_fb + if m == n: + y_opt += wic + else: + y_opt[..., :m] += wic[..., :m] + y_opt[..., -m:] += wic[..., -m:] + + x0 = ic_opt[..., :order] + x1 = ic_opt[..., -order:] + if axis != -1 or axis != x.ndim - 1: + # Restore the data axis to its original position. + x0 = np.swapaxes(x0, axis, x.ndim - 1) + x1 = np.swapaxes(x1, axis, x.ndim - 1) + y_opt = np.swapaxes(y_opt, axis, x.ndim - 1) + + return y_opt, x0, x1 + + +def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad', + irlen=None): + """ + Apply a digital filter forward and backward to a signal. + + This function applies a linear digital filter twice, once forward and + once backwards. The combined filter has zero phase and a filter order + twice that of the original. + + The function provides options for handling the edges of the signal. + + The function `sosfiltfilt` (and filter design using ``output='sos'``) + should be preferred over `filtfilt` for most filtering tasks, as + second-order sections have fewer numerical problems. + + Parameters + ---------- + b : (N,) array_like + The numerator coefficient vector of the filter. + a : (N,) array_like + The denominator coefficient vector of the filter. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. 
+ x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is ``3 * max(len(a), len(b))``. + method : str, optional + Determines the method for handling the edges of the signal, either + "pad" or "gust". When `method` is "pad", the signal is padded; the + type of padding is determined by `padtype` and `padlen`, and `irlen` + is ignored. When `method` is "gust", Gustafsson's method is used, + and `padtype` and `padlen` are ignored. + irlen : int or None, optional + When `method` is "gust", `irlen` specifies the length of the + impulse response of the filter. If `irlen` is None, no part + of the impulse response is ignored. For a long signal, specifying + `irlen` can significantly improve the performance of the filter. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt + + Notes + ----- + When `method` is "pad", the function pads the data along the given axis + in one of three ways: odd, even or constant. The odd and even extensions + have the corresponding symmetry about the end point of the data. The + constant extension extends the data with the values at the end points. On + both the forward and backward passes, the initial condition of the + filter is found by using `lfilter_zi` and scaling it by the end point of + the extended data. 
+ + When `method` is "gust", Gustafsson's method [1]_ is used. Initial + conditions are chosen for the forward and backward passes so that the + forward-backward filter gives the same result as the backward-forward + filter. + + The option to use Gustaffson's method was added in scipy version 0.16.0. + + References + ---------- + .. [1] F. Gustaffson, "Determining the initial states in forward-backward + filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992, + 1996. + + Examples + -------- + The examples will use several functions from `scipy.signal`. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + First we create a one second signal that is the sum of two pure sine + waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz. + + >>> t = np.linspace(0, 1.0, 2001) + >>> xlow = np.sin(2 * np.pi * 5 * t) + >>> xhigh = np.sin(2 * np.pi * 250 * t) + >>> x = xlow + xhigh + + Now create a lowpass Butterworth filter with a cutoff of 0.125 times + the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`. + The result should be approximately ``xlow``, with no phase shift. + + >>> b, a = signal.butter(8, 0.125) + >>> y = signal.filtfilt(b, a, x, padlen=150) + >>> np.abs(y - xlow).max() + 9.1086182074789912e-06 + + We get a fairly clean result for this artificial example because + the odd extension is exact, and with the moderately long padding, + the filter's transients have dissipated by the time the actual data + is reached. In general, transient effects at the edges are + unavoidable. + + The following example demonstrates the option ``method="gust"``. + + First, create a filter. + + >>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied. + + `sig` is a random input signal to be filtered. 
+ + >>> rng = np.random.default_rng() + >>> n = 60 + >>> sig = rng.standard_normal(n)**3 + 3*rng.standard_normal(n).cumsum() + + Apply `filtfilt` to `sig`, once using the Gustafsson method, and + once using padding, and plot the results for comparison. + + >>> fgust = signal.filtfilt(b, a, sig, method="gust") + >>> fpad = signal.filtfilt(b, a, sig, padlen=50) + >>> plt.plot(sig, 'k-', label='input') + >>> plt.plot(fgust, 'b-', linewidth=4, label='gust') + >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad') + >>> plt.legend(loc='best') + >>> plt.show() + + The `irlen` argument can be used to improve the performance + of Gustafsson's method. + + Estimate the impulse response length of the filter. + + >>> z, p, k = signal.tf2zpk(b, a) + >>> eps = 1e-9 + >>> r = np.max(np.abs(p)) + >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + >>> approx_impulse_len + 137 + + Apply the filter to a longer signal, with and without the `irlen` + argument. The difference between `y1` and `y2` is small. For long + signals, using `irlen` gives a significant performance improvement. + + >>> x = rng.standard_normal(4000) + >>> y1 = signal.filtfilt(b, a, x, method='gust') + >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len) + >>> print(np.max(np.abs(y1 - y2))) + 2.875334415008979e-10 + + """ + b = np.atleast_1d(b) + a = np.atleast_1d(a) + x = np.asarray(x) + + if method not in ["pad", "gust"]: + raise ValueError("method must be 'pad' or 'gust'.") + + if method == "gust": + y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + return y + + # method == "pad" + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=max(len(a), len(b))) + + # Get the steady state of the filter's step response. + zi = lfilter_zi(b, a) + + # Reshape zi and create x0 so that zi*x0 broadcasts + # to the correct value for the 'zi' keyword argument + # to lfilter. 
+ zi_shape = [1] * x.ndim + zi_shape[axis] = zi.size + zi = np.reshape(zi, zi_shape) + x0 = axis_slice(ext, stop=1, axis=axis) + + # Forward filter. + (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0) + + # Backward filter. + # Create y0 so zi*y0 broadcasts appropriately. + y0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0) + + # Reverse y. + y = axis_reverse(y, axis=axis) + + if edge > 0: + # Slice the actual signal from the extended signal. + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + + return y + + +def _validate_pad(padtype, padlen, x, axis, ntaps): + """Helper to validate padding for filtfilt""" + if padtype not in ['even', 'odd', 'constant', None]: + raise ValueError(("Unknown value '%s' given to padtype. padtype " + "must be 'even', 'odd', 'constant', or None.") % + padtype) + + if padtype is None: + padlen = 0 + + if padlen is None: + # Original padding; preserved for backwards compatibility. + edge = ntaps * 3 + else: + edge = padlen + + # x's 'axis' dimension must be bigger than edge. + if x.shape[axis] <= edge: + raise ValueError("The length of the input vector x must be greater " + "than padlen, which is %d." % edge) + + if padtype is not None and edge > 0: + # Make an extension of length `edge` at each + # end of the input array. + if padtype == 'even': + ext = even_ext(x, edge, axis=axis) + elif padtype == 'odd': + ext = odd_ext(x, edge, axis=axis) + else: + ext = const_ext(x, edge, axis=axis) + else: + ext = x + return edge, ext + + +def _validate_x(x): + x = np.asarray(x) + if x.ndim == 0: + raise ValueError('x must be at least 1-D') + return x + + +def sosfilt(sos, x, axis=-1, zi=None): + """ + Filter data along one dimension using cascaded second-order sections. + + Filter a data sequence, `x`, using a digital IIR filter defined by + `sos`. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. 
Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the cascaded filter delays. It is a (at + least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where + ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]`` + replaced by 2. If `zi` is None or is not given then initial rest + (i.e. all zeros) is assumed. + Note that these initial conditions are *not* the same as the initial + conditions given by `lfiltic` or `lfilter_zi`. + + Returns + ------- + y : ndarray + The output of the digital filter. + zf : ndarray, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz + + Notes + ----- + The filter function is implemented as a series of second-order filters + with direct-form II transposed structure. It is designed to minimize + numerical precision errors for high-order filters. + + .. 
versionadded:: 0.16.0 + + Examples + -------- + Plot a 13th-order filter's impulse response using both `lfilter` and + `sosfilt`, showing the instability that results from trying to do a + 13th-order filter in a single stage (the numerical error pushes some poles + outside of the unit circle): + + >>> import matplotlib.pyplot as plt + >>> from scipy import signal + >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba') + >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos') + >>> x = signal.unit_impulse(700) + >>> y_tf = signal.lfilter(b, a, x) + >>> y_sos = signal.sosfilt(sos, x) + >>> plt.plot(y_tf, 'r', label='TF') + >>> plt.plot(y_sos, 'k', label='SOS') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + x = _validate_x(x) + sos, n_sections = _validate_sos(sos) + x_zi_shape = list(x.shape) + x_zi_shape[axis] = 2 + x_zi_shape = tuple([n_sections] + x_zi_shape) + inputs = [sos, x] + if zi is not None: + inputs.append(np.asarray(zi)) + dtype = np.result_type(*inputs) + if dtype.char not in 'fdgFDGO': + raise NotImplementedError("input type '%s' not supported" % dtype) + if zi is not None: + zi = np.array(zi, dtype) # make a copy so that we can operate in place + if zi.shape != x_zi_shape: + raise ValueError('Invalid zi shape. With axis=%r, an input with ' + 'shape %r, and an sos array with %d sections, zi ' + 'must have shape %r, got %r.' 
% + (axis, x.shape, n_sections, x_zi_shape, zi.shape)) + return_zi = True + else: + zi = np.zeros(x_zi_shape, dtype=dtype) + return_zi = False + axis = axis % x.ndim # make positive + x = np.moveaxis(x, axis, -1) + zi = np.moveaxis(zi, [0, axis + 1], [-2, -1]) + x_shape, zi_shape = x.shape, zi.shape + x = np.reshape(x, (-1, x.shape[-1])) + x = np.array(x, dtype, order='C') # make a copy, can modify in place + zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2))) + sos = sos.astype(dtype, copy=False) + _sosfilt(sos, x, zi) + x.shape = x_shape + x = np.moveaxis(x, -1, axis) + if return_zi: + zi.shape = zi_shape + zi = np.moveaxis(zi, [-2, -1], [0, axis + 1]) + out = (x, zi) + else: + out = x + return out + + +def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None): + """ + A forward-backward digital filter using cascaded second-order sections. + + See `filtfilt` for more complete information about this method. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. 
+ The default value is:: + + 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(), + (sos[:, 5] == 0).sum())) + + The extra subtraction at the end attempts to compensate for poles + and zeros at the origin (e.g. for odd-order filters) to yield + equivalent estimates of `padlen` to those of `filtfilt` for + second-order section filters built with `scipy.signal` functions. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + filtfilt, sosfilt, sosfilt_zi, sosfreqz + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import sosfiltfilt, butter + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Create an interesting signal to filter. + + >>> n = 201 + >>> t = np.linspace(0, 1, n) + >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*rng.standard_normal(n) + + Create a lowpass Butterworth filter, and use it to filter `x`. + + >>> sos = butter(4, 0.125, output='sos') + >>> y = sosfiltfilt(sos, x) + + For comparison, apply an 8th order filter using `sosfilt`. The filter + is initialized using the mean of the first four values of `x`. + + >>> from scipy.signal import sosfilt, sosfilt_zi + >>> sos8 = butter(8, 0.125, output='sos') + >>> zi = x[:4].mean() * sosfilt_zi(sos8) + >>> y2, zo = sosfilt(sos8, x, zi=zi) + + Plot the results. Note that the phase of `y` matches the input, while + `y2` has a significant phase delay. + + >>> plt.plot(t, x, alpha=0.5, label='x(t)') + >>> plt.plot(t, y, label='y(t)') + >>> plt.plot(t, y2, label='y2(t)') + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.grid(alpha=0.25) + >>> plt.xlabel('t') + >>> plt.show() + + """ + sos, n_sections = _validate_sos(sos) + x = _validate_x(x) + + # `method` is "pad"... 
+ ntaps = 2 * n_sections + 1 + ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum()) + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=ntaps) + + # These steps follow the same form as filtfilt with modifications + zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...) + zi_shape = [1] * x.ndim + zi_shape[axis] = 2 + zi.shape = [n_sections] + zi_shape + x_0 = axis_slice(ext, stop=1, axis=axis) + (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0) + y_0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0) + y = axis_reverse(y, axis=axis) + if edge > 0: + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + return y + + +def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True): + """ + Downsample the signal after applying an anti-aliasing filter. + + By default, an order 8 Chebyshev type I filter is used. A 30 point FIR + filter with Hamming window is used if `ftype` is 'fir'. + + Parameters + ---------- + x : array_like + The signal to be downsampled, as an N-dimensional array. + q : int + The downsampling factor. When using IIR downsampling, it is recommended + to call `decimate` multiple times for downsampling factors higher than + 13. + n : int, optional + The order of the filter (1 less than the length for 'fir'). Defaults to + 8 for 'iir' and 20 times the downsampling factor for 'fir'. + ftype : str {'iir', 'fir'} or ``dlti`` instance, optional + If 'iir' or 'fir', specifies the type of lowpass filter. If an instance + of an `dlti` object, uses that object to filter before downsampling. + axis : int, optional + The axis along which to decimate. + zero_phase : bool, optional + Prevent phase shift by filtering with `filtfilt` instead of `lfilter` + when using an IIR filter, and shifting the outputs back by the filter's + group delay when using an FIR filter. The default value of ``True`` is + recommended, since a phase shift is generally not desired. 
+ + .. versionadded:: 0.18.0 + + Returns + ------- + y : ndarray + The down-sampled signal. + + See Also + -------- + resample : Resample up or down using the FFT method. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The ``zero_phase`` keyword was added in 0.18.0. + The possibility to use instances of ``dlti`` as ``ftype`` was added in + 0.18.0. + + Examples + -------- + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Define wave parameters. + + >>> wave_duration = 3 + >>> sample_rate = 100 + >>> freq = 2 + >>> q = 5 + + Calculate number of samples. + + >>> samples = wave_duration*sample_rate + >>> samples_decimated = int(samples/q) + + Create cosine wave. + + >>> x = np.linspace(0, wave_duration, samples, endpoint=False) + >>> y = np.cos(x*np.pi*freq*2) + + Decimate cosine wave. + + >>> ydem = signal.decimate(y, q) + >>> xnew = np.linspace(0, wave_duration, samples_decimated, endpoint=False) + + Plot original and decimated waves. + + >>> plt.plot(x, y, '.-', xnew, ydem, 'o-') + >>> plt.xlabel('Time, Seconds') + >>> plt.legend(['data', 'decimated'], loc='best') + >>> plt.show() + + """ + + x = np.asarray(x) + q = operator.index(q) + + if n is not None: + n = operator.index(n) + + result_type = x.dtype + if not np.issubdtype(result_type, np.inexact) \ + or result_type.type == np.float16: + # upcast integers and float16 to float64 + result_type = np.float64 + + if ftype == 'fir': + if n is None: + half_len = 10 * q # reasonable cutoff for our sinc-like function + n = 2 * half_len + b, a = firwin(n+1, 1. / q, window='hamming'), 1. 
+ b = np.asarray(b, dtype=result_type) + a = np.asarray(a, dtype=result_type) + elif ftype == 'iir': + iir_use_sos = True + if n is None: + n = 8 + sos = cheby1(n, 0.05, 0.8 / q, output='sos') + sos = np.asarray(sos, dtype=result_type) + elif isinstance(ftype, dlti): + system = ftype._as_zpk() + if system.poles.shape[0] == 0: + # FIR + system = ftype._as_tf() + b, a = system.num, system.den + ftype = 'fir' + elif (any(np.iscomplex(system.poles)) + or any(np.iscomplex(system.poles)) + or np.iscomplex(system.gain)): + # sosfilt & sosfiltfilt don't handle complex coeffs + iir_use_sos = False + system = ftype._as_tf() + b, a = system.num, system.den + else: + iir_use_sos = True + sos = zpk2sos(system.zeros, system.poles, system.gain) + sos = np.asarray(sos, dtype=result_type) + else: + raise ValueError('invalid ftype') + + sl = [slice(None)] * x.ndim + + if ftype == 'fir': + b = b / a + if zero_phase: + y = resample_poly(x, 1, q, axis=axis, window=b) + else: + # upfirdn is generally faster than lfilter by a factor equal to the + # downsampling factor, since it only calculates the needed outputs + n_out = x.shape[axis] // q + bool(x.shape[axis] % q) + y = upfirdn(b, x, up=1, down=q, axis=axis) + sl[axis] = slice(None, n_out, None) + + else: # IIR case + if zero_phase: + if iir_use_sos: + y = sosfiltfilt(sos, x, axis=axis) + else: + y = filtfilt(b, a, x, axis=axis) + else: + if iir_use_sos: + y = sosfilt(sos, x, axis=axis) + else: + y = lfilter(b, a, x, axis=axis) + + sl[axis] = slice(None, None, q) + + return y[tuple(sl)] diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..16f4360de903dca545e5ddcac66805e3d809c4bd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so differ diff 
--git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3a68984673eb56beed3dd527f4364f0f940f9ac5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/_upfirdn.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_upfirdn.py new file mode 100644 index 0000000000000000000000000000000000000000..d64cc142ff194b1404e380507289ddbaffab3359 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/_upfirdn.py @@ -0,0 +1,216 @@ +# Code adapted from "upfirdn" python library with permission: +# +# Copyright (c) 2009, Motorola, Inc +# +# All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of Motorola nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np + +from ._upfirdn_apply import _output_len, _apply, mode_enum + +__all__ = ['upfirdn', '_output_len'] + +_upfirdn_modes = [ + 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect', + 'antisymmetric', 'antireflect', 'line', +] + + +def _pad_h(h, up): + """Store coefficients in a transposed, flipped arrangement. + + For example, suppose upRate is 3, and the + input number of coefficients is 10, represented as h[0], ..., h[9]. + + Then the internal buffer will look like this:: + + h[9], h[6], h[3], h[0], // flipped phase 0 coefs + 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) + 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded) + + """ + h_padlen = len(h) + (-len(h) % up) + h_full = np.zeros(h_padlen, h.dtype) + h_full[:len(h)] = h + h_full = h_full.reshape(-1, up).T[:, ::-1].ravel() + return h_full + + +def _check_mode(mode): + mode = mode.lower() + enum = mode_enum(mode) + return enum + + +class _UpFIRDn: + """Helper for resampling.""" + + def __init__(self, h, x_dtype, up, down): + h = np.asarray(h) + if h.ndim != 1 or h.size == 0: + raise ValueError('h must be 1-D with non-zero length') + self._output_type = np.result_type(h.dtype, x_dtype, np.float32) + h = np.asarray(h, self._output_type) + self._up = int(up) + self._down = int(down) + if self._up < 1 or self._down < 1: + raise ValueError('Both up and down must be >= 1') + # This both transposes, and "flips" each phase for filtering + 
self._h_trans_flip = _pad_h(h, self._up) + self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip) + self._h_len_orig = len(h) + + def apply_filter(self, x, axis=-1, mode='constant', cval=0): + """Apply the prepared filter to the specified axis of N-D signal x.""" + output_len = _output_len(self._h_len_orig, x.shape[axis], + self._up, self._down) + # Explicit use of np.int64 for output_shape dtype avoids OverflowError + # when allocating large array on platforms where intp is 32 bits. + output_shape = np.asarray(x.shape, dtype=np.int64) + output_shape[axis] = output_len + out = np.zeros(output_shape, dtype=self._output_type, order='C') + axis = axis % x.ndim + mode = _check_mode(mode) + _apply(np.asarray(x, self._output_type), + self._h_trans_flip, out, + self._up, self._down, axis, mode, cval) + return out + + +def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0): + """Upsample, FIR filter, and downsample. + + Parameters + ---------- + h : array_like + 1-D FIR (finite-impulse response) filter coefficients. + x : array_like + Input signal array. + up : int, optional + Upsampling rate. Default is 1. + down : int, optional + Downsampling rate. Default is 1. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + mode : str, optional + The signal extension mode to use. The set + ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to + modes provided by `numpy.pad`. ``"smooth"`` implements a smooth + extension by extending based on the slope of the last 2 points at each + end of the array. ``"antireflect"`` and ``"antisymmetric"`` are + anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode + `"line"` extends the signal based on a linear trend defined by the + first and last points along the ``axis``. + + .. 
def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
    """Upsample, FIR filter, and downsample.

    Parameters
    ----------
    h : array_like
        1-D FIR (finite-impulse response) filter coefficients.
    x : array_like
        Input signal array.
    up : int, optional
        Upsampling rate.  Default is 1.
    down : int, optional
        Downsampling rate.  Default is 1.
    axis : int, optional
        Axis of `x` along which the linear filter is applied.  Default -1.
    mode : str, optional
        Signal extension mode.  ``{"constant", "symmetric", "reflect",
        "edge", "wrap"}`` match `numpy.pad`; ``"smooth"`` extends using the
        slope of the last two points; ``"antireflect"``/``"antisymmetric"``
        are anti-symmetric versions of reflect/symmetric; ``"line"``
        extends by the linear trend of the first and last points.

        .. versionadded:: 1.4.0
    cval : float, optional
        Constant value used when ``mode == "constant"``.

        .. versionadded:: 1.4.0

    Returns
    -------
    y : ndarray
        Output signal; same shape as `x` except along `axis`, whose length
        is determined by `h`, `up` and `down`.

    Notes
    -----
    Polyphase implementation (Vaidyanathan, *Multirate Systems and Filter
    Banks*, Fig. 4.3-8d): O(N/P) per output sample instead of the O(N*Q)
    of naive zero-insertion upsampling, filtering, and decimation.

    .. versionadded:: 0.18
    """
    x = np.asarray(x)
    # Preparing the filter once and applying it is equivalent to (but
    # faster than) np.apply_along_axis.
    resampler = _UpFIRDn(h, x.dtype, up, down)
    return resampler.apply_filter(x, axis, mode, cval)
def sawtooth(t, width=1):
    """Return a periodic sawtooth or triangle waveform.

    The waveform has period ``2*pi``: it rises from -1 to 1 on
    ``[0, width*2*pi)`` and falls from 1 to -1 on ``[width*2*pi, 2*pi)``.
    `width` must lie in [0, 1]; positions where it does not yield NaN.
    Not band-limited: harmonics alias across the spectrum.

    Parameters
    ----------
    t : array_like
        Time.
    width : array_like, optional
        Proportion of the cycle spent rising.  Default 1 (rising ramp);
        0 gives a falling ramp, 0.5 a triangle wave.  May be an array of
        the same length as `t` to vary the shape over time.

    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.
    """
    t, w = asarray(t), asarray(width)
    # Broadcast each argument against the other.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # NOTE(review): `in ['fFdD']` compares against the single 4-char
    # string, so this branch never matches and the output is always
    # float64 -- preserved as-is to keep behaviour identical.
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    invalid = (w > 1) | (w < 0)        # width outside [0, 1] -> NaN
    place(y, invalid, nan)

    tmod = mod(t, 2 * pi)              # position within one period

    # Rising ramp: tmod / (pi*w) - 1 on [0, width*2*pi).
    rising = (1 - invalid) & (tmod < w * 2 * pi)
    place(y, rising,
          extract(rising, tmod) / (pi * extract(rising, w)) - 1)

    # Falling ramp: (pi*(w+1) - tmod) / (pi*(1-w)) on the remainder.
    falling = (1 - invalid) & (1 - rising)
    tf, wf = extract(falling, tmod), extract(falling, w)
    place(y, falling, (pi * (wf + 1) - tf) / (pi * (1 - wf)))
    return y


def square(t, duty=0.5):
    """Return a periodic square-wave waveform.

    The wave has period ``2*pi``: +1 on ``[0, 2*pi*duty)`` and -1 on
    ``[2*pi*duty, 2*pi)``.  `duty` must lie in [0, 1]; positions where it
    does not yield NaN.  Not band-limited.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : array_like, optional
        Duty cycle.  Default 0.5 (50%).  May be an array of the same
        length as `t` to vary the shape over time.

    Returns
    -------
    y : ndarray
        Output array containing the square waveform.
    """
    t, d = asarray(t), asarray(duty)
    # Broadcast each argument against the other.
    d = asarray(d + (t - t))
    t = asarray(t + (d - d))
    # NOTE(review): as in `sawtooth` above, `in ['fFdD']` never matches,
    # so the output is always float64 -- preserved as-is.
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'

    y = zeros(t.shape, ytype)

    invalid = (d > 1) | (d < 0)        # duty outside [0, 1] -> NaN
    place(y, invalid, nan)

    tmod = mod(t, 2 * pi)              # position within one period

    # +1 while tmod < duty*2*pi, -1 for the rest of the period.
    high = (1 - invalid) & (tmod < d * 2 * pi)
    place(y, high, 1)

    low = (1 - invalid) & (1 - high)
    place(y, low, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
               retenv=False):
    """Return a Gaussian modulated sinusoid ``exp(-a t^2) exp(1j*2*pi*fc*t)``.

    Parameters
    ----------
    t : ndarray or the string 'cutoff'
        Input times, or the string 'cutoff' to request the cutoff time.
    fc : float, optional
        Center frequency (e.g. Hz).  Must be >= 0.  Default 1000.
    bw : float, optional
        Fractional bandwidth in the frequency domain.  Must be > 0.
        Default 0.5.
    bwr : float, optional
        Reference level (dB, must be < 0) at which the fractional
        bandwidth is measured.  Default -6.
    tpr : float, optional
        When ``t == 'cutoff'``, the level (dB, must be < 0) below which
        the pulse amplitude falls at the returned cutoff time.  Default -60.
    retquad : bool, optional
        If True, also return the quadrature (imaginary) part.  Default False.
    retenv : bool, optional
        If True, also return the envelope.  Default False.

    Returns
    -------
    yI : ndarray
        Real (in-phase) part of the signal.  Always returned.
    yQ : ndarray
        Quadrature part.  Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of the signal.  Only returned if `retenv` is True.

    Raises
    ------
    ValueError
        If `fc` < 0, `bw` <= 0, `bwr` >= 0, `tpr` >= 0 in cutoff mode, or
        `t` is a string other than 'cutoff'.
    """
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)

    # Fourier pair: exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f).
    # Solve g(fc*bw/2) == ref for a, where ref = 10**(bwr/20):
    #   pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
    ref = pow(10.0, bwr / 20.0)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    if isinstance(t, str):
        if t != 'cutoff':
            raise ValueError("If `t` is a string, it must be 'cutoff'")
        # Solve exp(-a tc**2) == tref for tc, with tref = 10**(tpr/20).
        if tpr >= 0:
            raise ValueError("Reference level for time cutoff must "
                             "be < 0 dB")
        tref = pow(10.0, tpr / 20.0)
        return sqrt(-log(tref) / a)

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    if retquad and retenv:
        return yI, yQ, yenv
    if retquad:
        return yI, yQ
    if retenv:
        return yI, yenv
    return yI
+ It determines whether the vertex of the parabola that is the graph + of the frequency is at t=0 or t=t1. + + Returns + ------- + y : ndarray + A numpy array containing the signal evaluated at `t` with the + requested time-varying frequency. More precisely, the function + returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral + (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below. + + See Also + -------- + sweep_poly + + Notes + ----- + There are four options for the `method`. The following formulas give + the instantaneous frequency (in Hz) of the signal generated by + `chirp()`. For convenience, the shorter names shown below may also be + used. + + linear, lin, li: + + ``f(t) = f0 + (f1 - f0) * t / t1`` + + quadratic, quad, q: + + The graph of the frequency f(t) is a parabola through (0, f0) and + (t1, f1). By default, the vertex of the parabola is at (0, f0). + If `vertex_zero` is False, then the vertex is at (t1, f1). The + formula is: + + if vertex_zero is True: + + ``f(t) = f0 + (f1 - f0) * t**2 / t1**2`` + + else: + + ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2`` + + To use a more general quadratic function, or an arbitrary + polynomial, use the function `scipy.signal.sweep_poly`. + + logarithmic, log, lo: + + ``f(t) = f0 * (f1/f0)**(t/t1)`` + + f0 and f1 must be nonzero and have the same sign. + + This signal is also known as a geometric or exponential chirp. + + hyperbolic, hyp: + + ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)`` + + f0 and f1 must be nonzero. 
+ + Examples + -------- + The following will be used in the examples: + + >>> import numpy as np + >>> from scipy.signal import chirp, spectrogram + >>> import matplotlib.pyplot as plt + + For the first example, we'll plot the waveform for a linear chirp + from 6 Hz to 1 Hz over 10 seconds: + + >>> t = np.linspace(0, 10, 1500) + >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear') + >>> plt.plot(t, w) + >>> plt.title("Linear Chirp, f(0)=6, f(10)=1") + >>> plt.xlabel('t (sec)') + >>> plt.show() + + For the remaining examples, we'll use higher frequency ranges, + and demonstrate the result using `scipy.signal.spectrogram`. + We'll use a 4 second interval sampled at 7200 Hz. + + >>> fs = 7200 + >>> T = 4 + >>> t = np.arange(0, int(T*fs)) / fs + + We'll use this function to plot the spectrogram in each example. + + >>> def plot_spectrogram(title, w, fs): + ... ff, tt, Sxx = spectrogram(w, fs=fs, nperseg=256, nfft=576) + ... fig, ax = plt.subplots() + ... ax.pcolormesh(tt, ff[:145], Sxx[:145], cmap='gray_r', + ... shading='gouraud') + ... ax.set_title(title) + ... ax.set_xlabel('t (sec)') + ... ax.set_ylabel('Frequency (Hz)') + ... ax.grid(True) + ... + + Quadratic chirp from 1500 Hz to 250 Hz + (vertex of the parabolic curve of the frequency is at t=0): + + >>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic') + >>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250', w, fs) + >>> plt.show() + + Quadratic chirp from 1500 Hz to 250 Hz + (vertex of the parabolic curve of the frequency is at t=T): + + >>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic', + ... vertex_zero=False) + >>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250\\n' + + ... 
'(vertex_zero=False)', w, fs) + >>> plt.show() + + Logarithmic chirp from 1500 Hz to 250 Hz: + + >>> w = chirp(t, f0=1500, f1=250, t1=T, method='logarithmic') + >>> plot_spectrogram(f'Logarithmic Chirp, f(0)=1500, f({T})=250', w, fs) + >>> plt.show() + + Hyperbolic chirp from 1500 Hz to 250 Hz: + + >>> w = chirp(t, f0=1500, f1=250, t1=T, method='hyperbolic') + >>> plot_spectrogram(f'Hyperbolic Chirp, f(0)=1500, f({T})=250', w, fs) + >>> plt.show() + + """ + # 'phase' is computed in _chirp_phase, to make testing easier. + phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) + # Convert phi to radians. + phi *= pi / 180 + return cos(phase + phi) + + +def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): + """ + Calculate the phase used by `chirp` to generate its output. + + See `chirp` for a description of the arguments. + + """ + t = asarray(t) + f0 = float(f0) + t1 = float(t1) + f1 = float(f1) + if method in ['linear', 'lin', 'li']: + beta = (f1 - f0) / t1 + phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) + + elif method in ['quadratic', 'quad', 'q']: + beta = (f1 - f0) / (t1 ** 2) + if vertex_zero: + phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) + else: + phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) + + elif method in ['logarithmic', 'log', 'lo']: + if f0 * f1 <= 0.0: + raise ValueError("For a logarithmic chirp, f0 and f1 must be " + "nonzero and have the same sign.") + if f0 == f1: + phase = 2 * pi * f0 * t + else: + beta = t1 / log(f1 / f0) + phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) + + elif method in ['hyperbolic', 'hyp']: + if f0 == 0 or f1 == 0: + raise ValueError("For a hyperbolic chirp, f0 and f1 must be " + "nonzero.") + if f0 == f1: + # Degenerate case: constant frequency. + phase = 2 * pi * f0 * t + else: + # Singular point: the instantaneous frequency blows up + # when t == sing. 
+ sing = -f1 * t1 / (f0 - f1) + phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing)) + + else: + raise ValueError("method must be 'linear', 'quadratic', 'logarithmic'," + " or 'hyperbolic', but a value of %r was given." + % method) + + return phase + + +def sweep_poly(t, poly, phi=0): + """ + Frequency-swept cosine generator, with a time-dependent frequency. + + This function generates a sinusoidal function whose instantaneous + frequency varies with time. The frequency at time `t` is given by + the polynomial `poly`. + + Parameters + ---------- + t : ndarray + Times at which to evaluate the waveform. + poly : 1-D array_like or instance of numpy.poly1d + The desired frequency expressed as a polynomial. If `poly` is + a list or ndarray of length n, then the elements of `poly` are + the coefficients of the polynomial, and the instantaneous + frequency is + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` + + If `poly` is an instance of numpy.poly1d, then the + instantaneous frequency is + + ``f(t) = poly(t)`` + + phi : float, optional + Phase offset, in degrees, Default: 0. + + Returns + ------- + sweep_poly : ndarray + A numpy array containing the signal evaluated at `t` with the + requested time-varying frequency. More precisely, the function + returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral + (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above. + + See Also + -------- + chirp + + Notes + ----- + .. versionadded:: 0.8.0 + + If `poly` is a list or ndarray of length `n`, then the elements of + `poly` are the coefficients of the polynomial, and the instantaneous + frequency is: + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` + + If `poly` is an instance of `numpy.poly1d`, then the instantaneous + frequency is: + + ``f(t) = poly(t)`` + + Finally, the output `s` is: + + ``cos(phase + (pi/180)*phi)`` + + where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``, + ``f(t)`` as defined above. 
def sweep_poly(t, poly, phi=0):
    """Frequency-swept cosine whose instantaneous frequency is ``poly(t)``.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency as a polynomial: either the coefficients
        (highest power first, so ``f(t) = poly[0]*t**(n-1) + ... +
        poly[n-1]``) or a `numpy.poly1d` with ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees.  Default 0.

    Returns
    -------
    sweep_poly : ndarray
        ``cos(phase + (pi/180)*phi)`` where ``phase`` is the integral from
        0 to `t` of ``2*pi*f(t)``.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    # The phase is computed in _sweep_poly_phase, to make testing easier.
    phase = _sweep_poly_phase(t, poly)
    return cos(phase + phi * (pi / 180))


def _sweep_poly_phase(t, poly):
    """Integrated phase used by `sweep_poly`; see `sweep_poly` for args."""
    # polyint handles lists, ndarrays and poly1d instances automatically.
    return 2 * pi * polyval(polyint(poly), t)


def unit_impulse(shape, idx=None, dtype=float):
    """Unit impulse signal (discrete delta function) or unit basis vector.

    Parameters
    ----------
    shape : int or tuple of int
        Number of samples (1-D) or output shape (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index of the 1.  None puts it at index 0 in every dimension;
        'mid' centers it at ``shape // 2`` in every dimension; a single
        int is broadcast to all dimensions.
    dtype : data-type, optional
        Desired output dtype.  Default ``numpy.float64``.

    Returns
    -------
    y : ndarray
        Array of zeros with a single 1 at `idx`.

    Notes
    -----
    The 1-D case is also known as the Kronecker delta.

    .. versionadded:: 0.19.0
    """
    impulse = zeros(shape, dtype)
    dims = np.atleast_1d(shape)
    if idx is None:
        idx = (0,) * len(dims)
    elif idx == 'mid':
        idx = tuple(dims // 2)
    elif not hasattr(idx, "__iter__"):
        idx = (idx,) * len(dims)
    impulse[idx] = 1
    return impulse
_msg="""scipy.signal.%s is deprecated in SciPy 1.12 and will be removed
in SciPy 1.15. We recommend using PyWavelets instead.
"""


def daub(p):
    """
    The coefficients for the FIR low-pass filter producing Daubechies wavelets.

    .. deprecated:: 1.12.0

        scipy.signal.daub is deprecated in SciPy 1.12 and will be removed
        in SciPy 1.15. We recommend using PyWavelets instead.

    p>=1 gives the order of the zero at f=1/2.
    There are 2p filter coefficients.

    Parameters
    ----------
    p : int
        Order of the zero at f=1/2, can have values from 1 to 34.

    Returns
    -------
    daub : ndarray
        The 2p filter coefficients, normalized so they sum to sqrt(2).

    Raises
    ------
    ValueError
        If p < 1 or p >= 35.
    """
    warnings.warn(_msg % 'daub', DeprecationWarning, stacklevel=2)

    sqrt = np.sqrt
    if p < 1:
        raise ValueError("p must be at least 1.")
    if p == 1:
        c = 1 / sqrt(2)
        return np.array([c, c])
    elif p == 2:
        f = sqrt(2) / 8
        c = sqrt(3)
        return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
    elif p == 3:
        tmp = 12 * sqrt(10)
        z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
        z1c = np.conj(z1)
        f = sqrt(2) / 8
        d0 = np.real((1 - z1) * (1 - z1c))
        a0 = np.real(z1 * z1c)
        a1 = 2 * np.real(z1)
        return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
                                  a0 - 3 * a1 + 3, 3 - a1, 1])
    elif p < 35:
        # Construct the Daubechies polynomial and factor it.
        # FIX(review): the former nested `if p < 35` was always true here,
        # making its "try different polynomial" else-branch unreachable
        # dead code; it has been removed with no behaviour change.
        P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
        yj = np.roots(P)
        # For each root, compute the two z roots, select the one with
        # |z| > 1, and build up the final polynomial.
        c = np.poly1d([1, 1])**p
        q = np.poly1d([1])
        for k in range(p - 1):
            yval = yj[k]
            part = 2 * sqrt(yval * (yval - 1))
            const = 1 - 2 * yval
            z1 = const + part
            if (abs(z1)) < 1:
                z1 = const - part
            q = q * [1, -z1]

        q = c * np.real(q)
        # Normalize so the coefficients sum to sqrt(2).
        q = q / np.sum(q) * sqrt(2)
        return q.c[::-1]
    else:
        raise ValueError("Polynomial factorization does not work "
                         "well for p too large.")


def qmf(hk):
    """
    Return high-pass qmf filter from low-pass

    .. deprecated:: 1.12.0

        scipy.signal.qmf is deprecated in SciPy 1.12 and will be removed
        in SciPy 1.15. We recommend using PyWavelets instead.

    Parameters
    ----------
    hk : array_like
        Coefficients of high-pass filter.

    Returns
    -------
    array_like
        High-pass filter coefficients: `hk` reversed, with alternating
        signs applied.
    """
    warnings.warn(_msg % 'qmf', DeprecationWarning, stacklevel=2)

    N = len(hk) - 1
    # Alternating signs (+1, -1, +1, ...) applied to the reversed taps.
    asgn = [(-1) ** k for k in range(N + 1)]
    return hk[::-1] * np.array(asgn)
+ + """ + warnings.warn(_msg % 'cascade', DeprecationWarning, stacklevel=2) + + N = len(hk) - 1 + + if (J > 30 - np.log2(N + 1)): + raise ValueError("Too many levels.") + if (J < 1): + raise ValueError("Too few levels.") + + # construct matrices needed + nn, kk = np.ogrid[:N, :N] + s2 = np.sqrt(2) + # append a zero so that take works + thk = np.r_[hk, 0] + gk = qmf(hk) + tgk = np.r_[gk, 0] + + indx1 = np.clip(2 * nn - kk, -1, N + 1) + indx2 = np.clip(2 * nn - kk + 1, -1, N + 1) + m = np.empty((2, 2, N, N), 'd') + m[0, 0] = np.take(thk, indx1, 0) + m[0, 1] = np.take(thk, indx2, 0) + m[1, 0] = np.take(tgk, indx1, 0) + m[1, 1] = np.take(tgk, indx2, 0) + m *= s2 + + # construct the grid of points + x = np.arange(0, N * (1 << J), dtype=float) / (1 << J) + phi = 0 * x + + psi = 0 * x + + # find phi0, and phi1 + lam, v = eig(m[0, 0]) + ind = np.argmin(np.absolute(lam - 1)) + # a dictionary with a binary representation of the + # evaluation points x < 1 -- i.e. position is 0.xxxx + v = np.real(v[:, ind]) + # need scaling function to integrate to 1 so find + # eigenvector normalized to sum(v,axis=0)=1 + sm = np.sum(v) + if sm < 0: # need scaling function to integrate to 1 + v = -v + sm = -sm + bitdic = {'0': v / sm} + bitdic['1'] = np.dot(m[0, 1], bitdic['0']) + step = 1 << J + phi[::step] = bitdic['0'] + phi[(1 << (J - 1))::step] = bitdic['1'] + psi[::step] = np.dot(m[1, 0], bitdic['0']) + psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0']) + # descend down the levels inserting more and more values + # into bitdic -- store the values in the correct location once we + # have computed them -- stored in the dictionary + # for quicker use later. 
+ prevkeys = ['1'] + for level in range(2, J + 1): + newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys] + fac = 1 << (J - level) + for key in newkeys: + # convert key to number + num = 0 + for pos in range(level): + if key[pos] == '1': + num += (1 << (level - 1 - pos)) + pastphi = bitdic[key[1:]] + ii = int(key[0]) + temp = np.dot(m[0, ii], pastphi) + bitdic[key] = temp + phi[num * fac::step] = temp + psi[num * fac::step] = np.dot(m[1, ii], pastphi) + prevkeys = newkeys + + return x, phi, psi + + +def morlet(M, w=5.0, s=1.0, complete=True): + """ + Complex Morlet wavelet. + + .. deprecated:: 1.12.0 + + scipy.signal.morlet is deprecated in SciPy 1.12 and will be removed + in SciPy 1.15. We recommend using PyWavelets instead. + + Parameters + ---------- + M : int + Length of the wavelet. + w : float, optional + Omega0. Default is 5 + s : float, optional + Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1. + complete : bool, optional + Whether to use the complete or the standard version. + + Returns + ------- + morlet : (M,) ndarray + + See Also + -------- + morlet2 : Implementation of Morlet wavelet, compatible with `cwt`. + scipy.signal.gausspulse + + Notes + ----- + The standard version:: + + pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) + + This commonly used wavelet is often referred to simply as the + Morlet wavelet. Note that this simplified version can cause + admissibility problems at low values of `w`. + + The complete version:: + + pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) + + This version has a correction + term to improve admissibility. For `w` greater than 5, the + correction term is negligible. + + Note that the energy of the return wavelet is not normalised + according to `s`. + + The fundamental frequency of this wavelet in Hz is given + by ``f = 2*s*w*r / M`` where `r` is the sampling rate. + + Note: This function was created before `cwt` and is not compatible + with it. 
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> M = 100 + >>> s = 4.0 + >>> w = 2.0 + >>> wavelet = signal.morlet(M, s, w) + >>> plt.plot(wavelet.real, label="real") + >>> plt.plot(wavelet.imag, label="imag") + >>> plt.legend() + >>> plt.show() + + """ + warnings.warn(_msg % 'morlet', DeprecationWarning, stacklevel=2) + + x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M) + output = np.exp(1j * w * x) + + if complete: + output -= np.exp(-0.5 * (w**2)) + + output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25) + + return output + + +def ricker(points, a): + """ + Return a Ricker wavelet, also known as the "Mexican hat wavelet". + + .. deprecated:: 1.12.0 + + scipy.signal.ricker is deprecated in SciPy 1.12 and will be removed + in SciPy 1.15. We recommend using PyWavelets instead. + + It models the function: + + ``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``, + + where ``A = 2/(sqrt(3*a)*(pi**0.25))``. + + Parameters + ---------- + points : int + Number of points in `vector`. + Will be centered around 0. + a : scalar + Width parameter of the wavelet. + + Returns + ------- + vector : (N,) ndarray + Array of length `points` in shape of ricker curve. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> points = 100 + >>> a = 4.0 + >>> vec2 = signal.ricker(points, a) + >>> print(len(vec2)) + 100 + >>> plt.plot(vec2) + >>> plt.show() + + """ + warnings.warn(_msg % 'ricker', DeprecationWarning, stacklevel=2) + return _ricker(points, a) + + +def _ricker(points, a): + A = 2 / (np.sqrt(3 * a) * (np.pi**0.25)) + wsq = a**2 + vec = np.arange(0, points) - (points - 1.0) / 2 + xsq = vec**2 + mod = (1 - xsq / wsq) + gauss = np.exp(-xsq / (2 * wsq)) + total = A * mod * gauss + return total + + +def morlet2(M, s, w=5): + """ + Complex Morlet wavelet, designed to work with `cwt`. + + .. deprecated:: 1.12.0 + + scipy.signal.morlet2 is deprecated in SciPy 1.12 and will be removed + in SciPy 1.15. 
We recommend using PyWavelets instead. + + Returns the complete version of morlet wavelet, normalised + according to `s`:: + + exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s) + + Parameters + ---------- + M : int + Length of the wavelet. + s : float + Width parameter of the wavelet. + w : float, optional + Omega0. Default is 5 + + Returns + ------- + morlet : (M,) ndarray + + See Also + -------- + morlet : Implementation of Morlet wavelet, incompatible with `cwt` + + Notes + ----- + + .. versionadded:: 1.4.0 + + This function was designed to work with `cwt`. Because `morlet2` + returns an array of complex numbers, the `dtype` argument of `cwt` + should be set to `complex128` for best results. + + Note the difference in implementation with `morlet`. + The fundamental frequency of this wavelet in Hz is given by:: + + f = w*fs / (2*s*np.pi) + + where ``fs`` is the sampling rate and `s` is the wavelet width parameter. + Similarly we can get the wavelet width parameter at ``f``:: + + s = w*fs / (2*f*np.pi) + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> M = 100 + >>> s = 4.0 + >>> w = 2.0 + >>> wavelet = signal.morlet2(M, s, w) + >>> plt.plot(abs(wavelet)) + >>> plt.show() + + This example shows basic use of `morlet2` with `cwt` in time-frequency + analysis: + + >>> t, dt = np.linspace(0, 1, 200, retstep=True) + >>> fs = 1/dt + >>> w = 6. 
+ >>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t) + >>> freq = np.linspace(1, fs/2, 100) + >>> widths = w*fs / (2*freq*np.pi) + >>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w) + >>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud') + >>> plt.show() + + """ + warnings.warn(_msg % 'morlet2', DeprecationWarning, stacklevel=2) + + x = np.arange(0, M) - (M - 1.0) / 2 + x = x / s + wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25) + output = np.sqrt(1/s) * wavelet + return output + + +def cwt(data, wavelet, widths, dtype=None, **kwargs): + """ + Continuous wavelet transform. + + .. deprecated:: 1.12.0 + + scipy.signal.cwt is deprecated in SciPy 1.12 and will be removed + in SciPy 1.15. We recommend using PyWavelets instead. + + Performs a continuous wavelet transform on `data`, + using the `wavelet` function. A CWT performs a convolution + with `data` using the `wavelet` function, which is characterized + by a width parameter and length parameter. The `wavelet` function + is allowed to be complex. + + Parameters + ---------- + data : (N,) ndarray + data on which to perform the transform. + wavelet : function + Wavelet function, which should take 2 arguments. + The first argument is the number of points that the returned vector + will have (len(wavelet(length,width)) == length). + The second is a width parameter, defining the size of the wavelet + (e.g. standard deviation of a gaussian). See `ricker`, which + satisfies these requirements. + widths : (M,) sequence + Widths to use for transform. + dtype : data-type, optional + The desired data type of output. Defaults to ``float64`` if the + output of `wavelet` is real and ``complex128`` if it is complex. + + .. versionadded:: 1.4.0 + + kwargs + Keyword arguments passed to wavelet function. + + .. versionadded:: 1.4.0 + + Returns + ------- + cwt: (M, N) ndarray + Will have shape of (len(widths), len(data)). + + Notes + ----- + + .. 
versionadded:: 1.4.0 + + For non-symmetric, complex-valued wavelets, the input signal is convolved + with the time-reversed complex-conjugate of the wavelet data [1]. + + :: + + length = min(10 * width[ii], len(data)) + cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii], + **kwargs))[::-1], mode='same') + + References + ---------- + .. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)", + Academic Press, 2009. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(-1, 1, 200, endpoint=False) + >>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2) + >>> widths = np.arange(1, 31) + >>> cwtmatr = signal.cwt(sig, signal.ricker, widths) + + .. note:: For cwt matrix plotting it is advisable to flip the y-axis + + >>> cwtmatr_yflip = np.flipud(cwtmatr) + >>> plt.imshow(cwtmatr_yflip, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto', + ... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) + >>> plt.show() + """ + warnings.warn(_msg % 'cwt', DeprecationWarning, stacklevel=2) + return _cwt(data, wavelet, widths, dtype, **kwargs) + + +def _cwt(data, wavelet, widths, dtype=None, **kwargs): + # Determine output type + if dtype is None: + if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG': + dtype = np.complex128 + else: + dtype = np.float64 + + output = np.empty((len(widths), len(data)), dtype=dtype) + for ind, width in enumerate(widths): + N = np.min([10 * width, len(data)]) + wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1]) + output[ind] = convolve(data, wavelet_data, mode='same') + return output diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/bsplines.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/bsplines.py new file mode 100644 index 0000000000000000000000000000000000000000..a90408cbc66684fe5b4fa7802e2cd9bd71af9680 --- /dev/null +++ 
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'spline_filter', 'gauss_spline',
    'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval',
    'zeros_like', 'array', 'arctan2',
    'tan', 'arange', 'floor', 'exp', 'greater', 'add',
    'cspline2d', 'sepfir2d'
]


def __dir__():
    # Advertise the deprecated public names for dir() / tab completion.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: lazily forward attribute access to the
    # private `_bsplines` module while emitting a deprecation warning.
    return _sub_module_deprecation(sub_package="signal", module="bsplines",
                                   private_modules=["_bsplines"], all=__all__,
                                   attribute=name)
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
    'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
    'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
    'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
    'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
    'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
    'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
    'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
    'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
    'gammatone', 'iircomb',
    'atleast_1d', 'poly', 'polyval', 'roots', 'resize', 'absolute',
    'tan', 'log10', 'arcsinh', 'exp', 'arccosh',
    'ceil', 'conjugate', 'append', 'prod', 'full', 'array', 'mintypecode',
    'npp_polyval', 'polyvalfromroots', 'optimize', 'sp_fft', 'comb',
    'float_factorial', 'abs', 'maxflat', 'yulewalk',
    'EPSILON', 'filter_dict', 'band_dict', 'bessel_norms'
]


def __dir__():
    # Advertise the deprecated public names for dir() / tab completion.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: lazily forward attribute access to the
    # private `_filter_design` module while emitting a deprecation warning.
    return _sub_module_deprecation(sub_package="signal",
                                   module="filter_design",
                                   private_modules=["_filter_design"],
                                   all=__all__, attribute=name)
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'kaiser_beta', 'kaiser_atten', 'kaiserord',
    'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase',
    'ceil', 'log', 'irfft', 'fft', 'ifft', 'sinc', 'toeplitz',
    'hankel', 'solve', 'LinAlgError', 'LinAlgWarning', 'lstsq'
]


def __dir__():
    # Advertise the deprecated public names for dir() / tab completion.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: lazily forward attribute access to the
    # private `_fir_filter_design` module with a deprecation warning.
    return _sub_module_deprecation(sub_package="signal",
                                   module="fir_filter_design",
                                   private_modules=["_fir_filter_design"],
                                   all=__all__, attribute=name)


# --- scipy/signal/lti_conversion.py (deprecation stub) -----------------------
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

__all__ = [  # noqa: F822
    'tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
    'cont2discrete', 'eye', 'atleast_2d',
    'poly', 'prod', 'array', 'outer', 'linalg', 'tf2zpk', 'zpk2tf',
    'normalize'
]


def __dir__():
    # Advertise the deprecated public names for dir() / tab completion.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: lazily forward attribute access to the
    # private `_lti_conversion` module with a deprecation warning.
    return _sub_module_deprecation(sub_package="signal",
                                   module="lti_conversion",
                                   private_modules=["_lti_conversion"],
                                   all=__all__, attribute=name)
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
    'lsim', 'impulse', 'step', 'bode',
    'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
    'dfreqresp', 'dbode', 's_qr', 'linalg',
    'tf2zpk', 'zpk2tf', 'normalize', 'freqs',
    'freqz', 'freqs_zpk', 'freqz_zpk', 'tf2ss', 'abcd_normalize',
    'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete', 'atleast_1d',
    'squeeze', 'transpose', 'linspace',
    'LinearTimeInvariant', 'TransferFunctionContinuous',
    'TransferFunctionDiscrete', 'ZerosPolesGainContinuous',
    'ZerosPolesGainDiscrete', 'StateSpaceContinuous',
    'StateSpaceDiscrete', 'Bunch'
]


def __dir__():
    # Advertise the deprecated public names for dir() / tab completion.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: lazily forward attribute access to the
    # private `_ltisys` module while emitting a deprecation warning.
    return _sub_module_deprecation(sub_package="signal", module="ltisys",
                                   private_modules=["_ltisys"], all=__all__,
                                   attribute=name)
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'correlate', 'correlation_lags', 'correlate2d',
    'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
    'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
    'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
    'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
    'residuez', 'resample', 'resample_poly', 'detrend',
    'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
    'filtfilt', 'decimate', 'vectorstrength',
    'timeit', 'cKDTree', 'dlti', 'upfirdn', 'linalg',
    'sp_fft', 'lambertw', 'get_window', 'axis_slice', 'axis_reverse',
    'odd_ext', 'even_ext', 'const_ext', 'cheby1', 'firwin'
]


def __dir__():
    # Advertise the deprecated public names for dir() / tab completion.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: lazily forward attribute access to the
    # private `_signaltools` module while emitting a deprecation warning.
    return _sub_module_deprecation(sub_package="signal", module="signaltools",
                                   private_modules=["_signaltools"],
                                   all=__all__, attribute=name)
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
    'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA',
    'sp_fft', 'get_window', 'const_ext', 'even_ext',
    'odd_ext', 'zero_ext'
]


def __dir__():
    # Advertise the deprecated public names for dir() / tab completion.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: lazily forward attribute access to the
    # private `_spectral_py` module while emitting a deprecation warning.
    return _sub_module_deprecation(sub_package="signal", module="spectral",
                                   private_modules=["_spectral_py"],
                                   all=__all__, attribute=name)
0000000000000000000000000000000000000000..98a5bdea9af96269ef0c500754dd84e6c74f3461 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_peak_finding.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_spectral.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5c23db6c0cdcde6f12034056a5dca1624a83efe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_spectral.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/_scipy_spectral_test_shim.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/_scipy_spectral_test_shim.py new file mode 100644 index 0000000000000000000000000000000000000000..c23f310bcae4fa85558f7f07cddb25874a0ec7d1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/_scipy_spectral_test_shim.py @@ -0,0 +1,488 @@ +"""Helpers to utilize existing stft / istft tests for testing `ShortTimeFFT`. + +This module provides the functions stft_compare() and istft_compare(), which, +compares the output between the existing (i)stft() and the shortTimeFFT based +_(i)stft_wrapper() implementations in this module. + +For testing add the following imports to the file ``tests/test_spectral.py``:: + + from ._scipy_spectral_test_shim import stft_compare as stft + from ._scipy_spectral_test_shim import istft_compare as istft + +and remove the existing imports of stft and istft. + +The idea of these wrappers is not to provide a backward-compatible interface +but to demonstrate that the ShortTimeFFT implementation is at least as capable +as the existing one and delivers comparable results. 
import platform
from typing import cast, Literal

import numpy as np
from numpy.testing import assert_allclose

from scipy.signal import ShortTimeFFT
from scipy.signal import csd, get_window, stft, istft
from scipy.signal._arraytools import const_ext, even_ext, odd_ext, zero_ext
from scipy.signal._short_time_fft import FFT_MODE_TYPE
from scipy.signal._spectral_py import _spectral_helper, _triage_segments, \
    _median_bias


def _stft_wrapper(x, fs=1.0, window='hann', nperseg=256, noverlap=None,
                  nfft=None, detrend=False, return_onesided=True,
                  boundary='zeros', padded=True, axis=-1, scaling='spectrum'):
    """Wrapper for the SciPy `stft()` function based on `ShortTimeFFT` for
    unit testing.

    Handling the boundary and padding is where `ShortTimeFFT` and `stft()`
    differ in behavior. Parts of `_spectral_helper()` were copied to mimic
    the `stft()` behavior.

    This function is meant to be solely used by `stft_compare()`.
    """
    if scaling not in ('psd', 'spectrum'):  # same errors as in original stft:
        raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!")

    # The following lines are taken from the original _spectral_helper():
    boundary_funcs = {'even': even_ext,
                      'odd': odd_ext,
                      'constant': const_ext,
                      'zeros': zero_ext,
                      None: None}

    if boundary not in boundary_funcs:
        raise ValueError(f"Unknown boundary option '{boundary}', must be one" +
                         f" of: {list(boundary_funcs.keys())}")
    if x.size == 0:
        return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)

    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    # parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg,
                                    input_length=x.shape[axis])

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap
    n = x.shape[axis]

    # Padding occurs after boundary extension, so that the extended signal
    # ends in zeros, instead of introducing an impulse at the end.
    # I.e. if x = [..., 3, 2]
    #   extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
    #   pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]

    if boundary is not None:
        ext_func = boundary_funcs[boundary]
        # Extend by nperseg//2 in front and back:
        x = ext_func(x, nperseg//2, axis=axis)

    if padded:
        # Pad to integer number of windowed segments
        # I.e. make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
        x = np.moveaxis(x, axis, -1)

        # This is an edge case where shortTimeFFT returns one more time slice
        # than the Scipy stft() shorten to remove last time slice:
        if n % 2 == 1 and nperseg % 2 == 1 and noverlap % 2 == 1:
            x = x[..., :axis - 1]

        nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
        zeros_shape = list(x.shape[:-1]) + [nadd]
        x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
        x = np.moveaxis(x, -1, axis)

    # ... end original _spectral_helper() code.
    scale_to = {'spectrum': 'magnitude', 'psd': 'psd'}[scaling]

    if np.iscomplexobj(x) and return_onesided:
        return_onesided = False
    # using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE,
                    'onesided' if return_onesided else 'twosided')

    ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                      scale_to=scale_to, phase_shift=None)

    k_off = nperseg // 2
    p0 = 0  # ST.lower_border_end[1] + 1
    nn = x.shape[axis] if padded else n+k_off+1
    p1 = ST.upper_border_begin(nn)[1]  # ST.p_max(n) + 1

    # This is bad hack to pass the test test_roundtrip_boundary_extension():
    if padded is True and nperseg - noverlap == 1:
        p1 -= nperseg // 2 - 1  # the reasoning behind this is not clear to me

    detr = None if detrend is False else detrend
    Sxx = ST.stft_detrend(x, detr, p0, p1, k_offset=k_off, axis=axis)
    t = ST.t(nn, 0, p1 - p0, k_offset=0 if boundary is not None else k_off)
    if x.dtype in (np.float32, np.complex64):
        Sxx = Sxx.astype(np.complex64)

    # workaround for test_average_all_segments() - seems to be buggy behavior:
    if boundary is None and padded is False:
        t, Sxx = t[1:-1], Sxx[..., :-2]
        t -= k_off / fs

    return ST.f, t, Sxx


def _istft_wrapper(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None,
                   nfft=None, input_onesided=True, boundary=True,
                   time_axis=-1, freq_axis=-2, scaling='spectrum') -> \
        tuple[np.ndarray, np.ndarray, tuple[int, int]]:
    """Wrapper for the SciPy `istft()` function based on `ShortTimeFFT` for
    unit testing.

    Note that only option handling is implemented as far as to handle the
    unit tests. E.g., the case ``nperseg=None`` is not handled.

    This function is meant to be solely used by `istft_compare()`.
    """
    # *** Lines are taken from _spectral_py.istft() ***:
    if Zxx.ndim < 2:
        raise ValueError('Input stft must be at least 2d!')

    if freq_axis == time_axis:
        raise ValueError('Must specify differing time and frequency axes!')

    nseg = Zxx.shape[time_axis]

    if input_onesided:
        # Assume even segment length
        n_default = 2*(Zxx.shape[freq_axis] - 1)
    else:
        n_default = Zxx.shape[freq_axis]

    # Check windowing parameters
    if nperseg is None:
        nperseg = n_default
    else:
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    if nfft is None:
        if input_onesided and (nperseg == n_default + 1):
            # Odd nperseg, no FFT padding
            nfft = nperseg
        else:
            nfft = n_default
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    # Get window as array
    if isinstance(window, str) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError(f'window must have length of {nperseg}')

    outputlength = nperseg + (nseg-1)*nstep
    # *** End block of: Taken from _spectral_py.istft() ***

    # Using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE,
                    'onesided' if input_onesided else 'twosided')
    scale_to = cast(Literal['magnitude', 'psd'],
                    {'spectrum': 'magnitude', 'psd': 'psd'}[scaling])

    ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                      scale_to=scale_to, phase_shift=None)

    if boundary:
        j = nperseg if nperseg % 2 == 0 else nperseg - 1
        k0 = ST.k_min + nperseg // 2
        k1 = outputlength - j + k0
    else:
        raise NotImplementedError("boundary=False does not make sense with" +
                                  "ShortTimeFFT.istft()!")

    x = ST.istft(Zxx, k0=k0, k1=k1, f_axis=freq_axis, t_axis=time_axis)
    t = np.arange(k1 - k0) * ST.T
    k_hi = ST.upper_border_begin(k1 - k0)[0]
    # using cast() to make mypy happy:
    return t, x, (ST.lower_border_end[0], k_hi)


def _csd_wrapper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                 nfft=None, detrend='constant', return_onesided=True,
                 scaling='density', axis=-1, average='mean'):
    """Wrapper for the `csd()` function based on `ShortTimeFFT` for
    unit testing.
    """
    freqs, _, Pxy = _csd_test_shim(x, y, fs, window, nperseg, noverlap, nfft,
                                   detrend, return_onesided, scaling, axis)

    # The following code is taken from csd():
    if len(Pxy.shape) >= 2 and Pxy.size > 0:
        if Pxy.shape[-1] > 1:
            if average == 'median':
                # np.median must be passed real arrays for the desired result
                bias = _median_bias(Pxy.shape[-1])
                if np.iscomplexobj(Pxy):
                    Pxy = (np.median(np.real(Pxy), axis=-1)
                           + 1j * np.median(np.imag(Pxy), axis=-1))
                else:
                    Pxy = np.median(Pxy, axis=-1)
                Pxy /= bias
            elif average == 'mean':
                Pxy = Pxy.mean(axis=-1)
            else:
                raise ValueError(
                    f'average must be "median" or "mean", got {average}')
        else:
            Pxy = np.reshape(Pxy, Pxy.shape[:-1])

    return freqs, Pxy


def _csd_test_shim(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                   nfft=None, detrend='constant', return_onesided=True,
                   scaling='density', axis=-1):
    """Compare output of _spectral_helper() and ShortTimeFFT, more
    precisely _spect_helper_csd() for used in csd_wrapper().

    The motivation of this function is to test if the ShortTimeFFT-based
    wrapper `_spect_helper_csd()` returns the same values as
    `_spectral_helper`. This function should only be usd by csd() in (unit)
    testing.
    """
    freqs, t, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap,
                                     nfft, detrend, return_onesided, scaling,
                                     axis, mode='psd')
    freqs1, Pxy1 = _spect_helper_csd(x, y, fs, window, nperseg, noverlap,
                                     nfft, detrend, return_onesided, scaling,
                                     axis)

    np.testing.assert_allclose(freqs1, freqs)
    amax_Pxy = max(np.abs(Pxy).max(), 1) if Pxy.size else 1
    atol = np.finfo(Pxy.dtype).resolution * amax_Pxy  # needed for large Pxy
    # for c_ in range(Pxy.shape[-1]):
    #     np.testing.assert_allclose(Pxy1[:, c_], Pxy[:, c_], atol=atol)
    np.testing.assert_allclose(Pxy1, Pxy, atol=atol)
    return freqs, t, Pxy


def _spect_helper_csd(x, y, fs=1.0, window='hann', nperseg=None,
                      noverlap=None, nfft=None, detrend='constant',
                      return_onesided=True, scaling='density', axis=-1):
    """Wrapper for replacing _spectral_helper() by using the ShortTimeFFT
    for use by csd().

    This function should be only used by _csd_test_shim() and is only useful
    for testing the ShortTimeFFT implementation.
    """

    # The following lines are taken from the original _spectral_helper():
    same_data = y is x
    axis = int(axis)

    # Ensure we have np.arrays, get outdtype
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
        # outdtype = np.result_type(x, y, np.complex64)
    # else:
    #     outdtype = np.result_type(x, np.complex64)

    if not same_data:
        # Check if we can broadcast the outer axes together
        xouter = list(x.shape)
        youter = list(y.shape)
        xouter.pop(axis)
        youter.pop(axis)
        try:
            outershape = np.broadcast(np.empty(xouter),
                                      np.empty(youter)).shape
        except ValueError as e:
            raise ValueError('x and y cannot be broadcast together.') from e

    if same_data:
        if x.size == 0:
            return np.empty(x.shape), np.empty(x.shape)
    else:
        if x.size == 0 or y.size == 0:
            outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
            emptyout = np.moveaxis(np.empty(outshape), -1, axis)
            return emptyout, emptyout

    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    # parse window; if array like, then set nperseg = win.shape
    n = x.shape[axis] if same_data else max(x.shape[axis], y.shape[axis])
    win, nperseg = _triage_segments(window, nperseg, input_length=n)

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg // 2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    if np.iscomplexobj(x) and return_onesided:
        return_onesided = False

    # using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided
                    else 'twosided')
    scale = {'spectrum': 'magnitude', 'density': 'psd'}[scaling]
    SFT = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                       scale_to=scale, phase_shift=None)

    # _spectral_helper() calculates X.conj()*Y instead of X*Y.conj():
    Pxy = SFT.spectrogram(y, x, detr=None if detrend is False else detrend,
                          p0=0, p1=(n-noverlap)//SFT.hop,
                          k_offset=nperseg//2,
                          axis=axis).conj()
    # Note:
    # 'onesided2X' scaling of ShortTimeFFT conflicts with the
    # scaling='spectrum' parameter, since it doubles the squared magnitude,
    # which in the view of the ShortTimeFFT implementation does not make
    # sense. Hence, the doubling of the square is implemented here:
    if return_onesided:
        f_axis = Pxy.ndim - 1 + axis if axis < 0 else axis
        Pxy = np.moveaxis(Pxy, f_axis, -1)
        Pxy[..., 1:-1 if SFT.mfft % 2 == 0 else None] *= 2
        Pxy = np.moveaxis(Pxy, -1, f_axis)

    return SFT.f, Pxy


def stft_compare(x, fs=1.0, window='hann', nperseg=256, noverlap=None,
                 nfft=None, detrend=False, return_onesided=True,
                 boundary='zeros', padded=True, axis=-1, scaling='spectrum'):
    """Assert that the results from the existing `stft()` and
    `_stft_wrapper()` are close to each other.

    For comparing the STFT values an absolute tolerance of the floating point
    resolution was added to circumvent problems with the following tests:
    * For float32 the tolerances are much higher in
      TestSTFT.test_roundtrip_float32()).
    * The TestSTFT.test_roundtrip_scaling() has a high relative deviation.
      Interestingly this did not appear in Scipy 1.9.1 but only in the
      current development version.
    """
    kw = dict(x=x, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,
              nfft=nfft, detrend=detrend, return_onesided=return_onesided,
              boundary=boundary, padded=padded, axis=axis, scaling=scaling)
    f, t, Zxx = stft(**kw)
    f_wrapper, t_wrapper, Zxx_wrapper = _stft_wrapper(**kw)

    e_msg_part = " of `stft_wrapper()` differ from `stft()`."
    assert_allclose(f_wrapper, f, err_msg=f"Frequencies {e_msg_part}")
    assert_allclose(t_wrapper, t, err_msg=f"Time slices {e_msg_part}")

    # Adapted tolerances to account for:
    atol = np.finfo(Zxx.dtype).resolution * 2
    assert_allclose(Zxx_wrapper, Zxx, atol=atol,
                    err_msg=f"STFT values {e_msg_part}")
    return f, t, Zxx


def istft_compare(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None,
                  nfft=None, input_onesided=True, boundary=True, time_axis=-1,
                  freq_axis=-2, scaling='spectrum'):
    """Assert that the results from the existing `istft()` and
    `_istft_wrapper()` are close to each other.

    Quirks:
    * If ``boundary=False`` the comparison is skipped, since it does not
      make sense with ShortTimeFFT.istft(). Only used in test
      TestSTFT.test_roundtrip_boundary_extension().
    * If ShortTimeFFT.istft() decides the STFT is not invertible, the
      comparison is skipped, since istft() only emits a warning and does not
      return a correct result. Only used in
      ShortTimeFFT.test_roundtrip_not_nola().
    * For comparing the signals an absolute tolerance of the floating point
      resolution was added to account for the low accuracy of float32 (Occurs
      only in TestSTFT.test_roundtrip_float32()).
    """
    kw = dict(Zxx=Zxx, fs=fs, window=window, nperseg=nperseg,
              noverlap=noverlap, nfft=nfft, input_onesided=input_onesided,
              boundary=boundary, time_axis=time_axis, freq_axis=freq_axis,
              scaling=scaling)

    t, x = istft(**kw)
    if not boundary:  # skip test_roundtrip_boundary_extension():
        return t, x  # _istft_wrapper does() not implement this case
    try:  # if inversion fails, istft() only emits a warning:
        t_wrapper, x_wrapper, (k_lo, k_hi) = _istft_wrapper(**kw)
    except ValueError as v:  # Do nothing if inversion fails:
        if v.args[0] == "Short-time Fourier Transform not invertible!":
            return t, x
        raise v

    e_msg_part = " of `istft_wrapper()` differ from `istft()`"
    assert_allclose(t, t_wrapper, err_msg=f"Sample times {e_msg_part}")

    # Adapted tolerances to account for resolution loss:
    atol = np.finfo(x.dtype).resolution*2  # instead of default atol = 0
    rtol = 1e-7  # default for np.allclose()

    # Relax atol on 32-Bit platforms a bit to pass CI tests.
    # - Not clear why there are discrepancies (in the FFT maybe?)
    # - Not sure what changed on 'i686' since earlier on those test passed
    if x.dtype == np.float32 and platform.machine() == 'i686':
        # float32 gets only used by TestSTFT.test_roundtrip_float32() so
        # we are using the tolerances from there to circumvent CI problems
        atol, rtol = 1e-4, 1e-5
    elif platform.machine() in ('aarch64', 'i386', 'i686'):
        atol = max(atol, 1e-12)  # 2e-15 seems too tight for 32-Bit platforms

    assert_allclose(x_wrapper[k_lo:k_hi], x[k_lo:k_hi], atol=atol, rtol=rtol,
                    err_msg=f"Signal values {e_msg_part}")
    return t, x


def csd_compare(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                nfft=None, detrend='constant', return_onesided=True,
                scaling='density', axis=-1, average='mean'):
    """Assert that the results from the existing `csd()` and `_csd_wrapper()`
    are close to each other.
    """
    kw = dict(x=x, y=y, fs=fs, window=window, nperseg=nperseg,
              noverlap=noverlap, nfft=nfft, detrend=detrend,
              return_onesided=return_onesided, scaling=scaling, axis=axis,
              average=average)
    freqs0, Pxy0 = csd(**kw)
    freqs1, Pxy1 = _csd_wrapper(**kw)

    assert_allclose(freqs1, freqs0)
    assert_allclose(Pxy1, Pxy0)
    # NOTE(review): the following assert duplicates the first one above --
    # harmless, but a candidate for removal.
    assert_allclose(freqs1, freqs0)
    return freqs0, Pxy0
" + "Must have at least as many poles as zeros.") + return degree + + +def _zpkbilinear(z, p, k, fs): + """Bilinear transformation to convert a filter from analog to digital.""" + + degree = _relative_degree(z, p) + + fs2 = 2*fs + + # Bilinear transform the poles and zeros + z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z] + p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p] + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z.extend([-1] * degree) + + # Compensate for gain change + numer = _prod(fs2 - z1 for z1 in z) + denom = _prod(fs2 - p1 for p1 in p) + k_z = k * numer / denom + + return z_z, p_z, k_z.real + + +def _zpklp2lp(z, p, k, wo=1): + """Transform a lowpass filter to a different cutoff frequency.""" + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = [wo * z1 for z1 in z] + p_lp = [wo * p1 for p1 in p] + + # Each shifted pole decreases gain by wo, each shifted zero increases it. + # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def _butter_analog_poles(n): + """ + Poles of an analog Butterworth lowpass filter. + + This is the same calculation as scipy.signal.buttap(n) or + scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used, + and only the poles are returned. + """ + poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)] + return poles + + +def butter_lp(n, Wn): + """ + Lowpass Butterworth digital filter design. + + This computes the same result as scipy.signal.butter(n, Wn, output='zpk'), + but it uses mpmath, and the results are returned in lists instead of NumPy + arrays. 
+ """ + zeros = [] + poles = _butter_analog_poles(n) + k = 1 + fs = 2 + warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs) + z, p, k = _zpklp2lp(zeros, poles, k, wo=warped) + z, p, k = _zpkbilinear(z, p, k, fs=fs) + return z, p, k + + +def zpkfreqz(z, p, k, worN=None): + """ + Frequency response of a filter in zpk format, using mpmath. + + This is the same calculation as scipy.signal.freqz, but the input is in + zpk format, the calculation is performed using mpath, and the results are + returned in lists instead of NumPy arrays. + """ + if worN is None or isinstance(worN, int): + N = worN or 512 + ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)] + else: + ws = worN + + h = [] + for wk in ws: + zm1 = mpmath.exp(1j * wk) + numer = _prod([zm1 - t for t in z]) + denom = _prod([zm1 - t for t in p]) + hk = k * numer / denom + h.append(hk) + return ws, h diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_array_tools.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_array_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..81503b7e267cf9f74999d283b0d33b012fd0f77c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_array_tools.py @@ -0,0 +1,111 @@ +import numpy as np + +from numpy.testing import assert_array_equal +from pytest import raises as assert_raises + +from scipy.signal._arraytools import (axis_slice, axis_reverse, + odd_ext, even_ext, const_ext, zero_ext) + + +class TestArrayTools: + + def test_axis_slice(self): + a = np.arange(12).reshape(3, 4) + + s = axis_slice(a, start=0, stop=1, axis=0) + assert_array_equal(s, a[0:1, :]) + + s = axis_slice(a, start=-1, axis=0) + assert_array_equal(s, a[-1:, :]) + + s = axis_slice(a, start=0, stop=1, axis=1) + assert_array_equal(s, a[:, 0:1]) + + s = axis_slice(a, start=-1, axis=1) + assert_array_equal(s, a[:, -1:]) + + s = axis_slice(a, start=0, step=2, axis=0) + assert_array_equal(s, a[::2, :]) + + s = axis_slice(a, 
start=0, step=2, axis=1) + assert_array_equal(s, a[:, ::2]) + + def test_axis_reverse(self): + a = np.arange(12).reshape(3, 4) + + r = axis_reverse(a, axis=0) + assert_array_equal(r, a[::-1, :]) + + r = axis_reverse(a, axis=1) + assert_array_equal(r, a[:, ::-1]) + + def test_odd_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + odd = odd_ext(a, 2, axis=1) + expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [11, 10, 9, 8, 7, 6, 5, 4, 3]]) + assert_array_equal(odd, expected) + + odd = odd_ext(a, 1, axis=0) + expected = np.array([[-7, -4, -1, 2, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [17, 14, 11, 8, 5]]) + assert_array_equal(odd, expected) + + assert_raises(ValueError, odd_ext, a, 2, axis=0) + assert_raises(ValueError, odd_ext, a, 5, axis=1) + + def test_even_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + even = even_ext(a, 2, axis=1) + expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3], + [7, 8, 9, 8, 7, 6, 5, 6, 7]]) + assert_array_equal(even, expected) + + even = even_ext(a, 1, axis=0) + expected = np.array([[9, 8, 7, 6, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [1, 2, 3, 4, 5]]) + assert_array_equal(even, expected) + + assert_raises(ValueError, even_ext, a, 2, axis=0) + assert_raises(ValueError, even_ext, a, 5, axis=1) + + def test_const_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + const = const_ext(a, 2, axis=1) + expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5], + [9, 9, 9, 8, 7, 6, 5, 5, 5]]) + assert_array_equal(const, expected) + + const = const_ext(a, 1, axis=0) + expected = np.array([[1, 2, 3, 4, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [9, 8, 7, 6, 5]]) + assert_array_equal(const, expected) + + def test_zero_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + zero = zero_ext(a, 2, axis=1) + expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0], + [0, 0, 9, 8, 7, 6, 5, 0, 0]]) + assert_array_equal(zero, expected) + + zero = zero_ext(a, 1, axis=0) + expected = np.array([[0, 
0, 0, 0, 0], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [0, 0, 0, 0, 0]]) + assert_array_equal(zero, expected) + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_bsplines.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_bsplines.py new file mode 100644 index 0000000000000000000000000000000000000000..828276edd4df0acba0fb5a652e4305e44ce34566 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_bsplines.py @@ -0,0 +1,186 @@ +# pylint: disable=missing-docstring +import numpy as np +from numpy import array +from numpy.testing import (assert_allclose, assert_array_equal, + assert_almost_equal) +import pytest +from pytest import raises + +import scipy.signal._bsplines as bsp +from scipy import signal + + +class TestBSplines: + """Test behaviors of B-splines. Some of the values tested against were + returned as of SciPy 1.1.0 and are included for regression testing + purposes. Others (at integer points) are compared to theoretical + expressions (cf. 
Unser, Aldroubi, Eden, IEEE TSP 1993, Table 1).""" + + def test_spline_filter(self): + np.random.seed(12457) + # Test the type-error branch + raises(TypeError, bsp.spline_filter, array([0]), 0) + # Test the real branch + np.random.seed(12457) + data_array_real = np.random.rand(12, 12) + # make the magnitude exceed 1, and make some negative + data_array_real = 10*(1-2*data_array_real) + result_array_real = array( + [[-.463312621, 8.33391222, .697290949, 5.28390836, + 5.92066474, 6.59452137, 9.84406950, -8.78324188, + 7.20675750, -8.17222994, -4.38633345, 9.89917069], + [2.67755154, 6.24192170, -3.15730578, 9.87658581, + -9.96930425, 3.17194115, -4.50919947, 5.75423446, + 9.65979824, -8.29066885, .971416087, -2.38331897], + [-7.08868346, 4.89887705, -1.37062289, 7.70705838, + 2.51526461, 3.65885497, 5.16786604, -8.77715342e-03, + 4.10533325, 9.04761993, -.577960351, 9.86382519], + [-4.71444301, -1.68038985, 2.84695116, 1.14315938, + -3.17127091, 1.91830461, 7.13779687, -5.35737482, + -9.66586425, -9.87717456, 9.93160672, 4.71948144], + [9.49551194, -1.92958436, 6.25427993, -9.05582911, + 3.97562282, 7.68232426, -1.04514824, -5.86021443, + -8.43007451, 5.47528997, 2.06330736, -8.65968112], + [-8.91720100, 8.87065356, 3.76879937, 2.56222894, + -.828387146, 8.72288903, 6.42474741, -6.84576083, + 9.94724115, 6.90665380, -6.61084494, -9.44907391], + [9.25196790, -.774032030, 7.05371046, -2.73505725, + 2.53953305, -1.82889155, 2.95454824, -1.66362046, + 5.72478916, -3.10287679, 1.54017123, -7.87759020], + [-3.98464539, -2.44316992, -1.12708657, 1.01725672, + -8.89294671, -5.42145629, -6.16370321, 2.91775492, + 9.64132208, .702499998, -2.02622392, 1.56308431], + [-2.22050773, 7.89951554, 5.98970713, -7.35861835, + 5.45459283, -7.76427957, 3.67280490, -4.05521315, + 4.51967507, -3.22738749, -3.65080177, 3.05630155], + [-6.21240584, -.296796126, -8.34800163, 9.21564563, + -3.61958784, -4.77120006, -3.99454057, 1.05021988e-03, + -6.95982829, 6.04380797, 8.43181250, 
-2.71653339], + [1.19638037, 6.99718842e-02, 6.72020394, -2.13963198, + 3.75309875, -5.70076744, 5.92143551, -7.22150575, + -3.77114594, -1.11903194, -5.39151466, 3.06620093], + [9.86326886, 1.05134482, -7.75950607, -3.64429655, + 7.81848957, -9.02270373, 3.73399754, -4.71962549, + -7.71144306, 3.78263161, 6.46034818, -4.43444731]]) + assert_allclose(bsp.spline_filter(data_array_real, 0), + result_array_real) + + def test_gauss_spline(self): + np.random.seed(12459) + assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342) + assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217])) + + def test_gauss_spline_list(self): + # regression test for gh-12152 (accept array_like) + knots = [-1.0, 0.0, -1.0] + assert_almost_equal(bsp.gauss_spline(knots, 3), + array([0.15418033, 0.6909883, 0.15418033])) + + def test_cspline1d(self): + np.random.seed(12462) + assert_array_equal(bsp.cspline1d(array([0])), [0.]) + c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378, + 4.78893826]) + # test lamda != 0 + assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d) + c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812, + 5.21051638]) + assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0) + + def test_qspline1d(self): + np.random.seed(12463) + assert_array_equal(bsp.qspline1d(array([0])), [0.]) + # test lamda != 0 + raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.) + raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.) 
    def test_cspline1d_eval(self):
        # Fixed seed kept for parity with the other tests in this class
        # (no random numbers are actually drawn below).
        np.random.seed(12464)
        # Trivial inputs: all-zero coefficients evaluate to zero, and an
        # empty evaluation grid yields an empty result.
        assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.]))
        assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []),
                           array([]))
        # Unit-spaced sample grid and the values to interpolate:
        x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
        dx = x[1]-x[0]
        # Evaluation points extend beyond [x[0], x[-1]]; the reference values
        # below repeat symmetrically about the ends, which suggests mirror
        # extension is exercised here — TODO confirm against cspline1d_eval.
        newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
                -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
                6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
                12.5]
        y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
                   1.396, 4.094])
        cj = bsp.cspline1d(y)
        # Regression reference values (as of SciPy 1.1.0, per class docstring).
        newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068,
                      4.21600281, 6.04643068, 6.864, 5.16924703, 3.514,
                      4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433,
                      7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396,
                      2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879,
                      7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759,
                      6.80717667, 6.203, 4.41570658])
        assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
3.23872593, 1.396, + 2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879, + 7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759, + 6.71900226, 6.203, 4.49418159]) + assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy) + + +def test_sepfir2d_invalid_filter(): + filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0]) + image = np.random.rand(7, 9) + # No error for odd lengths + signal.sepfir2d(image, filt, filt[2:]) + + # Row or column filter must be odd + with pytest.raises(ValueError, match="odd length"): + signal.sepfir2d(image, filt, filt[1:]) + with pytest.raises(ValueError, match="odd length"): + signal.sepfir2d(image, filt[1:], filt) + + # Filters must be 1-dimensional + with pytest.raises(ValueError, match="object too deep"): + signal.sepfir2d(image, filt.reshape(1, -1), filt) + with pytest.raises(ValueError, match="object too deep"): + signal.sepfir2d(image, filt, filt.reshape(1, -1)) + +def test_sepfir2d_invalid_image(): + filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0]) + image = np.random.rand(8, 8) + + # Image must be 2 dimensional + with pytest.raises(ValueError, match="object too deep"): + signal.sepfir2d(image.reshape(4, 4, 4), filt, filt) + + with pytest.raises(ValueError, match="object of too small depth"): + signal.sepfir2d(image[0], filt, filt) + + +def test_cspline2d(): + np.random.seed(181819142) + image = np.random.rand(71, 73) + signal.cspline2d(image, 8.0) + + +def test_qspline2d(): + np.random.seed(181819143) + image = np.random.rand(71, 73) + signal.qspline2d(image) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..51b9be56e29c4e0448020c2d8e8ff6e7e336f8c3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py @@ -0,0 +1,416 @@ +import numpy as np +from numpy.testing import \ + 
assert_array_almost_equal, assert_almost_equal, \ + assert_allclose, assert_equal + +import pytest +from scipy.signal import cont2discrete as c2d +from scipy.signal import dlsim, ss2tf, ss2zpk, lsim, lti +from scipy.signal import tf2ss, impulse, dimpulse, step, dstep + +# Author: Jeffrey Armstrong +# March 29, 2011 + + +class TestC2D: + def test_zoh(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.324360635350064) + # c and d in discrete should be equal to their continuous counterparts + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cc, cd) + assert_array_almost_equal(dc, dd) + assert_almost_equal(dt_requested, dt) + + def test_foh(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + # True values are verified with Matlab + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.420839287058789) + cd_truth = cc + dd_truth = np.array([[0.260262223725224], + [0.297442541400256], + [-0.144098411624840]]) + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_impulse(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [0.0]]) + + # True values are verified with Matlab + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.412180317675032) + cd_truth = cc + 
dd_truth = np.array([[0.4375], [0.5], [0.3125]]) + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='impulse') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_gbt(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + alpha = 1.0 / 3.0 + + ad_truth = 1.6 * np.eye(2) + bd_truth = np.full((2, 1), 0.3) + cd_truth = np.array([[0.9, 1.2], + [1.2, 1.2], + [1.2, 0.3]]) + dd_truth = np.array([[0.175], + [0.2], + [-0.205]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='gbt', alpha=alpha) + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_euler(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = 1.5 * np.eye(2) + bd_truth = np.full((2, 1), 0.25) + cd_truth = np.array([[0.75, 1.0], + [1.0, 1.0], + [1.0, 0.25]]) + dd_truth = dc + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='euler') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_backward_diff(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = 2.0 * np.eye(2) + bd_truth = np.full((2, 1), 0.5) + cd_truth = np.array([[1.5, 2.0], + [2.0, 2.0], + [2.0, 0.5]]) + dd_truth = 
np.array([[0.875], + [1.0], + [0.295]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='backward_diff') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_bilinear(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = (5.0 / 3.0) * np.eye(2) + bd_truth = np.full((2, 1), 1.0 / 3.0) + cd_truth = np.array([[1.0, 4.0 / 3.0], + [4.0 / 3.0, 4.0 / 3.0], + [4.0 / 3.0, 1.0 / 3.0]]) + dd_truth = np.array([[0.291666666666667], + [1.0 / 3.0], + [-0.121666666666667]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + # Same continuous system again, but change sampling rate + + ad_truth = 1.4 * np.eye(2) + bd_truth = np.full((2, 1), 0.2) + cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]]) + dd_truth = np.array([[0.175], [0.2], [-0.205]]) + + dt_requested = 1.0 / 3.0 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_transferfunction(self): + numc = np.array([0.25, 0.25, 0.5]) + denc = np.array([0.75, 0.75, 1.0]) + + numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]]) + dend = np.array([1.0, -1.351394049721225, 0.606530659712634]) + + dt_requested = 0.5 + + num, den, dt = c2d((numc, denc), dt_requested, method='zoh') + + assert_array_almost_equal(numd, num) + 
assert_array_almost_equal(dend, den) + assert_almost_equal(dt_requested, dt) + + def test_zerospolesgain(self): + zeros_c = np.array([0.5, -0.5]) + poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + k_c = 1.0 + + zeros_d = [1.23371727305860, 0.735356894461267] + polls_d = [0.938148335039729 + 0.346233593780536j, + 0.938148335039729 - 0.346233593780536j] + k_d = 1.0 + + dt_requested = 0.5 + + zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested, + method='zoh') + + assert_array_almost_equal(zeros_d, zeros) + assert_array_almost_equal(polls_d, poles) + assert_almost_equal(k_d, k) + assert_almost_equal(dt_requested, dt) + + def test_gbt_with_sio_tf_and_zpk(self): + """Test method='gbt' with alpha=0.25 for tf and zpk cases.""" + # State space coefficients for the continuous SIO system. + A = -1.0 + B = 1.0 + C = 1.0 + D = 0.5 + + # The continuous transfer function coefficients. + cnum, cden = ss2tf(A, B, C, D) + + # Continuous zpk representation + cz, cp, ck = ss2zpk(A, B, C, D) + + h = 1.0 + alpha = 0.25 + + # Explicit formulas, in the scalar case. + Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A) + Bd = h * B / (1 - alpha * h * A) + Cd = C / (1 - alpha * h * A) + Dd = D + alpha * C * Bd + + # Convert the explicit solution to tf + dnum, dden = ss2tf(Ad, Bd, Cd, Dd) + + # Compute the discrete tf using cont2discrete. + c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha) + + assert_allclose(dnum, c2dnum) + assert_allclose(dden, c2dden) + + # Convert explicit solution to zpk. + dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd) + + # Compute the discrete zpk using cont2discrete. + c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha) + + assert_allclose(dz, c2dz) + assert_allclose(dp, c2dp) + assert_allclose(dk, c2dk) + + def test_discrete_approx(self): + """ + Test that the solution to the discrete approximation of a continuous + system actually approximates the solution to the continuous system. 
+ This is an indirect test of the correctness of the implementation + of cont2discrete. + """ + + def u(t): + return np.sin(2.5 * t) + + a = np.array([[-0.01]]) + b = np.array([[1.0]]) + c = np.array([[1.0]]) + d = np.array([[0.2]]) + x0 = 1.0 + + t = np.linspace(0, 10.0, 101) + dt = t[1] - t[0] + u1 = u(t) + + # Use lsim to compute the solution to the continuous system. + t, yout, xout = lsim((a, b, c, d), T=t, U=u1, X0=x0) + + # Convert the continuous system to a discrete approximation. + dsys = c2d((a, b, c, d), dt, method='bilinear') + + # Use dlsim with the pairwise averaged input to compute the output + # of the discrete system. + u2 = 0.5 * (u1[:-1] + u1[1:]) + t2 = t[:-1] + td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0) + + # ymid is the average of consecutive terms of the "exact" output + # computed by lsim2. This is what the discrete approximation + # actually approximates. + ymid = 0.5 * (yout[:-1] + yout[1:]) + + assert_allclose(yd2.ravel(), ymid, rtol=1e-4) + + def test_simo_tf(self): + # See gh-5753 + tf = ([[1, 0], [1, 1]], [1, 1]) + num, den, dt = c2d(tf, 0.01) + + assert_equal(dt, 0.01) # sanity check + assert_allclose(den, [1, -0.990404983], rtol=1e-3) + assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3) + + def test_multioutput(self): + ts = 0.01 # time step + + tf = ([[1, -3], [1, 5]], [1, 1]) + num, den, dt = c2d(tf, ts) + + tf1 = (tf[0][0], tf[1]) + num1, den1, dt1 = c2d(tf1, ts) + + tf2 = (tf[0][1], tf[1]) + num2, den2, dt2 = c2d(tf2, ts) + + # Sanity checks + assert_equal(dt, dt1) + assert_equal(dt, dt2) + + # Check that we get the same results + assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13) + + # Single input, so the denominator should + # not be multidimensional like the numerator + assert_allclose(den, den1, rtol=1e-13) + assert_allclose(den, den2, rtol=1e-13) + +class TestC2dLti: + def test_c2d_ss(self): + # StateSpace + A = np.array([[-0.3, 0.1], [0.2, -0.7]]) + B = np.array([[0], [1]]) + C = 
np.array([[1, 0]]) + D = 0 + + A_res = np.array([[0.985136404135682, 0.004876671474795], + [0.009753342949590, 0.965629718236502]]) + B_res = np.array([[0.000122937599964], [0.049135527547844]]) + + sys_ssc = lti(A, B, C, D) + sys_ssd = sys_ssc.to_discrete(0.05) + + assert_allclose(sys_ssd.A, A_res) + assert_allclose(sys_ssd.B, B_res) + assert_allclose(sys_ssd.C, C) + assert_allclose(sys_ssd.D, D) + + def test_c2d_tf(self): + + sys = lti([0.5, 0.3], [1.0, 0.4]) + sys = sys.to_discrete(0.005) + + # Matlab results + num_res = np.array([0.5, -0.485149004980066]) + den_res = np.array([1.0, -0.980198673306755]) + + # Somehow a lot of numerical errors + assert_allclose(sys.den, den_res, atol=0.02) + assert_allclose(sys.num, num_res, atol=0.02) + + +class TestC2dInvariants: + # Some test cases for checking the invariances. + # Array of triplets: (system, sample time, number of samples) + cases = [ + (tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10), + (tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10), + (tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10), + ] + + # Check that systems discretized with the impulse-invariant + # method really hold the invariant + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_impulse_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont = impulse(sys, T=time) + _, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'), + n=len(time)) + assert_allclose(sample_time * yout_cont.ravel(), yout_disc[0].ravel()) + + # Step invariant should hold for ZOH discretized systems + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_step_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont = step(sys, T=time) + _, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time)) + assert_allclose(yout_cont.ravel(), yout_disc[0].ravel()) + + # Linear invariant should hold for FOH discretized systems + 
@pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_linear_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont, _ = lsim(sys, T=time, U=time) + _, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time) + assert_allclose(yout_cont.ravel(), yout_disc.ravel()) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py new file mode 100644 index 0000000000000000000000000000000000000000..b4a3c0e37c83812f525f2565144fcf9b1d7eeffd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py @@ -0,0 +1,219 @@ +# This program is public domain +# Authors: Paul Kienzle, Nadav Horesh +''' +A unit test module for czt.py +''' +import pytest +from numpy.testing import assert_allclose +from scipy.fft import fft +from scipy.signal import (czt, zoom_fft, czt_points, CZT, ZoomFFT) +import numpy as np + + +def check_czt(x): + # Check that czt is the equivalent of normal fft + y = fft(x) + y1 = czt(x) + assert_allclose(y1, y, rtol=1e-13) + + # Check that interpolated czt is the equivalent of normal fft + y = fft(x, 100*len(x)) + y1 = czt(x, 100*len(x)) + assert_allclose(y1, y, rtol=1e-12) + + +def check_zoom_fft(x): + # Check that zoom_fft is the equivalent of normal fft + y = fft(x) + y1 = zoom_fft(x, [0, 2-2./len(y)], endpoint=True) + assert_allclose(y1, y, rtol=1e-11, atol=1e-14) + y1 = zoom_fft(x, [0, 2]) + assert_allclose(y1, y, rtol=1e-11, atol=1e-14) + + # Test fn scalar + y1 = zoom_fft(x, 2-2./len(y), endpoint=True) + assert_allclose(y1, y, rtol=1e-11, atol=1e-14) + y1 = zoom_fft(x, 2) + assert_allclose(y1, y, rtol=1e-11, atol=1e-14) + + # Check that zoom_fft with oversampling is equivalent to zero padding + over = 10 + yover = fft(x, over*len(x)) + y2 = zoom_fft(x, [0, 2-2./len(yover)], m=len(yover), endpoint=True) + assert_allclose(y2, yover, 
rtol=1e-12, atol=1e-10) + y2 = zoom_fft(x, [0, 2], m=len(yover)) + assert_allclose(y2, yover, rtol=1e-12, atol=1e-10) + + # Check that zoom_fft works on a subrange + w = np.linspace(0, 2-2./len(x), len(x)) + f1, f2 = w[3], w[6] + y3 = zoom_fft(x, [f1, f2], m=3*over+1, endpoint=True) + idx3 = slice(3*over, 6*over+1) + assert_allclose(y3, yover[idx3], rtol=1e-13) + + +def test_1D(): + # Test of 1D version of the transforms + + np.random.seed(0) # Deterministic randomness + + # Random signals + lengths = np.random.randint(8, 200, 20) + np.append(lengths, 1) + for length in lengths: + x = np.random.random(length) + check_zoom_fft(x) + check_czt(x) + + # Gauss + t = np.linspace(-2, 2, 128) + x = np.exp(-t**2/0.01) + check_zoom_fft(x) + + # Linear + x = [1, 2, 3, 4, 5, 6, 7] + check_zoom_fft(x) + + # Check near powers of two + check_zoom_fft(range(126-31)) + check_zoom_fft(range(127-31)) + check_zoom_fft(range(128-31)) + check_zoom_fft(range(129-31)) + check_zoom_fft(range(130-31)) + + # Check transform on n-D array input + x = np.reshape(np.arange(3*2*28), (3, 2, 28)) + y1 = zoom_fft(x, [0, 2-2./28]) + y2 = zoom_fft(x[2, 0, :], [0, 2-2./28]) + assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12) + + y1 = zoom_fft(x, [0, 2], endpoint=False) + y2 = zoom_fft(x[2, 0, :], [0, 2], endpoint=False) + assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12) + + # Random (not a test condition) + x = np.random.rand(101) + check_zoom_fft(x) + + # Spikes + t = np.linspace(0, 1, 128) + x = np.sin(2*np.pi*t*5)+np.sin(2*np.pi*t*13) + check_zoom_fft(x) + + # Sines + x = np.zeros(100, dtype=complex) + x[[1, 5, 21]] = 1 + check_zoom_fft(x) + + # Sines plus complex component + x += 1j*np.linspace(0, 0.5, x.shape[0]) + check_zoom_fft(x) + + +def test_large_prime_lengths(): + np.random.seed(0) # Deterministic randomness + for N in (101, 1009, 10007): + x = np.random.rand(N) + y = fft(x) + y1 = czt(x) + assert_allclose(y, y1, rtol=1e-12) + + +@pytest.mark.slow +def test_czt_vs_fft(): + 
np.random.seed(123) + random_lengths = np.random.exponential(100000, size=10).astype('int') + for n in random_lengths: + a = np.random.randn(n) + assert_allclose(czt(a), fft(a), rtol=1e-11) + + +def test_empty_input(): + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt([]) + with pytest.raises(ValueError, match='Invalid number of CZT'): + zoom_fft([], 0.5) + + +def test_0_rank_input(): + with pytest.raises(IndexError, match='tuple index out of range'): + czt(5) + with pytest.raises(IndexError, match='tuple index out of range'): + zoom_fft(5, 0.5) + + +@pytest.mark.parametrize('impulse', ([0, 0, 1], [0, 0, 1, 0, 0], + np.concatenate((np.array([0, 0, 1]), + np.zeros(100))))) +@pytest.mark.parametrize('m', (1, 3, 5, 8, 101, 1021)) +@pytest.mark.parametrize('a', (1, 2, 0.5, 1.1)) +# Step that tests away from the unit circle, but not so far it explodes from +# numerical error +@pytest.mark.parametrize('w', (None, 0.98534 + 0.17055j)) +def test_czt_math(impulse, m, w, a): + # z-transform of an impulse is 1 everywhere + assert_allclose(czt(impulse[2:], m=m, w=w, a=a), + np.ones(m), rtol=1e-10) + + # z-transform of a delayed impulse is z**-1 + assert_allclose(czt(impulse[1:], m=m, w=w, a=a), + czt_points(m=m, w=w, a=a)**-1, rtol=1e-10) + + # z-transform of a 2-delayed impulse is z**-2 + assert_allclose(czt(impulse, m=m, w=w, a=a), + czt_points(m=m, w=w, a=a)**-2, rtol=1e-10) + + +def test_int_args(): + # Integer argument `a` was producing all 0s + assert_allclose(abs(czt([0, 1], m=10, a=2)), 0.5*np.ones(10), rtol=1e-15) + assert_allclose(czt_points(11, w=2), 1/(2**np.arange(11)), rtol=1e-30) + + +def test_czt_points(): + for N in (1, 2, 3, 8, 11, 100, 101, 10007): + assert_allclose(czt_points(N), np.exp(2j*np.pi*np.arange(N)/N), + rtol=1e-30) + + assert_allclose(czt_points(7, w=1), np.ones(7), rtol=1e-30) + assert_allclose(czt_points(11, w=2.), 1/(2**np.arange(11)), rtol=1e-30) + + func = CZT(12, m=11, w=2., a=1) + assert_allclose(func.points(), 
1/(2**np.arange(11)), rtol=1e-30) + + +@pytest.mark.parametrize('cls, args', [(CZT, (100,)), (ZoomFFT, (100, 0.2))]) +def test_CZT_size_mismatch(cls, args): + # Data size doesn't match function's expected size + myfunc = cls(*args) + with pytest.raises(ValueError, match='CZT defined for'): + myfunc(np.arange(5)) + + +def test_invalid_range(): + with pytest.raises(ValueError, match='2-length sequence'): + ZoomFFT(100, [1, 2, 3]) + + +@pytest.mark.parametrize('m', [0, -11, 5.5, 4.0]) +def test_czt_points_errors(m): + # Invalid number of points + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt_points(m) + + +@pytest.mark.parametrize('size', [0, -5, 3.5, 4.0]) +def test_nonsense_size(size): + # Numpy and Scipy fft() give ValueError for 0 output size, so we do, too + with pytest.raises(ValueError, match='Invalid number of CZT'): + CZT(size, 3) + with pytest.raises(ValueError, match='Invalid number of CZT'): + ZoomFFT(size, 0.2, 3) + with pytest.raises(ValueError, match='Invalid number of CZT'): + CZT(3, size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + ZoomFFT(3, 0.2, size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt([1, 2, 3], size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + zoom_fft([1, 2, 3], 0.2, size) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..af69f109cd3e6e72858d69bcb338ddd61b18e3ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py @@ -0,0 +1,1221 @@ +import warnings + +import numpy as np +from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose, + assert_, suppress_warnings) +from pytest import raises as assert_raises + +from scipy.signal import (ss2tf, tf2ss, lti, + dlti, bode, freqresp, lsim, impulse, step, + 
abcd_normalize, place_poles, + TransferFunction, StateSpace, ZerosPolesGain) +from scipy.signal._filter_design import BadCoefficients +import scipy.linalg as linalg + + +def _assert_poles_close(P1,P2, rtol=1e-8, atol=1e-8): + """ + Check each pole in P1 is close to a pole in P2 with a 1e-8 + relative tolerance or 1e-8 absolute tolerance (useful for zero poles). + These tolerances are very strict but the systems tested are known to + accept these poles so we should not be far from what is requested. + """ + P2 = P2.copy() + for p1 in P1: + found = False + for p2_idx in range(P2.shape[0]): + if np.allclose([np.real(p1), np.imag(p1)], + [np.real(P2[p2_idx]), np.imag(P2[p2_idx])], + rtol, atol): + found = True + np.delete(P2, p2_idx) + break + if not found: + raise ValueError("Can't find pole " + str(p1) + " in " + str(P2)) + + +class TestPlacePoles: + + def _check(self, A, B, P, **kwargs): + """ + Perform the most common tests on the poles computed by place_poles + and return the Bunch object for further specific tests + """ + fsf = place_poles(A, B, P, **kwargs) + expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix)) + _assert_poles_close(expected, fsf.requested_poles) + _assert_poles_close(expected, fsf.computed_poles) + _assert_poles_close(P,fsf.requested_poles) + return fsf + + def test_real(self): + # Test real pole placement using KNV and YT0 algorithm and example 1 in + # section 4 of the reference publication (see place_poles docstring) + A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2) + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + + # Check that both KNV and YT compute correct K matrix + self._check(A, B, P, method='KNV0') + self._check(A, B, P, method='YT') + + # Try to reach the specific case in _YT_real where two singular + # values are almost equal. 
This is to improve code coverage but I + # have no way to be sure this code is really reached + + # on some architectures this can lead to a RuntimeWarning invalid + # value in divide (see gh-7590), so suppress it for now + with np.errstate(invalid='ignore'): + self._check(A, B, (2,2,3,3)) + + def test_complex(self): + # Test complex pole placement on a linearized car model, taken from L. + # Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE + # editions p 184/185 + A = np.array([[0, 7, 0, 0], + [0, 0, 0, 7/3.], + [0, 0, 0, 0], + [0, 0, 0, 0]]) + B = np.array([[0, 0], + [0, 0], + [1, 0], + [0, 1]]) + # Test complex poles on YT + P = np.array([-3, -1, -2-1j, -2+1j]) + # on macOS arm64 this can lead to a RuntimeWarning invalid + # value in divide, so suppress it for now + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P) + + # Try to reach the specific case in _YT_complex where two singular + # values are almost equal. This is to improve code coverage but I + # have no way to be sure this code is really reached + + P = [0-1e-6j,0+1e-6j,-10,10] + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P, maxiter=1000) + + # Try to reach the specific case in _YT_complex where the rank two + # update yields two null vectors. This test was found via Monte Carlo. 
+ + A = np.array( + [-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546, + -167, -754, -2285, -543, -1700, -584, -2978, -925, -1300, + -1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709, + -291, -338, -153, -1804, -1106, -1168, -867, -2297] + ).reshape(6,6) + + B = np.array( + [-108, -374, -524, -1285, -1232, -161, -1204, -672, -637, + -15, -483, -23, -931, -780, -1245, -1129, -1290, -1502, + -952, -1374, -62, -964, -930, -939, -792, -756, -1437, + -491, -1543, -686] + ).reshape(6,5) + P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j] + self._check(A, B, P) + + # Use a lot of poles to go through all cases for update_order + # in _YT_loop + + big_A = np.ones((11,11))-np.eye(11) + big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:] + big_A[:6,:6] = A + big_B[:6,:5] = B + + P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j] + with np.errstate(divide='ignore', invalid='ignore'): + self._check(big_A, big_B, P) + + #check with only complex poles and only real poles + P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j, + -10-10j,-20-20j,-30-30j,-40-40j,-50-50j] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + + # need a 5x5 array to ensure YT handles properly when there + # is only one real pole and several complex + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0, + 0,0,0,5,0,0,0,0,9]).reshape(5,5) + B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2) + P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j]) + with np.errstate(divide='ignore', invalid='ignore'): + place_poles(A, B, P) + + # same test with an odd number of real poles > 1 + # this is another specific case of YT + P = np.array([-2, -3, -4, -1+1j, -1-1j]) + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P) + + def test_tricky_B(self): + # check we handle as we should the 1 column B matrices and + # n column B matrices (with n such as shape(A)=(n, n)) + A = 
np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4, + 5, 6, 7, 8]).reshape(4, 4) + + # KNV or YT are not called here, it's a specific case with only + # one unique solution + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + fsf = self._check(A, B, P) + # rtol and nb_iter should be set to np.nan as the identity can be + # used as transfer matrix + assert_equal(fsf.rtol, np.nan) + assert_equal(fsf.nb_iter, np.nan) + + # check with complex poles too as they trigger a specific case in + # the specific case :-) + P = np.array((-2+1j,-2-1j,-3,-2)) + fsf = self._check(A, B, P) + assert_equal(fsf.rtol, np.nan) + assert_equal(fsf.nb_iter, np.nan) + + #now test with a B matrix with only one column (no optimisation) + B = B[:,0].reshape(4,1) + P = np.array((-2+1j,-2-1j,-3,-2)) + fsf = self._check(A, B, P) + + # we can't optimize anything, check they are set to 0 as expected + assert_equal(fsf.rtol, 0) + assert_equal(fsf.nb_iter, 0) + + def test_errors(self): + # Test input mistakes from user + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4) + B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2) + + #should fail as the method keyword is invalid + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + method="foo") + + #should fail as poles are not 1D array + assert_raises(ValueError, place_poles, A, B, + np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1)) + + #should fail as A is not a 2D array + assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B, + (-2.1,-2.2,-2.3,-2.4)) + + #should fail as B is not a 2D array + assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis], + (-2.1,-2.2,-2.3,-2.4)) + + #should fail as there are too many poles + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3)) + + #should fail as there are not enough poles + assert_raises(ValueError, 
place_poles, A, B, (-2.1,-2.2,-2.3)) + + #should fail as the rtol is greater than 1 + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + rtol=42) + + #should fail as maxiter is smaller than 1 + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + maxiter=-42) + + # should fail as ndim(B) is two + assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2)) + + #unctrollable system + assert_raises(ValueError, place_poles, np.ones((4,4)), + np.ones((4,2)), (1,2,3,4)) + + # Should not raise ValueError as the poles can be placed but should + # raise a warning as the convergence is not reached + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42) + assert_(len(w) == 1) + assert_(issubclass(w[-1].category, UserWarning)) + assert_("Convergence was not reached after maxiter iterations" + in str(w[-1].message)) + assert_equal(fsf.nb_iter, 42) + + # should fail as a complex misses its conjugate + assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2)) + + # should fail as A is not square + assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5)) + + # should fail as B has not the same number of lines as A + assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5)) + + # should fail as KNV0 does not support complex poles + assert_raises(ValueError, place_poles, A, B, + (-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0") + + +class TestSS2TF: + + def check_matrix_shapes(self, p, q, r): + ss2tf(np.zeros((p, p)), + np.zeros((p, q)), + np.zeros((r, p)), + np.zeros((r, q)), 0) + + def test_shapes(self): + # Each tuple holds: + # number of states, number of inputs, number of outputs + for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]: + self.check_matrix_shapes(p, q, r) + + def test_basic(self): + # Test a round trip through tf2ss and ss2tf. 
+ b = np.array([1.0, 3.0, 5.0]) + a = np.array([1.0, 2.0, 3.0]) + + A, B, C, D = tf2ss(b, a) + assert_allclose(A, [[-2, -3], [1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0]], rtol=1e-13) + assert_allclose(C, [[1, 2]], rtol=1e-13) + assert_allclose(D, [[1]], rtol=1e-14) + + bb, aa = ss2tf(A, B, C, D) + assert_allclose(bb[0], b, rtol=1e-13) + assert_allclose(aa, a, rtol=1e-13) + + def test_zero_order_round_trip(self): + # See gh-5760 + tf = (2, 1) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[0]], rtol=1e-13) + assert_allclose(B, [[0]], rtol=1e-13) + assert_allclose(C, [[0]], rtol=1e-13) + assert_allclose(D, [[2]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[2, 0]], rtol=1e-13) + assert_allclose(den, [1, 0], rtol=1e-13) + + tf = ([[5], [2]], 1) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[0]], rtol=1e-13) + assert_allclose(B, [[0]], rtol=1e-13) + assert_allclose(C, [[0], [0]], rtol=1e-13) + assert_allclose(D, [[5], [2]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[5, 0], [2, 0]], rtol=1e-13) + assert_allclose(den, [1, 0], rtol=1e-13) + + def test_simo_round_trip(self): + # See gh-5753 + tf = ([[1, 2], [1, 1]], [1, 2]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-2]], rtol=1e-13) + assert_allclose(B, [[1]], rtol=1e-13) + assert_allclose(C, [[0], [-1]], rtol=1e-13) + assert_allclose(D, [[1], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[1, 2], [1, 1]], rtol=1e-13) + assert_allclose(den, [1, 2], rtol=1e-13) + + tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-1, -1], [1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0]], rtol=1e-13) + assert_allclose(C, [[-1, 0], [0, 0]], rtol=1e-13) + assert_allclose(D, [[1], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[1, 0, 1], [1, 1, 1]], rtol=1e-13) + assert_allclose(den, [1, 1, 1], rtol=1e-13) + + tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4]) + A, B, C, D = 
tf2ss(*tf) + assert_allclose(A, [[-2, -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0], [0]], rtol=1e-13) + assert_allclose(C, [[1, 2, 3], [1, 2, 3]], rtol=1e-13) + assert_allclose(D, [[0], [0]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[0, 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13) + assert_allclose(den, [1, 2, 3, 4], rtol=1e-13) + + tf = (np.array([1, [2, 3]], dtype=object), [1, 6]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-6]], rtol=1e-31) + assert_allclose(B, [[1]], rtol=1e-31) + assert_allclose(C, [[1], [-9]], rtol=1e-31) + assert_allclose(D, [[0], [2]], rtol=1e-31) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[0, 1], [2, 3]], rtol=1e-13) + assert_allclose(den, [1, 6], rtol=1e-13) + + tf = (np.array([[1, -3], [1, 2, 3]], dtype=object), [1, 6, 5]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-6, -5], [1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0]], rtol=1e-13) + assert_allclose(C, [[1, -3], [-4, -2]], rtol=1e-13) + assert_allclose(D, [[0], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[0, 1, -3], [1, 2, 3]], rtol=1e-13) + assert_allclose(den, [1, 6, 5], rtol=1e-13) + + def test_all_int_arrays(self): + A = [[0, 1, 0], [0, 0, 1], [-3, -4, -2]] + B = [[0], [0], [1]] + C = [[5, 1, 0]] + D = [[0]] + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[0.0, 0.0, 1.0, 5.0]], rtol=1e-13, atol=1e-14) + assert_allclose(den, [1.0, 2.0, 4.0, 3.0], rtol=1e-13) + + def test_multioutput(self): + # Regression test for gh-2669. + + # 4 states + A = np.array([[-1.0, 0.0, 1.0, 0.0], + [-1.0, 0.0, 2.0, 0.0], + [-4.0, 0.0, 3.0, 0.0], + [-8.0, 8.0, 0.0, 4.0]]) + + # 1 input + B = np.array([[0.3], + [0.0], + [7.0], + [0.0]]) + + # 3 outputs + C = np.array([[0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [8.0, 8.0, 0.0, 0.0]]) + + D = np.array([[0.0], + [0.0], + [1.0]]) + + # Get the transfer functions for all the outputs in one call. 
+ b_all, a = ss2tf(A, B, C, D) + + # Get the transfer functions for each output separately. + b0, a0 = ss2tf(A, B, C[0], D[0]) + b1, a1 = ss2tf(A, B, C[1], D[1]) + b2, a2 = ss2tf(A, B, C[2], D[2]) + + # Check that we got the same results. + assert_allclose(a0, a, rtol=1e-13) + assert_allclose(a1, a, rtol=1e-13) + assert_allclose(a2, a, rtol=1e-13) + assert_allclose(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14) + + +class TestLsim: + digits_accuracy = 7 + + def lti_nowarn(self, *args): + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(*args) + return system + + def test_first_order(self): + # y' = -y + # exact solution is y(t) = exp(-t) + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_x = np.exp(-tout) + assert_almost_equal(x, expected_x) + assert_almost_equal(y, expected_x) + + def test_second_order(self): + t = np.linspace(0, 10, 1001) + u = np.zeros_like(t) + # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. + # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution + # is (1-t)*exp(-t). + system = self.lti_nowarn([1.0], [1.0, 2.0, 1.0]) + tout, y, x = lsim(system, u, t, X0=[1.0, 0.0]) + expected_x = (1.0 - tout) * np.exp(-tout) + assert_almost_equal(x[:, 0], expected_x) + + def test_integrator(self): + # integrator: y' = u + system = self.lti_nowarn(0., 1., 1., 0.) + t = np.linspace(0,5) + u = t + tout, y, x = lsim(system, u, t) + expected_x = 0.5 * tout**2 + assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) + assert_almost_equal(y, expected_x, decimal=self.digits_accuracy) + + def test_two_states(self): + # A system with two state variables, two inputs, and one output. 
+ A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1, 2)) + + system = self.lti_nowarn(A, B, C, D) + + t = np.linspace(0, 10.0, 21) + u = np.zeros((len(t), 2)) + tout, y, x = lsim(system, U=u, T=t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0 * tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:, 0], expected_x0) + assert_almost_equal(x[:, 1], expected_x1) + + def test_double_integrator(self): + # double integrator: y'' = 2u + A = np.array([[0., 1.], [0., 0.]]) + B = np.array([[0.], [1.]]) + C = np.array([[2., 0.]]) + system = self.lti_nowarn(A, B, C, 0.) + t = np.linspace(0,5) + u = np.ones_like(t) + tout, y, x = lsim(system, u, t) + expected_x = np.transpose(np.array([0.5 * tout**2, tout])) + expected_y = tout**2 + assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) + assert_almost_equal(y, expected_y, decimal=self.digits_accuracy) + + def test_jordan_block(self): + # Non-diagonalizable A matrix + # x1' + x1 = x2 + # x2' + x2 = u + # y = x1 + # Exact solution with u = 0 is y(t) = t exp(-t) + A = np.array([[-1., 1.], [0., -1.]]) + B = np.array([[0.], [1.]]) + C = np.array([[1., 0.]]) + system = self.lti_nowarn(A, B, C, 0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[0.0, 1.0]) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_miso(self): + # A system with two state variables, two inputs, and one output. 
+ A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1,2)) + system = self.lti_nowarn(A, B, C, D) + + t = np.linspace(0, 5.0, 101) + u = np.zeros((len(t), 2)) + tout, y, x = lsim(system, u, t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0*tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:,0], expected_x0) + assert_almost_equal(x[:,1], expected_x1) + + def test_nonzero_initial_time(self): + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(1,2) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_nonequal_timesteps(self): + t = np.array([0.0, 1.0, 1.0, 3.0]) + u = np.array([0.0, 0.0, 1.0, 1.0]) + # Simple integrator: x'(t) = u(t) + system = ([1.0], [1.0, 0.0]) + with assert_raises(ValueError, + match="Time steps are not equally spaced."): + tout, y, x = lsim(system, u, t, X0=[1.0]) + + +class TestImpulse: + def test_first_order(self): + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = impulse(system) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_fixed_time(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = impulse(system, T=t) + assert_equal(tout.shape, (n,)) + assert_almost_equal(tout, t) + expected_y = np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_first_order_initial(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). 
+ system = ([1.0], [1.0,1.0]) + tout, y = impulse(system, X0=3.0) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_initial_list(self): + # Specify an initial condition as a list. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = impulse(system, X0=[3.0]) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_integrator(self): + # Simple integrator: x'(t) = u(t) + system = ([1.0], [1.0,0.0]) + tout, y = impulse(system) + expected_y = np.ones_like(tout) + assert_almost_equal(y, expected_y) + + def test_second_order(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact impulse response is t*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = impulse(system) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = impulse(system, X0=[3], T=[5, 6]) + tout, y = impulse(system, X0=[3], T=[5]) + + def test_array_like2(self): + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = impulse(system, X0=3, T=5) + + +class TestStep: + def test_first_order(self): + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system) + expected_y = 1.0 - np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_fixed_time(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). 
+ system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = step(system, T=t) + assert_equal(tout.shape, (n,)) + assert_almost_equal(tout, t) + expected_y = 1 - np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_first_order_initial(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system, X0=3.0) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_initial_list(self): + # Specify an initial condition as a list. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system, X0=[3.0]) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_integrator(self): + # Simple integrator: x'(t) = u(t) + # Exact step response is x(t) = t. + system = ([1.0],[1.0,0.0]) + tout, y = step(system) + expected_y = tout + assert_almost_equal(y, expected_y) + + def test_second_order(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact step response is 1 - (1 + t)*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = step(system) + expected_y = 1 - (1 + tout) * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = step(system, T=[5, 6]) + + def test_complex_input(self): + # Test that complex input doesn't raise an error. + # `step` doesn't seem to have been designed for complex input, but this + # works and may be used, so add regression test. See gh-2654. + step(([], [-1], 1+0j)) + + +class TestLti: + def test_lti_instantiation(self): + # Test that lti can be instantiated with sequences, scalars. 
+ # See PR-225. + + # TransferFunction + s = lti([1], [-1]) + assert_(isinstance(s, TransferFunction)) + assert_(isinstance(s, lti)) + assert_(not isinstance(s, dlti)) + assert_(s.dt is None) + + # ZerosPolesGain + s = lti(np.array([]), np.array([-1]), 1) + assert_(isinstance(s, ZerosPolesGain)) + assert_(isinstance(s, lti)) + assert_(not isinstance(s, dlti)) + assert_(s.dt is None) + + # StateSpace + s = lti([], [-1], 1) + s = lti([1], [-1], 1, 3) + assert_(isinstance(s, StateSpace)) + assert_(isinstance(s, lti)) + assert_(not isinstance(s, dlti)) + assert_(s.dt is None) + + +class TestStateSpace: + def test_initialization(self): + # Check that all initializations work + StateSpace(1, 1, 1, 1) + StateSpace([1], [2], [3], [4]) + StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), + np.array([[1, 0]]), np.array([[0]])) + + def test_conversion(self): + # Check the conversion functions + s = StateSpace(1, 2, 3, 4) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(StateSpace(s) is not s) + assert_(s.to_ss() is not s) + + def test_properties(self): + # Test setters/getters for cross class properties. 
+ # This implicitly tests to_tf() and to_zpk() + + # Getters + s = StateSpace(1, 1, 1, 1) + assert_equal(s.poles, [1]) + assert_equal(s.zeros, [0]) + assert_(s.dt is None) + + def test_operators(self): + # Test +/-/* operators on systems + + class BadType: + pass + + s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]), + np.array([[1], [0]]), + np.array([[1, 0]]), + np.array([[0]]), + ) + + s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]), + np.array([[1], [0]]), + np.array([[1, 0]]), + np.array([[0]]) + ) + + s_discrete = s1.to_discrete(0.1) + s2_discrete = s2.to_discrete(0.2) + s3_discrete = s2.to_discrete(0.1) + + # Impulse response + t = np.linspace(0, 1, 100) + u = np.zeros_like(t) + u[0] = 1 + + # Test multiplication + for typ in (int, float, complex, np.float32, np.complex128, np.array): + assert_allclose(lsim(typ(2) * s1, U=u, T=t)[1], + typ(2) * lsim(s1, U=u, T=t)[1]) + + assert_allclose(lsim(s1 * typ(2), U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] * typ(2)) + + assert_allclose(lsim(s1 / typ(2), U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] / typ(2)) + + with assert_raises(TypeError): + typ(2) / s1 + + assert_allclose(lsim(s1 * 2, U=u, T=t)[1], + lsim(s1, U=2 * u, T=t)[1]) + + assert_allclose(lsim(s1 * s2, U=u, T=t)[1], + lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1], + atol=1e-5) + + with assert_raises(TypeError): + s1 / s1 + + with assert_raises(TypeError): + s1 * s_discrete + + with assert_raises(TypeError): + # Check different discretization constants + s_discrete * s2_discrete + + with assert_raises(TypeError): + s1 * BadType() + + with assert_raises(TypeError): + BadType() * s1 + + with assert_raises(TypeError): + s1 / BadType() + + with assert_raises(TypeError): + BadType() / s1 + + # Test addition + assert_allclose(lsim(s1 + 2, U=u, T=t)[1], + 2 * u + lsim(s1, U=u, T=t)[1]) + + # Check for dimension mismatch + with assert_raises(ValueError): + s1 + np.array([1, 2]) + + with assert_raises(ValueError): + np.array([1, 2]) + s1 + + with assert_raises(TypeError): + 
s1 + s_discrete + + with assert_raises(ValueError): + s1 / np.array([[1, 2], [3, 4]]) + + with assert_raises(TypeError): + # Check different discretization constants + s_discrete + s2_discrete + + with assert_raises(TypeError): + s1 + BadType() + + with assert_raises(TypeError): + BadType() + s1 + + assert_allclose(lsim(s1 + s2, U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1]) + + # Test subtraction + assert_allclose(lsim(s1 - 2, U=u, T=t)[1], + -2 * u + lsim(s1, U=u, T=t)[1]) + + assert_allclose(lsim(2 - s1, U=u, T=t)[1], + 2 * u + lsim(-s1, U=u, T=t)[1]) + + assert_allclose(lsim(s1 - s2, U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1]) + + with assert_raises(TypeError): + s1 - BadType() + + with assert_raises(TypeError): + BadType() - s1 + + s = s_discrete + s3_discrete + assert_(s.dt == 0.1) + + s = s_discrete * s3_discrete + assert_(s.dt == 0.1) + + s = 3 * s_discrete + assert_(s.dt == 0.1) + + s = -s_discrete + assert_(s.dt == 0.1) + +class TestTransferFunction: + def test_initialization(self): + # Check that all initializations work + TransferFunction(1, 1) + TransferFunction([1], [2]) + TransferFunction(np.array([1]), np.array([2])) + + def test_conversion(self): + # Check the conversion functions + s = TransferFunction([1, 0], [1, -1]) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(TransferFunction(s) is not s) + assert_(s.to_tf() is not s) + + def test_properties(self): + # Test setters/getters for cross class properties. 
+ # This implicitly tests to_ss() and to_zpk() + + # Getters + s = TransferFunction([1, 0], [1, -1]) + assert_equal(s.poles, [1]) + assert_equal(s.zeros, [0]) + + +class TestZerosPolesGain: + def test_initialization(self): + # Check that all initializations work + ZerosPolesGain(1, 1, 1) + ZerosPolesGain([1], [2], 1) + ZerosPolesGain(np.array([1]), np.array([2]), 1) + + def test_conversion(self): + #Check the conversion functions + s = ZerosPolesGain(1, 2, 3) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(ZerosPolesGain(s) is not s) + assert_(s.to_zpk() is not s) + + +class Test_abcd_normalize: + def setup_method(self): + self.A = np.array([[1.0, 2.0], [3.0, 4.0]]) + self.B = np.array([[-1.0], [5.0]]) + self.C = np.array([[4.0, 5.0]]) + self.D = np.array([[2.5]]) + + def test_no_matrix_fails(self): + assert_raises(ValueError, abcd_normalize) + + def test_A_nosquare_fails(self): + assert_raises(ValueError, abcd_normalize, [1, -1], + self.B, self.C, self.D) + + def test_AB_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_AC_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + [[4.0], [5.0]], self.D) + + def test_CD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + self.C, [2.5, 0]) + + def test_BD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_normalized_matrices_unchanged(self): + A, B, C, D = abcd_normalize(self.A, self.B, self.C, self.D) + assert_equal(A, self.A) + assert_equal(B, self.B) + assert_equal(C, self.C) + assert_equal(D, self.D) + + def test_shapes(self): + A, B, C, D = abcd_normalize(self.A, self.B, [1, 0], 0) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(A.shape[0], C.shape[1]) 
+ assert_equal(C.shape[0], D.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + + def test_zero_dimension_is_not_none1(self): + B_ = np.zeros((2, 0)) + D_ = np.zeros((0, 0)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, D=D_) + assert_equal(A, self.A) + assert_equal(B, B_) + assert_equal(D, D_) + assert_equal(C.shape[0], D_.shape[0]) + assert_equal(C.shape[1], self.A.shape[0]) + + def test_zero_dimension_is_not_none2(self): + B_ = np.zeros((2, 0)) + C_ = np.zeros((0, 2)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, C=C_) + assert_equal(A, self.A) + assert_equal(B, B_) + assert_equal(C, C_) + assert_equal(D.shape[0], C_.shape[0]) + assert_equal(D.shape[1], B_.shape[1]) + + def test_missing_A(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C, D=self.D) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) + + def test_missing_B(self): + A, B, C, D = abcd_normalize(A=self.A, C=self.C, D=self.D) + assert_equal(B.shape[0], A.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + assert_equal(B.shape, (self.A.shape[0], self.D.shape[1])) + + def test_missing_C(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, D=self.D) + assert_equal(C.shape[0], D.shape[0]) + assert_equal(C.shape[1], A.shape[0]) + assert_equal(C.shape, (self.D.shape[0], self.A.shape[0])) + + def test_missing_D(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, C=self.C) + assert_equal(D.shape[0], C.shape[0]) + assert_equal(D.shape[1], B.shape[1]) + assert_equal(D.shape, (self.C.shape[0], self.B.shape[1])) + + def test_missing_AB(self): + A, B, C, D = abcd_normalize(C=self.C, D=self.D) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + assert_equal(A.shape, (self.C.shape[1], self.C.shape[1])) + assert_equal(B.shape, (self.C.shape[1], self.D.shape[1])) + + def test_missing_AC(self): + A, B, C, D = abcd_normalize(B=self.B, D=self.D) + 
assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(C.shape[0], D.shape[0]) + assert_equal(C.shape[1], A.shape[0]) + assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) + assert_equal(C.shape, (self.D.shape[0], self.B.shape[0])) + + def test_missing_AD(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(D.shape[0], C.shape[0]) + assert_equal(D.shape[1], B.shape[1]) + assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) + assert_equal(D.shape, (self.C.shape[0], self.B.shape[1])) + + def test_missing_BC(self): + A, B, C, D = abcd_normalize(A=self.A, D=self.D) + assert_equal(B.shape[0], A.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + assert_equal(C.shape[0], D.shape[0]) + assert_equal(C.shape[1], A.shape[0]) + assert_equal(B.shape, (self.A.shape[0], self.D.shape[1])) + assert_equal(C.shape, (self.D.shape[0], self.A.shape[0])) + + def test_missing_ABC_fails(self): + assert_raises(ValueError, abcd_normalize, D=self.D) + + def test_missing_BD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, C=self.C) + + def test_missing_CD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, B=self.B) + + +class Test_bode: + + def test_01(self): + # Test bode() magnitude calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # cutoff: 1 rad/s, slope: -20 dB/decade + # H(s=0.1) ~= 0 dB + # H(s=1) ~= -3 dB + # H(s=10) ~= -20 dB + # H(s=100) ~= -40 dB + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + expected_mag = [0, -3, -20, -40] + assert_almost_equal(mag, expected_mag, decimal=1) + + def test_02(self): + # Test bode() phase calculation (manual sanity check). 
+ # 1st order low-pass filter: H(s) = 1 / (s + 1), + # angle(H(s=0.1)) ~= -5.7 deg + # angle(H(s=1)) ~= -45 deg + # angle(H(s=10)) ~= -84.3 deg + system = lti([1], [1, 1]) + w = [0.1, 1, 10] + w, mag, phase = bode(system, w=w) + expected_phase = [-5.7, -45, -84.3] + assert_almost_equal(phase, expected_phase, decimal=1) + + def test_03(self): + # Test bode() magnitude calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_mag = 20.0 * np.log10(abs(y)) + assert_almost_equal(mag, expected_mag) + + def test_04(self): + # Test bode() phase calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_phase = np.arctan2(y.imag, y.real) * 180.0 / np.pi + assert_almost_equal(phase, expected_phase) + + def test_05(self): + # Test that bode() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + n = 10 + # Expected range is from 0.01 to 10. + expected_w = np.logspace(-2, 1, n) + w, mag, phase = bode(system, n=n) + assert_almost_equal(w, expected_w) + + def test_06(self): + # Test that bode() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = lti([1], [1, 0]) + w, mag, phase = bode(system, n=2) + assert_equal(w[0], 0.01) # a fail would give not-a-number + + def test_07(self): + # bode() should not fail on a system with pure imaginary poles. + # The test passes if bode doesn't raise an exception. + system = lti([1], [1, 0, 100]) + w, mag, phase = bode(system, n=2) + + def test_08(self): + # Test that bode() return continuous phase, issues/2331. 
+ system = lti([], [-10, -30, -40, -60, -70], 1) + w, mag, phase = system.bode(w=np.logspace(-3, 40, 100)) + assert_almost_equal(min(phase), -450, decimal=15) + + def test_from_state_space(self): + # Ensure that bode works with a system that was created from the + # state space representation matrices A, B, C, D. In this case, + # system.num will be a 2-D array with shape (1, n+1), where (n,n) + # is the shape of A. + # A Butterworth lowpass filter is used, so we know the exact + # frequency response. + a = np.array([1.0, 2.0, 2.0, 1.0]) + A = linalg.companion(a).T + B = np.array([[0.0], [0.0], [1.0]]) + C = np.array([[1.0, 0.0, 0.0]]) + D = np.array([[0.0]]) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(A, B, C, D) + w, mag, phase = bode(system, n=100) + + expected_magnitude = 20 * np.log10(np.sqrt(1.0 / (1.0 + w**6))) + assert_almost_equal(mag, expected_magnitude) + + +class Test_freqresp: + + def test_output_manual(self): + # Test freqresp() output calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # re(H(s=0.1)) ~= 0.99 + # re(H(s=1)) ~= 0.5 + # re(H(s=10)) ~= 0.0099 + system = lti([1], [1, 1]) + w = [0.1, 1, 10] + w, H = freqresp(system, w=w) + expected_re = [0.99, 0.5, 0.0099] + expected_im = [-0.099, -0.5, -0.099] + assert_almost_equal(H.real, expected_re, decimal=1) + assert_almost_equal(H.imag, expected_im, decimal=1) + + def test_output(self): + # Test freqresp() output calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, H = freqresp(system, w=w) + s = w * 1j + expected = np.polyval(system.num, s) / np.polyval(system.den, s) + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. 
+ system = lti([1], [1, 1]) + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqresp(system, n=n) + assert_almost_equal(w, expected_w) + + def test_pole_zero(self): + # Test that freqresp() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = lti([1], [1, 0]) + w, H = freqresp(system, n=2) + assert_equal(w[0], 0.01) # a fail would give not-a-number + + def test_from_state_space(self): + # Ensure that freqresp works with a system that was created from the + # state space representation matrices A, B, C, D. In this case, + # system.num will be a 2-D array with shape (1, n+1), where (n,n) is + # the shape of A. + # A Butterworth lowpass filter is used, so we know the exact + # frequency response. + a = np.array([1.0, 2.0, 2.0, 1.0]) + A = linalg.companion(a).T + B = np.array([[0.0],[0.0],[1.0]]) + C = np.array([[1.0, 0.0, 0.0]]) + D = np.array([[0.0]]) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(A, B, C, D) + w, H = freqresp(system, n=100) + s = w * 1j + expected = (1.0 / (1.0 + 2*s + 2*s**2 + s**3)) + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + + def test_from_zpk(self): + # 4th order low-pass filter: H(s) = 1 / (s + 1) + system = lti([],[-1]*4,[1]) + w = [0.1, 1, 10, 100] + w, H = freqresp(system, w=w) + s = w * 1j + expected = 1 / (s + 1)**4 + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py new file mode 100644 index 0000000000000000000000000000000000000000..c4e79969974fb4ba376bbf4d935a7a94e3064a5a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py @@ -0,0 +1,65 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +from pytest import raises as 
assert_raises + +from numpy.fft import fft, ifft + +from scipy.signal import max_len_seq + + +class TestMLS: + + def test_mls_inputs(self): + # can't all be zero state + assert_raises(ValueError, max_len_seq, + 10, state=np.zeros(10)) + # wrong size state + assert_raises(ValueError, max_len_seq, 10, + state=np.ones(3)) + # wrong length + assert_raises(ValueError, max_len_seq, 10, length=-1) + assert_array_equal(max_len_seq(10, length=0)[0], []) + # unknown taps + assert_raises(ValueError, max_len_seq, 64) + # bad taps + assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1]) + + def test_mls_output(self): + # define some alternate working taps + alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4], + 8: [7, 5, 3]} + # assume the other bit levels work, too slow to test higher orders... + for nbits in range(2, 8): + for state in [None, np.round(np.random.rand(nbits))]: + for taps in [None, alt_taps[nbits]]: + if state is not None and np.all(state == 0): + state[0] = 1 # they can't all be zero + orig_m = max_len_seq(nbits, state=state, + taps=taps)[0] + m = 2. * orig_m - 1. # convert to +/- 1 representation + # First, make sure we got all 1's or -1 + err_msg = "mls had non binary terms" + assert_array_equal(np.abs(m), np.ones_like(m), + err_msg=err_msg) + # Test via circular cross-correlation, which is just mult. 
+ # in the frequency domain with one signal conjugated + tester = np.real(ifft(fft(m) * np.conj(fft(m)))) + out_len = 2**nbits - 1 + # impulse amplitude == test_len + err_msg = "mls impulse has incorrect value" + assert_allclose(tester[0], out_len, err_msg=err_msg) + # steady-state is -1 + err_msg = "mls steady-state has incorrect value" + assert_allclose(tester[1:], np.full(out_len - 1, -1), + err_msg=err_msg) + # let's do the split thing using a couple options + for n in (1, 2**(nbits - 1)): + m1, s1 = max_len_seq(nbits, state=state, taps=taps, + length=n) + m2, s2 = max_len_seq(nbits, state=s1, taps=taps, + length=1) + m3, s3 = max_len_seq(nbits, state=s2, taps=taps, + length=out_len - n - 1) + new_m = np.concatenate((m1, m2, m3)) + assert_array_equal(orig_m, new_m) + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py new file mode 100644 index 0000000000000000000000000000000000000000..77380c5496364745775b0a3b6fcf149e72722398 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py @@ -0,0 +1,891 @@ +import copy + +import numpy as np +from numpy.testing import ( + assert_, + assert_equal, + assert_allclose, + assert_array_equal +) +import pytest +from pytest import raises, warns + +from scipy.signal._peak_finding import ( + argrelmax, + argrelmin, + peak_prominences, + peak_widths, + _unpack_condition_args, + find_peaks, + find_peaks_cwt, + _identify_ridge_lines +) +from scipy.signal.windows import gaussian +from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning + + +def _gen_gaussians(center_locs, sigmas, total_length): + xdata = np.arange(0, total_length).astype(float) + out_data = np.zeros(total_length, dtype=float) + for ind, sigma in enumerate(sigmas): + tmp = (xdata - center_locs[ind]) / sigma + out_data += np.exp(-(tmp**2)) + return out_data + + +def 
_gen_gaussians_even(sigmas, total_length): + num_peaks = len(sigmas) + delta = total_length / (num_peaks + 1) + center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int) + out_data = _gen_gaussians(center_locs, sigmas, total_length) + return out_data, center_locs + + +def _gen_ridge_line(start_locs, max_locs, length, distances, gaps): + """ + Generate coordinates for a ridge line. + + Will be a series of coordinates, starting a start_loc (length 2). + The maximum distance between any adjacent columns will be + `max_distance`, the max distance between adjacent rows + will be `map_gap'. + + `max_locs` should be the size of the intended matrix. The + ending coordinates are guaranteed to be less than `max_locs`, + although they may not approach `max_locs` at all. + """ + + def keep_bounds(num, max_val): + out = max(num, 0) + out = min(out, max_val) + return out + + gaps = copy.deepcopy(gaps) + distances = copy.deepcopy(distances) + + locs = np.zeros([length, 2], dtype=int) + locs[0, :] = start_locs + total_length = max_locs[0] - start_locs[0] - sum(gaps) + if total_length < length: + raise ValueError('Cannot generate ridge line according to constraints') + dist_int = length / len(distances) - 1 + gap_int = length / len(gaps) - 1 + for ind in range(1, length): + nextcol = locs[ind - 1, 1] + nextrow = locs[ind - 1, 0] + 1 + if (ind % dist_int == 0) and (len(distances) > 0): + nextcol += ((-1)**ind)*distances.pop() + if (ind % gap_int == 0) and (len(gaps) > 0): + nextrow += gaps.pop() + nextrow = keep_bounds(nextrow, max_locs[0]) + nextcol = keep_bounds(nextcol, max_locs[1]) + locs[ind, :] = [nextrow, nextcol] + + return [locs[:, 0], locs[:, 1]] + + +class TestLocalMaxima1d: + + def test_empty(self): + """Test with empty signal.""" + x = np.array([], dtype=np.float64) + for array in _local_maxima_1d(x): + assert_equal(array, np.array([])) + assert_(array.base is None) + + def test_linear(self): + """Test with linear signal.""" + x = np.linspace(0, 
100) + for array in _local_maxima_1d(x): + assert_equal(array, np.array([])) + assert_(array.base is None) + + def test_simple(self): + """Test with simple signal.""" + x = np.linspace(-10, 10, 50) + x[2::3] += 1 + expected = np.arange(2, 50, 3) + for array in _local_maxima_1d(x): + # For plateaus of size 1, the edges are identical with the + # midpoints + assert_equal(array, expected) + assert_(array.base is None) + + def test_flat_maxima(self): + """Test if flat maxima are detected correctly.""" + x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10, + -5, -5, -5, -5, -5, -10]) + midpoints, left_edges, right_edges = _local_maxima_1d(x) + assert_equal(midpoints, np.array([2, 4, 8, 12, 18])) + assert_equal(left_edges, np.array([2, 4, 7, 11, 16])) + assert_equal(right_edges, np.array([2, 5, 9, 14, 20])) + + @pytest.mark.parametrize('x', [ + np.array([1., 0, 2]), + np.array([3., 3, 0, 4, 4]), + np.array([5., 5, 5, 0, 6, 6, 6]), + ]) + def test_signal_edges(self, x): + """Test if behavior on signal edges is correct.""" + for array in _local_maxima_1d(x): + assert_equal(array, np.array([])) + assert_(array.base is None) + + def test_exceptions(self): + """Test input validation and raised exceptions.""" + with raises(ValueError, match="wrong number of dimensions"): + _local_maxima_1d(np.ones((1, 1))) + with raises(ValueError, match="expected 'const float64_t'"): + _local_maxima_1d(np.ones(1, dtype=int)) + with raises(TypeError, match="list"): + _local_maxima_1d([1., 2.]) + with raises(TypeError, match="'x' must not be None"): + _local_maxima_1d(None) + + +class TestRidgeLines: + + def test_empty(self): + test_matr = np.zeros([20, 100]) + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert_(len(lines) == 0) + + def test_minimal(self): + test_matr = np.zeros([20, 100]) + test_matr[0, 10] = 1 + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert_(len(lines) == 1) + + test_matr = np.zeros([20, 100]) + test_matr[0:2, 10] = 1 
+ lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert_(len(lines) == 1) + + def test_single_pass(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 0, 1] + test_matr = np.zeros([20, 50]) + 1e-12 + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_distances = np.full(20, max(distances)) + identified_lines = _identify_ridge_lines(test_matr, + max_distances, + max(gaps) + 1) + assert_array_equal(identified_lines, [line]) + + def test_single_bigdist(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 4] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 3 + max_distances = np.full(20, max_dist) + #This should get 2 lines, since the distance is too large + identified_lines = _identify_ridge_lines(test_matr, + max_distances, + max(gaps) + 1) + assert_(len(identified_lines) == 2) + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggap(self): + distances = [0, 1, 2, 5] + max_gap = 3 + gaps = [0, 4, 2, 1] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 6 + max_distances = np.full(20, max_dist) + #This should get 2 lines, since the gap is too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert_(len(identified_lines) == 2) + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggaps(self): + distances = [0] + max_gap = 1 + gaps = [3, 6] + 
test_matr = np.zeros([50, 50]) + length = 30 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 1 + max_distances = np.full(50, max_dist) + #This should get 3 lines, since the gaps are too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert_(len(identified_lines) == 3) + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + +class TestArgrel: + + def test_empty(self): + # Regression test for gh-2832. + # When there are no relative extrema, make sure that + # the number of empty arrays returned matches the + # dimension of the input. + + empty_array = np.array([], dtype=int) + + z1 = np.zeros(5) + + i = argrelmin(z1) + assert_equal(len(i), 1) + assert_array_equal(i[0], empty_array) + + z2 = np.zeros((3,5)) + + row, col = argrelmin(z2, axis=0) + assert_array_equal(row, empty_array) + assert_array_equal(col, empty_array) + + row, col = argrelmin(z2, axis=1) + assert_array_equal(row, empty_array) + assert_array_equal(col, empty_array) + + def test_basic(self): + # Note: the docstrings for the argrel{min,max,extrema} functions + # do not give a guarantee of the order of the indices, so we'll + # sort them before testing. 
+ + x = np.array([[1, 2, 2, 3, 2], + [2, 1, 2, 2, 3], + [3, 2, 1, 2, 2], + [2, 3, 2, 1, 2], + [1, 2, 3, 2, 1]]) + + row, col = argrelmax(x, axis=0) + order = np.argsort(row) + assert_equal(row[order], [1, 2, 3]) + assert_equal(col[order], [4, 0, 1]) + + row, col = argrelmax(x, axis=1) + order = np.argsort(row) + assert_equal(row[order], [0, 3, 4]) + assert_equal(col[order], [3, 1, 2]) + + row, col = argrelmin(x, axis=0) + order = np.argsort(row) + assert_equal(row[order], [1, 2, 3]) + assert_equal(col[order], [1, 2, 3]) + + row, col = argrelmin(x, axis=1) + order = np.argsort(row) + assert_equal(row[order], [1, 2, 3]) + assert_equal(col[order], [1, 2, 3]) + + def test_highorder(self): + order = 2 + sigmas = [1.0, 2.0, 10.0, 5.0, 15.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 500) + test_data[act_locs + order] = test_data[act_locs]*0.99999 + test_data[act_locs - order] = test_data[act_locs]*0.99999 + rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0] + + assert_(len(rel_max_locs) == len(act_locs)) + assert_((rel_max_locs == act_locs).all()) + + def test_2d_gaussians(self): + sigmas = [1.0, 2.0, 10.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 100) + rot_factor = 20 + rot_range = np.arange(0, len(test_data)) - rot_factor + test_data_2 = np.vstack([test_data, test_data[rot_range]]) + rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1) + + for rw in range(0, test_data_2.shape[0]): + inds = (rel_max_rows == rw) + + assert_(len(rel_max_cols[inds]) == len(act_locs)) + assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all()) + + +class TestPeakProminences: + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. 
+ """ + out = peak_prominences([1, 2, 3], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert_(arr.size == 0) + assert_(arr.dtype == dtype) + + out = peak_prominences([], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert_(arr.size == 0) + assert_(arr.dtype == dtype) + + def test_basic(self): + """ + Test if height of prominences is correctly calculated in signal with + rising baseline (peak widths are 1 sample). + """ + # Prepare basic signal + x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1]) + peaks = np.array([1, 2, 4, 6]) + lbases = np.array([0, 0, 0, 5]) + rbases = np.array([3, 3, 5, 7]) + proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0) + # Test if calculation matches handcrafted result + out = peak_prominences(x, peaks) + assert_equal(out[0], proms) + assert_equal(out[1], lbases) + assert_equal(out[2], rbases) + + def test_edge_cases(self): + """ + Test edge cases. + """ + # Peaks have same height, prominence and bases + x = [0, 2, 1, 2, 1, 2, 0] + peaks = [1, 3, 5] + proms, lbases, rbases = peak_prominences(x, peaks) + assert_equal(proms, [2, 2, 2]) + assert_equal(lbases, [0, 0, 0]) + assert_equal(rbases, [6, 6, 6]) + + # Peaks have same height & prominence but different bases + x = [0, 1, 0, 1, 0, 1, 0] + peaks = np.array([1, 3, 5]) + proms, lbases, rbases = peak_prominences(x, peaks) + assert_equal(proms, [1, 1, 1]) + assert_equal(lbases, peaks - 1) + assert_equal(rbases, peaks + 1) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. + """ + x = np.repeat([-9, 9, 9, 0, 3, 1], 2) + peaks = np.repeat([1, 2, 4], 2) + proms, lbases, rbases = peak_prominences(x[::2], peaks[::2]) + assert_equal(proms, [9, 9, 2]) + assert_equal(lbases, [0, 0, 3]) + assert_equal(rbases, [3, 3, 5]) + + def test_wlen(self): + """ + Test if wlen actually shrinks the evaluation range correctly. 
+ """ + x = [0, 1, 2, 3, 1, 0, -1] + peak = [3] + # Test rounding behavior of wlen + assert_equal(peak_prominences(x, peak), [3., 0, 6]) + for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]: + assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i]) + + def test_exceptions(self): + """ + Verify that exceptions and warnings are raised. + """ + # x with dimension > 1 + with raises(ValueError, match='1-D array'): + peak_prominences([[0, 1, 1, 0]], [1, 2]) + # peaks with dimension > 1 + with raises(ValueError, match='1-D array'): + peak_prominences([0, 1, 1, 0], [[1, 2]]) + # x with dimension < 1 + with raises(ValueError, match='1-D array'): + peak_prominences(3, [0,]) + + # empty x with supplied + with raises(ValueError, match='not a valid index'): + peak_prominences([], [0]) + # invalid indices with non-empty x + for p in [-100, -1, 3, 1000]: + with raises(ValueError, match='not a valid index'): + peak_prominences([1, 0, 2], [p]) + + # peaks is not cast-able to np.intp + with raises(TypeError, match='cannot safely cast'): + peak_prominences([0, 1, 1, 0], [1.1, 2.3]) + + # wlen < 3 + with raises(ValueError, match='wlen'): + peak_prominences(np.arange(10), [3, 5], wlen=1) + + def test_warnings(self): + """ + Verify that appropriate warnings are raised. + """ + msg = "some peaks have a prominence of 0" + for p in [0, 1, 2]: + with warns(PeakPropertyWarning, match=msg): + peak_prominences([1, 0, 2], [p,]) + with warns(PeakPropertyWarning, match=msg): + peak_prominences([0, 1, 1, 1, 0], [2], wlen=2) + + +class TestPeakWidths: + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. 
+ """ + widths = peak_widths([], [])[0] + assert_(isinstance(widths, np.ndarray)) + assert_equal(widths.size, 0) + widths = peak_widths([1, 2, 3], [])[0] + assert_(isinstance(widths, np.ndarray)) + assert_equal(widths.size, 0) + out = peak_widths([], []) + for arr in out: + assert_(isinstance(arr, np.ndarray)) + assert_equal(arr.size, 0) + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_basic(self): + """ + Test a simple use case with easy to verify results at different relative + heights. + """ + x = np.array([1, 0, 1, 2, 1, 0, -1]) + prominence = 2 + for rel_height, width_true, lip_true, rip_true in [ + (0., 0., 3., 3.), # raises warning + (0.25, 1., 2.5, 3.5), + (0.5, 2., 2., 4.), + (0.75, 3., 1.5, 4.5), + (1., 4., 1., 5.), + (2., 5., 1., 6.), + (3., 5., 1., 6.) + ]: + width_calc, height, lip_calc, rip_calc = peak_widths( + x, [3], rel_height) + assert_allclose(width_calc, width_true) + assert_allclose(height, 2 - rel_height * prominence) + assert_allclose(lip_calc, lip_true) + assert_allclose(rip_calc, rip_true) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. + """ + x = np.repeat([0, 100, 50], 4) + peaks = np.repeat([1], 3) + result = peak_widths(x[::4], peaks[::3]) + assert_equal(result, [0.75, 75, 0.75, 1.5]) + + def test_exceptions(self): + """ + Verify that argument validation works as intended. 
+ """ + with raises(ValueError, match='1-D array'): + # x with dimension > 1 + peak_widths(np.zeros((3, 4)), np.ones(3)) + with raises(ValueError, match='1-D array'): + # x with dimension < 1 + peak_widths(3, [0]) + with raises(ValueError, match='1-D array'): + # peaks with dimension > 1 + peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp)) + with raises(ValueError, match='1-D array'): + # peaks with dimension < 1 + peak_widths(np.arange(10), 3) + with raises(ValueError, match='not a valid index'): + # peak pos exceeds x.size + peak_widths(np.arange(10), [8, 11]) + with raises(ValueError, match='not a valid index'): + # empty x with peaks supplied + peak_widths([], [1, 2]) + with raises(TypeError, match='cannot safely cast'): + # peak cannot be safely casted to intp + peak_widths(np.arange(10), [1.1, 2.3]) + with raises(ValueError, match='rel_height'): + # rel_height is < 0 + peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1) + with raises(TypeError, match='None'): + # prominence data contains None + peak_widths([1, 2, 1], [1], prominence_data=(None, None, None)) + + def test_warnings(self): + """ + Verify that appropriate warnings are raised. 
+ """ + msg = "some peaks have a width of 0" + with warns(PeakPropertyWarning, match=msg): + # Case: rel_height is 0 + peak_widths([0, 1, 0], [1], rel_height=0) + with warns(PeakPropertyWarning, match=msg): + # Case: prominence is 0 and bases are identical + peak_widths( + [0, 1, 1, 1, 0], [2], + prominence_data=(np.array([0.], np.float64), + np.array([2], np.intp), + np.array([2], np.intp)) + ) + + def test_mismatching_prominence_data(self): + """Test with mismatching peak and / or prominence data.""" + x = [0, 1, 0] + peak = [1] + for i, (prominences, left_bases, right_bases) in enumerate([ + ((1.,), (-1,), (2,)), # left base not in x + ((1.,), (0,), (3,)), # right base not in x + ((1.,), (2,), (0,)), # swapped bases same as peak + ((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks + ((1., 1.), (0,), (2,)), # arrays with different shapes + ((1.,), (0, 0), (2,)), # arrays with different shapes + ((1.,), (0,), (2, 2)) # arrays with different shapes + ]): + # Make sure input is matches output of signal.peak_prominences + prominence_data = (np.array(prominences, dtype=np.float64), + np.array(left_bases, dtype=np.intp), + np.array(right_bases, dtype=np.intp)) + # Test for correct exception + if i < 3: + match = "prominence data is invalid for peak" + else: + match = "arrays in `prominence_data` must have the same shape" + with raises(ValueError, match=match): + peak_widths(x, peak, prominence_data=prominence_data) + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_intersection_rules(self): + """Test if x == eval_height counts as an intersection.""" + # Flatt peak with two possible intersection points if evaluated at 1 + x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0] + # relative height is 0 -> width is 0 as well, raises warning + assert_allclose(peak_widths(x, peaks=[5], rel_height=0), + [(0.,), (3.,), (5.,), (5.,)]) + # width_height == x counts as intersection -> nearest 1 is chosen + assert_allclose(peak_widths(x, peaks=[5], 
def test_unpack_condition_args():
    """
    Verify parsing of condition arguments for `scipy.signal.find_peaks`.
    """
    x = np.arange(10)
    amin_true = x
    amax_true = amin_true + 10
    peaks = amin_true[1::2]

    # Scalars and tuples containing None pass through as (min, max) pairs
    assert_((None, None) == _unpack_condition_args((None, None), x, peaks))
    assert_((1, None) == _unpack_condition_args(1, x, peaks))
    assert_((1, None) == _unpack_condition_args((1, None), x, peaks))
    assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks))
    assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks))

    # Array borders are reduced to the entries belonging to `peaks`
    calc_min, calc_max = _unpack_condition_args((amin_true, amax_true),
                                                x, peaks)
    assert_equal(calc_min, amin_true[peaks])
    assert_equal(calc_max, amax_true[peaks])

    # Borders whose size differs from x are rejected
    with raises(ValueError, match="array size of lower"):
        _unpack_condition_args(amin_true, np.arange(11), peaks)
    with raises(ValueError, match="array size of upper"):
        _unpack_condition_args((None, amin_true), np.arange(11), peaks)
+ """ + # Prepare signal with peaks with peak_height == plateau_size + plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111]) + x = np.zeros(plateau_sizes.size * 2 + 1) + x[1::2] = plateau_sizes + repeats = np.ones(x.size, dtype=int) + repeats[1::2] = x[1::2] + x = np.repeat(x, repeats) + + # Test full output + peaks, props = find_peaks(x, plateau_size=(None, None)) + assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100]) + assert_equal(props["plateau_sizes"], plateau_sizes) + assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2) + assert_equal(props["right_edges"], peaks + plateau_sizes // 2) + + # Test conditions + assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100]) + assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7]) + assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33]) + + def test_height_condition(self): + """ + Test height condition for peaks. + """ + x = (0., 1/3, 0., 2.5, 0, 4., 0) + peaks, props = find_peaks(x, height=(None, None)) + assert_equal(peaks, np.array([1, 3, 5])) + assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.])) + assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5])) + assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3])) + assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3])) + + def test_threshold_condition(self): + """ + Test threshold condition for peaks. 
+ """ + x = (0, 2, 1, 4, -1) + peaks, props = find_peaks(x, threshold=(None, None)) + assert_equal(peaks, np.array([1, 3])) + assert_equal(props['left_thresholds'], np.array([2, 3])) + assert_equal(props['right_thresholds'], np.array([1, 5])) + assert_equal(find_peaks(x, threshold=2)[0], np.array([3])) + assert_equal(find_peaks(x, threshold=3.5)[0], np.array([])) + assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3])) + assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1])) + assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([])) + + def test_distance_condition(self): + """ + Test distance condition for peaks. + """ + # Peaks of different height with constant distance 3 + peaks_all = np.arange(1, 21, 3) + x = np.zeros(21) + x[peaks_all] += np.linspace(1, 2, peaks_all.size) + + # Test if peaks with "minimal" distance are still selected (distance = 3) + assert_equal(find_peaks(x, distance=3)[0], peaks_all) + + # Select every second peak (distance > 3) + peaks_subset = find_peaks(x, distance=3.0001)[0] + # Test if peaks_subset is subset of peaks_all + assert_( + np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0 + ) + # Test if every second peak was removed + assert_equal(np.diff(peaks_subset), 6) + + # Test priority of peak removal + x = [-2, 1, -1, 0, -3] + peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size + assert_(peaks_subset.size == 1 and peaks_subset[0] == 1) + + def test_prominence_condition(self): + """ + Test prominence condition for peaks. 
+ """ + x = np.linspace(0, 10, 100) + peaks_true = np.arange(1, 99, 2) + offset = np.linspace(1, 10, peaks_true.size) + x[peaks_true] += offset + prominences = x[peaks_true] - x[peaks_true + 1] + interval = (3, 9) + keep = np.nonzero( + (interval[0] <= prominences) & (prominences <= interval[1])) + + peaks_calc, properties = find_peaks(x, prominence=interval) + assert_equal(peaks_calc, peaks_true[keep]) + assert_equal(properties['prominences'], prominences[keep]) + assert_equal(properties['left_bases'], 0) + assert_equal(properties['right_bases'], peaks_true[keep] + 1) + + def test_width_condition(self): + """ + Test width condition for peaks. + """ + x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0]) + peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75) + assert_equal(peaks.size, 1) + assert_equal(peaks, 7) + assert_allclose(props['widths'], 1.35) + assert_allclose(props['width_heights'], 1.) + assert_allclose(props['left_ips'], 6.4) + assert_allclose(props['right_ips'], 7.75) + + def test_properties(self): + """ + Test returned properties. + """ + open_interval = (None, None) + x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9] + peaks, props = find_peaks(x, + height=open_interval, threshold=open_interval, + prominence=open_interval, width=open_interval) + assert_(len(props) == len(self.property_keys)) + for key in self.property_keys: + assert_(peaks.size == props[key].size) + + def test_raises(self): + """ + Test exceptions raised by function. + """ + with raises(ValueError, match="1-D array"): + find_peaks(np.array(1)) + with raises(ValueError, match="1-D array"): + find_peaks(np.ones((2, 2))) + with raises(ValueError, match="distance"): + find_peaks(np.arange(10), distance=-1) + + @pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0", + "ignore:some peaks have a width of 0") + def test_wlen_smaller_plateau(self): + """ + Test behavior of prominence and width calculation if the given window + length is smaller than a peak's plateau size. 
+ + Regression test for gh-9110. + """ + peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None), + width=(None, None), wlen=2) + assert_equal(peaks, 2) + assert_equal(props["prominences"], 0) + assert_equal(props["widths"], 0) + assert_equal(props["width_heights"], 1) + for key in ("left_bases", "right_bases", "left_ips", "right_ips"): + assert_equal(props[key], peaks) + + @pytest.mark.parametrize("kwargs", [ + {}, + {"distance": 3.0}, + {"prominence": (None, None)}, + {"width": (None, 2)}, + + ]) + def test_readonly_array(self, kwargs): + """ + Test readonly arrays are accepted. + """ + x = np.linspace(0, 10, 15) + x_readonly = x.copy() + x_readonly.flags.writeable = False + + peaks, _ = find_peaks(x) + peaks_readonly, _ = find_peaks(x_readonly, **kwargs) + + assert_allclose(peaks, peaks_readonly) + + +class TestFindPeaksCwt: + + def test_find_peaks_exact(self): + """ + Generate a series of gaussians and attempt to find the peak locations. + """ + sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0] + num_points = 500 + test_data, act_locs = _gen_gaussians_even(sigmas, num_points) + widths = np.arange(0.1, max(sigmas)) + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0, + min_length=None) + np.testing.assert_array_equal(found_locs, act_locs, + "Found maximum locations did not equal those expected") + + def test_find_peaks_withnoise(self): + """ + Verify that peak locations are (approximately) found + for a series of gaussians with added noise. 
+ """ + sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0] + num_points = 500 + test_data, act_locs = _gen_gaussians_even(sigmas, num_points) + widths = np.arange(0.1, max(sigmas)) + noise_amp = 0.07 + np.random.seed(18181911) + test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp) + found_locs = find_peaks_cwt(test_data, widths, min_length=15, + gap_thresh=1, min_snr=noise_amp / 5) + + np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number' + + 'of peaks found than expected') + diffs = np.abs(found_locs - act_locs) + max_diffs = np.array(sigmas) / 5 + np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed' + + 'by more than %s' % (max_diffs)) + + def test_find_peaks_nopeak(self): + """ + Verify that no peak is found in + data that's just noise. + """ + noise_amp = 1.0 + num_points = 100 + np.random.seed(181819141) + test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp) + widths = np.arange(10, 50) + found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30) + np.testing.assert_equal(len(found_locs), 0) + + def test_find_peaks_with_non_default_wavelets(self): + x = gaussian(200, 2) + widths = np.array([1, 2, 3, 4]) + a = find_peaks_cwt(x, widths, wavelet=gaussian) + + np.testing.assert_equal(np.array([100]), a) + + def test_find_peaks_window_size(self): + """ + Verify that window_size is passed correctly to private function and + affects the result. 
+ """ + sigmas = [2.0, 2.0] + num_points = 1000 + test_data, act_locs = _gen_gaussians_even(sigmas, num_points) + widths = np.arange(0.1, max(sigmas), 0.2) + noise_amp = 0.05 + np.random.seed(18181911) + test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp) + + # Possibly contrived negative region to throw off peak finding + # when window_size is too large + test_data[250:320] -= 1 + + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, + min_length=None, window_size=None) + with pytest.raises(AssertionError): + assert found_locs.size == act_locs.size + + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, + min_length=None, window_size=20) + assert found_locs.size == act_locs.size + + def test_find_peaks_with_one_width(self): + """ + Verify that the `width` argument + in `find_peaks_cwt` can be a float + """ + xs = np.arange(0, np.pi, 0.05) + test_data = np.sin(xs) + widths = 1 + found_locs = find_peaks_cwt(test_data, widths) + + np.testing.assert_equal(found_locs, 32) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py new file mode 100644 index 0000000000000000000000000000000000000000..58fdd458ef665051805c21ebce5340949fdca428 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py @@ -0,0 +1,52 @@ +# Regressions tests on result types of some signal functions + +import numpy as np +from numpy.testing import assert_ + +from scipy.signal import (decimate, + lfilter_zi, + lfiltic, + sos2tf, + sosfilt_zi) + + +def test_decimate(): + ones_f32 = np.ones(32, dtype=np.float32) + assert_(decimate(ones_f32, 2).dtype == np.float32) + + ones_i64 = np.ones(32, dtype=np.int64) + assert_(decimate(ones_i64, 2).dtype == np.float64) + + +def test_lfilter_zi(): + b_f32 = np.array([1, 2, 3], dtype=np.float32) + a_f32 = np.array([4, 5, 6], dtype=np.float32) + 
def test_lfiltic():
    # lfiltic once returned float32 when given a mix of f32 / f64 args;
    # any float64 input must promote the result to float64.
    b_f32 = np.array([1, 2, 3], dtype=np.float32)
    a_f32 = np.array([4, 5, 6], dtype=np.float32)
    x_f32 = np.ones(32, dtype=np.float32)

    b_f64, a_f64, x_f64 = (arr.astype(np.float64)
                           for arr in (b_f32, a_f32, x_f32))

    # One f64 argument in any position is enough to promote the result
    mixed_calls = [
        (b_f64, a_f32, x_f32),
        (b_f32, a_f64, x_f32),
        (b_f32, a_f32, x_f64),
    ]
    for b, a, x in mixed_calls:
        assert_(lfiltic(b, a, x).dtype == np.float64)
    assert_(lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64)
def alt_sg_coeffs(window_length, polyorder, pos):
    """Alternative implementation of the SG coefficients.

    Uses numpy.polyfit and numpy.polyval. The results should be
    equivalent to those of savgol_coeffs(), but this implementation
    is slower.

    window_length should be odd.
    """
    # Default evaluation position is the window center
    center = window_length // 2 if pos is None else pos
    t = np.arange(window_length)
    # Fit a polynomial to a unit impulse at `center` and evaluate it
    impulse = np.where(t == center, 1, 0)
    fit = np.polyfit(t, impulse, polyorder)
    return np.polyval(fit, t)
def test_sg_coeffs_deriv():
    # `x` samples a parabola, so savgol_coeffs with polyorder >= 2 must
    # reproduce the function and its first two derivatives exactly.
    i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
    x = i ** 2 / 4
    expected = {0: x,                       # the parabola itself
                1: i / 2,                   # first derivative
                2: np.full_like(i, 0.5)}    # constant second derivative
    for pos in range(x.size):
        for deriv, ref in expected.items():
            coeffs = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot',
                                   deriv=deriv)
            assert_allclose(coeffs.dot(x), ref[pos], atol=1e-10)
+ """ + coeffs = savgol_coeffs(5, polyorder=1, deriv=2) + assert_array_equal(coeffs, np.zeros(5)) + coeffs = savgol_coeffs(7, polyorder=4, deriv=6) + assert_array_equal(coeffs, np.zeros(7)) + + +def test_sg_coeffs_large(): + # Test that for large values of window_length and polyorder the array of + # coefficients returned is symmetric. The aim is to ensure that + # no potential numeric overflow occurs. + coeffs0 = savgol_coeffs(31, 9) + assert_array_almost_equal(coeffs0, coeffs0[::-1]) + coeffs1 = savgol_coeffs(31, 9, deriv=1) + assert_array_almost_equal(coeffs1, -coeffs1[::-1]) + +# -------------------------------------------------------------------- +# savgol_coeffs tests for even window length +# -------------------------------------------------------------------- + + +def test_sg_coeffs_even_window_length(): + # Simple case - deriv=0, polyorder=0, 1 + window_lengths = [4, 6, 8, 10, 12, 14, 16] + for length in window_lengths: + h_p_d = savgol_coeffs(length, 0, 0) + assert_allclose(h_p_d, 1/length) + + # Verify with closed forms + # deriv=1, polyorder=1, 2 + def h_p_d_closed_form_1(k, m): + return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1)) + + # deriv=2, polyorder=2 + def h_p_d_closed_form_2(k, m): + numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2) + denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1) + return numer/denom + + for length in window_lengths: + m = length//2 + expected_output = [h_p_d_closed_form_1(k, m) + for k in range(-m + 1, m + 1)][::-1] + actual_output = savgol_coeffs(length, 1, 1) + assert_allclose(expected_output, actual_output) + actual_output = savgol_coeffs(length, 2, 1) + assert_allclose(expected_output, actual_output) + + expected_output = [h_p_d_closed_form_2(k, m) + for k in range(-m + 1, m + 1)][::-1] + actual_output = savgol_coeffs(length, 2, 2) + assert_allclose(expected_output, actual_output) + actual_output = savgol_coeffs(length, 3, 2) + assert_allclose(expected_output, actual_output) + 
def test_sg_filter_basic():
    # Basic savgol_filter() cases: the same input filtered with each
    # padding mode has a known exact result.
    x = np.array([1.0, 2.0, 1.0])
    expected_by_mode = {
        'constant': [1.0, 4.0 / 3, 1.0],
        'mirror': [5.0 / 3, 4.0 / 3, 5.0 / 3],
        'wrap': [4.0 / 3, 4.0 / 3, 4.0 / 3],
    }
    for mode, expected in expected_by_mode.items():
        assert_allclose(savgol_filter(x, 3, 1, mode=mode), expected)
+ x = np.array([t, + 3 * t ** 2, + t ** 3 - t]) + dx = np.array([np.ones_like(t), + 6 * t, + 3 * t ** 2 - 1.0]) + d2x = np.array([np.zeros_like(t), + np.full_like(t, 6), + 6 * t]) + + window_length = 7 + + y = savgol_filter(x, window_length, 3, axis=-1, mode='interp') + assert_allclose(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=1, delta=delta) + assert_allclose(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=2, delta=delta) + assert_allclose(y2, d2x, atol=1e-12) + + # Transpose everything, and test again with axis=0. + + x = x.T + dx = dx.T + d2x = d2x.T + + y = savgol_filter(x, window_length, 3, axis=0, mode='interp') + assert_allclose(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=1, delta=delta) + assert_allclose(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=2, delta=delta) + assert_allclose(y2, d2x, atol=1e-12) + + +def test_sg_filter_interp_edges_3d(): + # Test mode='interp' with a 3-D array. 
+ t = np.linspace(-5, 5, 21) + delta = t[1] - t[0] + x1 = np.array([t, -t]) + x2 = np.array([t ** 2, 3 * t ** 2 + 5]) + x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t]) + dx1 = np.array([np.ones_like(t), -np.ones_like(t)]) + dx2 = np.array([2 * t, 6 * t]) + dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5]) + + # z has shape (3, 2, 21) + z = np.array([x1, x2, x3]) + dz = np.array([dx1, dx2, dx3]) + + y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta) + assert_allclose(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta) + assert_allclose(dy, dz, atol=1e-10) + + # z has shape (3, 21, 2) + z = np.array([x1.T, x2.T, x3.T]) + dz = np.array([dx1.T, dx2.T, dx3.T]) + + y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta) + assert_allclose(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta) + assert_allclose(dy, dz, atol=1e-10) + + # z has shape (21, 3, 2) + z = z.swapaxes(0, 1).copy() + dz = dz.swapaxes(0, 1).copy() + + y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta) + assert_allclose(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta) + assert_allclose(dy, dz, atol=1e-10) + + +def test_sg_filter_valid_window_length_3d(): + """Tests that the window_length check is using the correct axis.""" + + x = np.ones((10, 20, 30)) + + savgol_filter(x, window_length=29, polyorder=3, mode='interp') + + with pytest.raises(ValueError, match='window_length must be less than'): + # window_length is more than x.shape[-1]. + savgol_filter(x, window_length=31, polyorder=3, mode='interp') + + savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp') + + with pytest.raises(ValueError, match='window_length must be less than'): + # window_length is more than x.shape[0]. 
+ savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp') diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_short_time_fft.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_short_time_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..2307f185698c8a8f60e6b46d423999b4fae8cbd3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_short_time_fft.py @@ -0,0 +1,840 @@ +"""Unit tests for module `_short_time_fft`. + +This file's structure loosely groups the tests into the following sequential +categories: + +1. Test function `_calc_dual_canonical_window`. +2. Test for invalid parameters and exceptions in `ShortTimeFFT` (until the + `test_from_window` function). +3. Test algorithmic properties of STFT/ISTFT. Some tests were ported from + ``test_spectral.py``. + +Notes +----- +* Mypy 0.990 does interpret the line:: + + from scipy.stats import norm as normal_distribution + + incorrectly (but the code works), hence a ``type: ignore`` was appended. +""" +import math +from itertools import product +from typing import cast, get_args, Literal + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_equal +from scipy.fft import fftshift +from scipy.stats import norm as normal_distribution # type: ignore +from scipy.signal import get_window, welch, stft, istft, spectrogram + +from scipy.signal._short_time_fft import FFT_MODE_TYPE, \ + _calc_dual_canonical_window, ShortTimeFFT, PAD_TYPE +from scipy.signal.windows import gaussian + + +def test__calc_dual_canonical_window_roundtrip(): + """Test dual window calculation with a round trip to verify duality. + + Note that this works only for canonical window pairs (having minimal + energy) like a Gaussian. + + The window is the same as in the example of `from ShortTimeFFT.from_dual`. 
+ """ + win = gaussian(51, std=10, sym=True) + d_win = _calc_dual_canonical_window(win, 10) + win2 = _calc_dual_canonical_window(d_win, 10) + assert_allclose(win2, win) + + +def test__calc_dual_canonical_window_exceptions(): + """Raise all exceptions in `_calc_dual_canonical_window`.""" + # Verify that calculation can fail: + with pytest.raises(ValueError, match="hop=5 is larger than window len.*"): + _calc_dual_canonical_window(np.ones(4), 5) + with pytest.raises(ValueError, match=".* Transform not invertible!"): + _calc_dual_canonical_window(np.array([.1, .2, .3, 0]), 4) + + # Verify that parameter `win` may not be integers: + with pytest.raises(ValueError, match="Parameter 'win' cannot be of int.*"): + _calc_dual_canonical_window(np.ones(4, dtype=int), 1) + + +def test_invalid_initializer_parameters(): + """Verify that exceptions get raised on invalid parameters when + instantiating ShortTimeFFT. """ + with pytest.raises(ValueError, match=r"Parameter win must be 1d, " + + r"but win.shape=\(2, 2\)!"): + ShortTimeFFT(np.ones((2, 2)), hop=4, fs=1) + with pytest.raises(ValueError, match="Parameter win must have " + + "finite entries"): + ShortTimeFFT(np.array([1, np.inf, 2, 3]), hop=4, fs=1) + with pytest.raises(ValueError, match="Parameter hop=0 is not " + + "an integer >= 1!"): + ShortTimeFFT(np.ones(4), hop=0, fs=1) + with pytest.raises(ValueError, match="Parameter hop=2.0 is not " + + "an integer >= 1!"): + # noinspection PyTypeChecker + ShortTimeFFT(np.ones(4), hop=2.0, fs=1) + with pytest.raises(ValueError, match=r"dual_win.shape=\(5,\) must equal " + + r"win.shape=\(4,\)!"): + ShortTimeFFT(np.ones(4), hop=2, fs=1, dual_win=np.ones(5)) + with pytest.raises(ValueError, match="Parameter dual_win must be " + + "a finite array!"): + ShortTimeFFT(np.ones(3), hop=2, fs=1, + dual_win=np.array([np.nan, 2, 3])) + + +def test_exceptions_properties_methods(): + """Verify that exceptions get raised when setting properties or calling + method of ShortTimeFFT to/with 
invalid values.""" + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + with pytest.raises(ValueError, match="Sampling interval T=-1 must be " + + "positive!"): + SFT.T = -1 + with pytest.raises(ValueError, match="Sampling frequency fs=-1 must be " + + "positive!"): + SFT.fs = -1 + with pytest.raises(ValueError, match="fft_mode='invalid_typ' not in " + + r"\('twosided', 'centered', " + + r"'onesided', 'onesided2X'\)!"): + SFT.fft_mode = 'invalid_typ' + with pytest.raises(ValueError, match="For scaling is None, " + + "fft_mode='onesided2X' is invalid.*"): + SFT.fft_mode = 'onesided2X' + with pytest.raises(ValueError, match="Attribute mfft=7 needs to be " + + "at least the window length.*"): + SFT.mfft = 7 + with pytest.raises(ValueError, match="scaling='invalid' not in.*"): + # noinspection PyTypeChecker + SFT.scale_to('invalid') + with pytest.raises(ValueError, match="phase_shift=3.0 has the unit .*"): + SFT.phase_shift = 3.0 + with pytest.raises(ValueError, match="-mfft < phase_shift < mfft " + + "does not hold.*"): + SFT.phase_shift = 2*SFT.mfft + with pytest.raises(ValueError, match="Parameter padding='invalid' not.*"): + # noinspection PyTypeChecker + g = SFT._x_slices(np.zeros(16), k_off=0, p0=0, p1=1, padding='invalid') + next(g) # execute generator + with pytest.raises(ValueError, match="Trend type must be 'linear' " + + "or 'constant'"): + # noinspection PyTypeChecker + SFT.stft_detrend(np.zeros(16), detr='invalid') + with pytest.raises(ValueError, match="Parameter detr=nan is not a str, " + + "function or None!"): + # noinspection PyTypeChecker + SFT.stft_detrend(np.zeros(16), detr=np.nan) + with pytest.raises(ValueError, match="Invalid Parameter p0=0, p1=200.*"): + SFT.p_range(100, 0, 200) + + with pytest.raises(ValueError, match="f_axis=0 may not be equal to " + + "t_axis=0!"): + SFT.istft(np.zeros((SFT.f_pts, 2)), t_axis=0, f_axis=0) + with pytest.raises(ValueError, match=r"S.shape\[f_axis\]=2 must be equal" + + " to self.f_pts=5.*"): + 
SFT.istft(np.zeros((2, 2))) + with pytest.raises(ValueError, match=r"S.shape\[t_axis\]=1 needs to have" + + " at least 2 slices.*"): + SFT.istft(np.zeros((SFT.f_pts, 1))) + with pytest.raises(ValueError, match=r".*\(k1=100\) <= \(k_max=12\) " + + "is false!$"): + SFT.istft(np.zeros((SFT.f_pts, 3)), k1=100) + with pytest.raises(ValueError, match=r"\(k1=1\) - \(k0=0\) = 1 has to " + + "be at least.* length 4!"): + SFT.istft(np.zeros((SFT.f_pts, 3)), k0=0, k1=1) + + with pytest.raises(ValueError, match=r"Parameter axes_seq='invalid' " + + r"not in \['tf', 'ft'\]!"): + # noinspection PyTypeChecker + SFT.extent(n=100, axes_seq='invalid') + with pytest.raises(ValueError, match="Attribute fft_mode=twosided must.*"): + SFT.fft_mode = 'twosided' + SFT.extent(n=100) + + +@pytest.mark.parametrize('m', ('onesided', 'onesided2X')) +def test_exceptions_fft_mode_complex_win(m: FFT_MODE_TYPE): + """Verify that one-sided spectra are not allowed with complex-valued + windows or with complex-valued signals. + + The reason being, the `rfft` function only accepts real-valued input. + """ + with pytest.raises(ValueError, + match=f"One-sided spectra, i.e., fft_mode='{m}'.*"): + ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode=m) + + SFT = ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode='twosided') + with pytest.raises(ValueError, + match=f"One-sided spectra, i.e., fft_mode='{m}'.*"): + SFT.fft_mode = m + + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1, scale_to='psd', fft_mode='onesided') + with pytest.raises(ValueError, match="Complex-valued `x` not allowed for self.*"): + SFT.stft(np.ones(8)*1j) + SFT.fft_mode = 'onesided2X' + with pytest.raises(ValueError, match="Complex-valued `x` not allowed for self.*"): + SFT.stft(np.ones(8)*1j) + + +def test_invalid_fft_mode_RuntimeError(): + """Ensure exception gets raised when property `fft_mode` is invalid. 
""" + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + SFT._fft_mode = 'invalid_typ' + + with pytest.raises(RuntimeError): + _ = SFT.f + with pytest.raises(RuntimeError): + SFT._fft_func(np.ones(8)) + with pytest.raises(RuntimeError): + SFT._ifft_func(np.ones(8)) + + +@pytest.mark.parametrize('win_params, Nx', [(('gaussian', 2.), 9), # in docstr + ('triang', 7), + (('kaiser', 4.0), 9), + (('exponential', None, 1.), 9), + (4.0, 9)]) +def test_from_window(win_params, Nx: int): + """Verify that `from_window()` handles parameters correctly. + + The window parameterizations are documented in the `get_window` docstring. + """ + w_sym, fs = get_window(win_params, Nx, fftbins=False), 16. + w_per = get_window(win_params, Nx, fftbins=True) + SFT0 = ShortTimeFFT(w_sym, hop=3, fs=fs, fft_mode='twosided', + scale_to='psd', phase_shift=1) + nperseg = len(w_sym) + noverlap = nperseg - SFT0.hop + SFT1 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap, + symmetric_win=True, fft_mode='twosided', + scale_to='psd', phase_shift=1) + # periodic window: + SFT2 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap, + symmetric_win=False, fft_mode='twosided', + scale_to='psd', phase_shift=1) + # Be informative when comparing instances: + assert_equal(SFT1.win, SFT0.win) + assert_allclose(SFT2.win, w_per / np.sqrt(sum(w_per**2) * fs)) + for n_ in ('hop', 'T', 'fft_mode', 'mfft', 'scaling', 'phase_shift'): + v0, v1, v2 = (getattr(SFT_, n_) for SFT_ in (SFT0, SFT1, SFT2)) + assert v1 == v0, f"SFT1.{n_}={v1} does not equal SFT0.{n_}={v0}" + assert v2 == v0, f"SFT2.{n_}={v2} does not equal SFT0.{n_}={v0}" + + +def test_dual_win_roundtrip(): + """Verify the duality of `win` and `dual_win`. + + Note that this test does not work for arbitrary windows, since dual windows + are not unique. It always works for invertible STFTs if the windows do not + overlap. 
+ """ + # Non-standard values for keyword arguments (except for `scale_to`): + kw = dict(hop=4, fs=1, fft_mode='twosided', mfft=8, scale_to=None, + phase_shift=2) + SFT0 = ShortTimeFFT(np.ones(4), **kw) + SFT1 = ShortTimeFFT.from_dual(SFT0.dual_win, **kw) + assert_allclose(SFT1.dual_win, SFT0.win) + + +@pytest.mark.parametrize('scale_to, fac_psd, fac_mag', + [(None, 0.25, 0.125), + ('magnitude', 2.0, 1), + ('psd', 1, 0.5)]) +def test_scaling(scale_to: Literal['magnitude', 'psd'], fac_psd, fac_mag): + """Verify scaling calculations. + + * Verify passing `scale_to`parameter to ``__init__(). + * Roundtrip while changing scaling factor. + """ + SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=scale_to) + assert SFT.fac_psd == fac_psd + assert SFT.fac_magnitude == fac_mag + # increase coverage by accessing properties twice: + assert SFT.fac_psd == fac_psd + assert SFT.fac_magnitude == fac_mag + + x = np.fft.irfft([0, 0, 7, 0, 0, 0, 0]) # periodic signal + Sx = SFT.stft(x) + Sx_mag, Sx_psd = Sx * SFT.fac_magnitude, Sx * SFT.fac_psd + + SFT.scale_to('magnitude') + x_mag = SFT.istft(Sx_mag, k1=len(x)) + assert_allclose(x_mag, x) + + SFT.scale_to('psd') + x_psd = SFT.istft(Sx_psd, k1=len(x)) + assert_allclose(x_psd, x) + + +def test_scale_to(): + """Verify `scale_to()` method.""" + SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None) + + SFT.scale_to('magnitude') + assert SFT.scaling == 'magnitude' + assert SFT.fac_psd == 2.0 + assert SFT.fac_magnitude == 1 + + SFT.scale_to('psd') + assert SFT.scaling == 'psd' + assert SFT.fac_psd == 1 + assert SFT.fac_magnitude == 0.5 + + SFT.scale_to('psd') # needed for coverage + + for scale, s_fac in zip(('magnitude', 'psd'), (8, 4)): + SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None) + dual_win = SFT.dual_win.copy() + + SFT.scale_to(cast(Literal['magnitude', 'psd'], scale)) + assert_allclose(SFT.dual_win, dual_win * s_fac) + + +def test_x_slices_padding(): + """Verify padding. 
+ + The reference arrays were taken from the docstrings of `zero_ext`, + `const_ext`, `odd_ext()`, and `even_ext()` from the _array_tools module. + """ + SFT = ShortTimeFFT(np.ones(5), hop=4, fs=1) + x = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]], dtype=float) + d = {'zeros': [[[0, 0, 1, 2, 3], [0, 0, 0, 1, 4]], + [[3, 4, 5, 0, 0], [4, 9, 16, 0, 0]]], + 'edge': [[[1, 1, 1, 2, 3], [0, 0, 0, 1, 4]], + [[3, 4, 5, 5, 5], [4, 9, 16, 16, 16]]], + 'even': [[[3, 2, 1, 2, 3], [4, 1, 0, 1, 4]], + [[3, 4, 5, 4, 3], [4, 9, 16, 9, 4]]], + 'odd': [[[-1, 0, 1, 2, 3], [-4, -1, 0, 1, 4]], + [[3, 4, 5, 6, 7], [4, 9, 16, 23, 28]]]} + for p_, xx in d.items(): + gen = SFT._x_slices(np.array(x), 0, 0, 2, padding=cast(PAD_TYPE, p_)) + yy = np.array([y_.copy() for y_ in gen]) # due to inplace copying + assert_equal(yy, xx, err_msg=f"Failed '{p_}' padding.") + + +def test_invertible(): + """Verify `invertible` property. """ + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + assert SFT.invertible + SFT = ShortTimeFFT(np.ones(8), hop=9, fs=1) + assert not SFT.invertible + + +def test_border_values(): + """Ensure that minimum and maximum values of slices are correct.""" + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + assert SFT.p_min == 0 + assert SFT.k_min == -4 + assert SFT.lower_border_end == (4, 1) + assert SFT.lower_border_end == (4, 1) # needed to test caching + assert SFT.p_max(10) == 4 + assert SFT.k_max(10) == 16 + assert SFT.upper_border_begin(10) == (4, 2) + + +def test_border_values_exotic(): + """Ensure that the border calculations are correct for windows with + zeros. 
""" + w = np.array([0, 0, 0, 0, 0, 0, 0, 1.]) + SFT = ShortTimeFFT(w, hop=1, fs=1) + assert SFT.lower_border_end == (0, 0) + + SFT = ShortTimeFFT(np.flip(w), hop=20, fs=1) + assert SFT.upper_border_begin(4) == (0, 0) + + SFT._hop = -1 # provoke unreachable line + with pytest.raises(RuntimeError): + _ = SFT.k_max(4) + with pytest.raises(RuntimeError): + _ = SFT.k_min + + +def test_t(): + """Verify that the times of the slices are correct. """ + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=2) + assert SFT.T == 1/2 + assert SFT.fs == 2. + assert SFT.delta_t == 4 * 1/2 + t_stft = np.arange(0, SFT.p_max(10)) * SFT.delta_t + assert_equal(SFT.t(10), t_stft) + assert_equal(SFT.t(10, 1, 3), t_stft[1:3]) + SFT.T = 1/4 + assert SFT.T == 1/4 + assert SFT.fs == 4 + SFT.fs = 1/8 + assert SFT.fs == 1/8 + assert SFT.T == 8 + + +@pytest.mark.parametrize('fft_mode, f', + [('onesided', [0., 1., 2.]), + ('onesided2X', [0., 1., 2.]), + ('twosided', [0., 1., 2., -2., -1.]), + ('centered', [-2., -1., 0., 1., 2.])]) +def test_f(fft_mode: FFT_MODE_TYPE, f): + """Verify the frequency values property `f`.""" + SFT = ShortTimeFFT(np.ones(5), hop=4, fs=5, fft_mode=fft_mode, + scale_to='psd') + assert_equal(SFT.f, f) + + +def test_extent(): + """Ensure that the `extent()` method is correct. """ + SFT = ShortTimeFFT(np.ones(32), hop=4, fs=32, fft_mode='onesided') + assert SFT.extent(100, 'tf', False) == (-0.375, 3.625, 0.0, 17.0) + assert SFT.extent(100, 'ft', False) == (0.0, 17.0, -0.375, 3.625) + assert SFT.extent(100, 'tf', True) == (-0.4375, 3.5625, -0.5, 16.5) + assert SFT.extent(100, 'ft', True) == (-0.5, 16.5, -0.4375, 3.5625) + + SFT = ShortTimeFFT(np.ones(32), hop=4, fs=32, fft_mode='centered') + assert SFT.extent(100, 'tf', False) == (-0.375, 3.625, -16.0, 15.0) + + +def test_spectrogram(): + """Verify spectrogram and cross-spectrogram methods. 
""" + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + x, y = np.ones(10), np.arange(10) + X, Y = SFT.stft(x), SFT.stft(y) + assert_allclose(SFT.spectrogram(x), X.real**2+X.imag**2) + assert_allclose(SFT.spectrogram(x, y), X * Y.conj()) + + +@pytest.mark.parametrize('n', [8, 9]) +def test_fft_func_roundtrip(n: int): + """Test roundtrip `ifft_func(fft_func(x)) == x` for all permutations of + relevant parameters. """ + np.random.seed(2394795) + x0 = np.random.rand(n) + w, h_n = np.ones(n), 4 + + pp = dict( + fft_mode=get_args(FFT_MODE_TYPE), + mfft=[None, n, n+1, n+2], + scaling=[None, 'magnitude', 'psd'], + phase_shift=[None, -n+1, 0, n // 2, n-1]) + for f_typ, mfft, scaling, phase_shift in product(*pp.values()): + if f_typ == 'onesided2X' and scaling is None: + continue # this combination is forbidden + SFT = ShortTimeFFT(w, h_n, fs=n, fft_mode=f_typ, mfft=mfft, + scale_to=scaling, phase_shift=phase_shift) + X0 = SFT._fft_func(x0) + x1 = SFT._ifft_func(X0) + assert_allclose(x0, x1, err_msg="_fft_func() roundtrip failed for " + + f"{f_typ=}, {mfft=}, {scaling=}, {phase_shift=}") + + SFT = ShortTimeFFT(w, h_n, fs=1) + SFT._fft_mode = 'invalid_fft' # type: ignore + with pytest.raises(RuntimeError): + SFT._fft_func(x0) + with pytest.raises(RuntimeError): + SFT._ifft_func(x0) + + +@pytest.mark.parametrize('i', range(19)) +def test_impulse_roundtrip(i): + """Roundtrip for an impulse being at different positions `i`.""" + n = 19 + w, h_n = np.ones(8), 3 + x = np.zeros(n) + x[i] = 1 + + SFT = ShortTimeFFT(w, hop=h_n, fs=1, scale_to=None, phase_shift=None) + Sx = SFT.stft(x) + # test slicing the input signal into two parts: + n_q = SFT.nearest_k_p(n // 2) + Sx0 = SFT.stft(x[:n_q], padding='zeros') + Sx1 = SFT.stft(x[n_q:], padding='zeros') + q0_ub = SFT.upper_border_begin(n_q)[1] - SFT.p_min + q1_le = SFT.lower_border_end[1] - SFT.p_min + assert_allclose(Sx0[:, :q0_ub], Sx[:, :q0_ub], err_msg=f"{i=}") + assert_allclose(Sx1[:, q1_le:], Sx[:, q1_le-Sx1.shape[1]:], + 
err_msg=f"{i=}") + + Sx01 = np.hstack((Sx0[:, :q0_ub], + Sx0[:, q0_ub:] + Sx1[:, :q1_le], + Sx1[:, q1_le:])) + assert_allclose(Sx, Sx01, atol=1e-8, err_msg=f"{i=}") + + y = SFT.istft(Sx, 0, n) + assert_allclose(y, x, atol=1e-8, err_msg=f"{i=}") + y0 = SFT.istft(Sx, 0, n//2) + assert_allclose(x[:n//2], y0, atol=1e-8, err_msg=f"{i=}") + y1 = SFT.istft(Sx, n // 2, n) + assert_allclose(x[n // 2:], y1, atol=1e-8, err_msg=f"{i=}") + + +@pytest.mark.parametrize('hop', [1, 7, 8]) +def test_asymmetric_window_roundtrip(hop: int): + """An asymmetric window could uncover indexing problems. """ + np.random.seed(23371) + + w = np.arange(16) / 8 # must be of type float + w[len(w)//2:] = 1 + SFT = ShortTimeFFT(w, hop, fs=1) + + x = 10 * np.random.randn(64) + Sx = SFT.stft(x) + x1 = SFT.istft(Sx, k1=len(x)) + assert_allclose(x1, x1, err_msg="Roundtrip for asymmetric window with " + + f" {hop=} failed!") + + +@pytest.mark.parametrize('m_num', [6, 7]) +def test_minimal_length_signal(m_num): + """Verify that the shortest allowed signal works. """ + SFT = ShortTimeFFT(np.ones(m_num), m_num//2, fs=1) + n = math.ceil(m_num/2) + x = np.ones(n) + Sx = SFT.stft(x) + x1 = SFT.istft(Sx, k1=n) + assert_allclose(x1, x, err_msg=f"Roundtrip minimal length signal ({n=})" + + f" for {m_num} sample window failed!") + with pytest.raises(ValueError, match=rf"len\(x\)={n-1} must be >= ceil.*"): + SFT.stft(x[:-1]) + with pytest.raises(ValueError, match=rf"S.shape\[t_axis\]={Sx.shape[1]-1}" + f" needs to have at least {Sx.shape[1]} slices"): + SFT.istft(Sx[:, :-1], k1=n) + + +def test_tutorial_stft_sliding_win(): + """Verify example in "Sliding Windows" subsection from the "User Guide". + + In :ref:`tutorial_stft_sliding_win` (file ``signal.rst``) of the + :ref:`user_guide` the behavior the border behavior of + ``ShortTimeFFT(np.ones(6), 2, fs=1)`` with a 50 sample signal is discussed. + This test verifies the presented indexes. 
+ """ + SFT = ShortTimeFFT(np.ones(6), 2, fs=1) + + # Lower border: + assert SFT.m_num_mid == 3, f"Slice middle is not 3 but {SFT.m_num_mid=}" + assert SFT.p_min == -1, f"Lowest slice {SFT.p_min=} is not -1" + assert SFT.k_min == -5, f"Lowest slice sample {SFT.p_min=} is not -5" + k_lb, p_lb = SFT.lower_border_end + assert p_lb == 2, f"First unaffected slice {p_lb=} is not 2" + assert k_lb == 5, f"First unaffected sample {k_lb=} is not 5" + + n = 50 # upper signal border + assert (p_max := SFT.p_max(n)) == 27, f"Last slice {p_max=} must be 27" + assert (k_max := SFT.k_max(n)) == 55, f"Last sample {k_max=} must be 55" + k_ub, p_ub = SFT.upper_border_begin(n) + assert p_ub == 24, f"First upper border slice {p_ub=} must be 24" + assert k_ub == 45, f"First upper border slice {k_ub=} must be 45" + + +def test_tutorial_stft_legacy_stft(): + """Verify STFT example in "Comparison with Legacy Implementation" from the + "User Guide". + + In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the + :ref:`user_guide` the legacy and the new implementation are compared. 
+ """ + fs, N = 200, 1001 # # 200 Hz sampling rate for 5 s signal + t_z = np.arange(N) / fs # time indexes for signal + z = np.exp(2j*np.pi * 70 * (t_z - 0.2 * t_z ** 2)) # complex-valued chirp + + nperseg, noverlap = 50, 40 + win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard deviation + + # Legacy STFT: + f0_u, t0, Sz0_u = stft(z, fs, win, nperseg, noverlap, + return_onesided=False, scaling='spectrum') + Sz0 = fftshift(Sz0_u, axes=0) + + # New STFT: + SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap, + fft_mode='centered', + scale_to='magnitude', phase_shift=None) + Sz1 = SFT.stft(z) + + assert_allclose(Sz0, Sz1[:, 2:-1]) + + assert_allclose((abs(Sz1[:, 1]).min(), abs(Sz1[:, 1]).max()), + (6.925060911593139e-07, 8.00271269218721e-07)) + + t0_r, z0_r = istft(Sz0_u, fs, win, nperseg, noverlap, input_onesided=False, + scaling='spectrum') + z1_r = SFT.istft(Sz1, k1=N) + assert len(z0_r) == N + 9 + assert_allclose(z0_r[:N], z) + assert_allclose(z1_r, z) + + # Spectrogram is just the absolute square of th STFT: + assert_allclose(SFT.spectrogram(z), abs(Sz1) ** 2) + + +def test_tutorial_stft_legacy_spectrogram(): + """Verify spectrogram example in "Comparison with Legacy Implementation" + from the "User Guide". + + In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the + :ref:`user_guide` the legacy and the new implementation are compared. + """ + fs, N = 200, 1001 # 200 Hz sampling rate for almost 5 s signal + t_z = np.arange(N) / fs # time indexes for signal + z = np.exp(2j*np.pi*70 * (t_z - 0.2*t_z**2)) # complex-valued sweep + + nperseg, noverlap = 50, 40 + win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard dev. 
+ + # Legacy spectrogram: + f2_u, t2, Sz2_u = spectrogram(z, fs, win, nperseg, noverlap, detrend=None, + return_onesided=False, scaling='spectrum', + mode='complex') + + f2, Sz2 = fftshift(f2_u), fftshift(Sz2_u, axes=0) + + # New STFT: + SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap, + fft_mode='centered', scale_to='magnitude', + phase_shift=None) + Sz3 = SFT.stft(z, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2) + t3 = SFT.t(N, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2) + + assert_allclose(t2, t3) + assert_allclose(f2, SFT.f) + assert_allclose(Sz2, Sz3) + + +def test_permute_axes(): + """Verify correctness of four-dimensional signal by permuting its + shape. """ + n = 25 + SFT = ShortTimeFFT(np.ones(8)/8, hop=3, fs=n) + x0 = np.arange(n) + Sx0 = SFT.stft(x0) + Sx0 = Sx0.reshape((Sx0.shape[0], 1, 1, 1, Sx0.shape[-1])) + SxT = np.moveaxis(Sx0, (0, -1), (-1, 0)) + + atol = 2 * np.finfo(SFT.win.dtype).resolution + for i in range(4): + y = np.reshape(x0, np.roll((n, 1, 1, 1), i)) + Sy = SFT.stft(y, axis=i) + assert_allclose(Sy, np.moveaxis(Sx0, 0, i)) + + yb0 = SFT.istft(Sy, k1=n, f_axis=i) + assert_allclose(yb0, y, atol=atol) + # explicit t-axis parameter (for coverage): + yb1 = SFT.istft(Sy, k1=n, f_axis=i, t_axis=Sy.ndim-1) + assert_allclose(yb1, y, atol=atol) + + SyT = np.moveaxis(Sy, (i, -1), (-1, i)) + assert_allclose(SyT, np.moveaxis(SxT, 0, i)) + + ybT = SFT.istft(SyT, k1=n, t_axis=i, f_axis=-1) + assert_allclose(ybT, y, atol=atol) + + +@pytest.mark.parametrize("fft_mode", + ('twosided', 'centered', 'onesided', 'onesided2X')) +def test_roundtrip_multidimensional(fft_mode: FFT_MODE_TYPE): + """Test roundtrip of a multidimensional input signal versus its components. + + This test can uncover potential problems with `fftshift()`. 
+ """ + n = 9 + x = np.arange(4*n*2).reshape(4, n, 2) + SFT = ShortTimeFFT(get_window('hann', 4), hop=2, fs=1, + scale_to='magnitude', fft_mode=fft_mode) + Sx = SFT.stft(x, axis=1) + y = SFT.istft(Sx, k1=n, f_axis=1, t_axis=-1) + assert_allclose(y, x, err_msg='Multidim. roundtrip failed!') + + for i, j in product(range(x.shape[0]), range(x.shape[2])): + y_ = SFT.istft(Sx[i, :, j, :], k1=n) + assert_allclose(y_, x[i, :, j], err_msg="Multidim. roundtrip for component " + + f"x[{i}, :, {j}] and {fft_mode=} failed!") + + +@pytest.mark.parametrize('window, n, nperseg, noverlap', + [('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ('bartlett', 101, 51, 26), # Test odd nperseg + ('hann', 1024, 256, 128), # Test defaults + (('tukey', 0.5), 1152, 256, 64), # Test Tukey + ('hann', 1024, 256, 255), # Test overlapped hann + ('boxcar', 100, 10, 3), # NOLA True, COLA False + ('bartlett', 101, 51, 37), # NOLA True, COLA False + ('hann', 1024, 256, 127), # NOLA True, COLA False + # NOLA True, COLA False: + (('tukey', 0.5), 1152, 256, 14), + ('hann', 1024, 256, 5)]) # NOLA True, COLA False +def test_roundtrip_windows(window, n: int, nperseg: int, noverlap: int): + """Roundtrip test adapted from `test_spectral.TestSTFT`. + + The parameters are taken from the methods test_roundtrip_real(), + test_roundtrip_nola_not_cola(), test_roundtrip_float32(), + test_roundtrip_complex(). 
+ """ + np.random.seed(2394655) + + w = get_window(window, nperseg) + SFT = ShortTimeFFT(w, nperseg - noverlap, fs=1, fft_mode='twosided', + phase_shift=None) + + z = 10 * np.random.randn(n) + 10j * np.random.randn(n) + Sz = SFT.stft(z) + z1 = SFT.istft(Sz, k1=len(z)) + assert_allclose(z, z1, err_msg="Roundtrip for complex values failed") + + x = 10 * np.random.randn(n) + Sx = SFT.stft(x) + x1 = SFT.istft(Sx, k1=len(z)) + assert_allclose(x, x1, err_msg="Roundtrip for float values failed") + + x32 = x.astype(np.float32) + Sx32 = SFT.stft(x32) + x32_1 = SFT.istft(Sx32, k1=len(x32)) + assert_allclose(x32, x32_1, + err_msg="Roundtrip for 32 Bit float values failed") + + +@pytest.mark.parametrize('signal_type', ('real', 'complex')) +def test_roundtrip_complex_window(signal_type): + """Test roundtrip for complex-valued window function + + The purpose of this test is to check if the dual window is calculated + correctly for complex-valued windows. + """ + np.random.seed(1354654) + win = np.exp(2j*np.linspace(0, np.pi, 8)) + SFT = ShortTimeFFT(win, 3, fs=1, fft_mode='twosided') + + z = 10 * np.random.randn(11) + if signal_type == 'complex': + z = z + 2j * z + Sz = SFT.stft(z) + z1 = SFT.istft(Sz, k1=len(z)) + assert_allclose(z, z1, + err_msg="Roundtrip for complex-valued window failed") + + +def test_average_all_segments(): + """Compare `welch` function with stft mean. + + Ported from `TestSpectrogram.test_average_all_segments` from file + ``test__spectral.py``. 
+ """ + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg, noverlap = 16, 2 + fw, Pw = welch(x, fs, window, nperseg, noverlap) + SFT = ShortTimeFFT.from_window(window, fs, nperseg, noverlap, + fft_mode='onesided2X', scale_to='psd', + phase_shift=None) + # `welch` positions the window differently than the STFT: + P = SFT.spectrogram(x, detr='constant', p0=0, + p1=(len(x)-noverlap)//SFT.hop, k_offset=nperseg//2) + + assert_allclose(SFT.f, fw) + assert_allclose(np.mean(P, axis=-1), Pw) + + +@pytest.mark.parametrize('window, N, nperseg, noverlap, mfft', + # from test_roundtrip_padded_FFT: + [('hann', 1024, 256, 128, 512), + ('hann', 1024, 256, 128, 501), + ('boxcar', 100, 10, 0, 33), + (('tukey', 0.5), 1152, 256, 64, 1024), + # from test_roundtrip_padded_signal: + ('boxcar', 101, 10, 0, None), + ('hann', 1000, 256, 128, None), + # from test_roundtrip_boundary_extension: + ('boxcar', 100, 10, 0, None), + ('boxcar', 100, 10, 9, None)]) +@pytest.mark.parametrize('padding', get_args(PAD_TYPE)) +def test_stft_padding_roundtrip(window, N: int, nperseg: int, noverlap: int, + mfft: int, padding): + """Test the parameter 'padding' of `stft` with roundtrips. + + The STFT parametrizations were taken from the methods + `test_roundtrip_padded_FFT`, `test_roundtrip_padded_signal` and + `test_roundtrip_boundary_extension` from class `TestSTFT` in file + ``test_spectral.py``. Note that the ShortTimeFFT does not need the + concept of "boundary extension". 
+ """ + x = normal_distribution.rvs(size=N, random_state=2909) # real signal + z = x * np.exp(1j * np.pi / 4) # complex signal + + SFT = ShortTimeFFT.from_window(window, 1, nperseg, noverlap, + fft_mode='twosided', mfft=mfft) + Sx = SFT.stft(x, padding=padding) + x1 = SFT.istft(Sx, k1=N) + assert_allclose(x1, x, + err_msg=f"Failed real roundtrip with '{padding}' padding") + + Sz = SFT.stft(z, padding=padding) + z1 = SFT.istft(Sz, k1=N) + assert_allclose(z1, z, err_msg="Failed complex roundtrip with " + + f" '{padding}' padding") + + +@pytest.mark.parametrize('N_x', (128, 129, 255, 256, 1337)) # signal length +@pytest.mark.parametrize('w_size', (128, 256)) # window length +@pytest.mark.parametrize('t_step', (4, 64)) # SFT time hop +@pytest.mark.parametrize('f_c', (7., 23.)) # frequency of input sine +def test_energy_conservation(N_x: int, w_size: int, t_step: int, f_c: float): + """Test if a `psd`-scaled STFT conserves the L2 norm. + + This test is adapted from MNE-Python [1]_. Besides being battle-tested, + this test has the benefit of using non-standard window including + non-positive values and a 2d input signal. + + Since `ShortTimeFFT` requires the signal length `N_x` to be at least the + window length `w_size`, the parameter `N_x` was changed from + ``(127, 128, 255, 256, 1337)`` to ``(128, 129, 255, 256, 1337)`` to be + more useful. + + .. 
[1] File ``test_stft.py`` of MNE-Python + https://github.com/mne-tools/mne-python/blob/main/mne/time_frequency/tests/test_stft.py + """ + window = np.sin(np.arange(.5, w_size + .5) / w_size * np.pi) + SFT = ShortTimeFFT(window, t_step, fs=1000, fft_mode='onesided2X', + scale_to='psd') + atol = 2*np.finfo(window.dtype).resolution + N_x = max(N_x, w_size) # minimal sing + # Test with low frequency signal + t = np.arange(N_x).astype(np.float64) + x = np.sin(2 * np.pi * f_c * t * SFT.T) + x = np.array([x, x + 1.]) + X = SFT.stft(x) + xp = SFT.istft(X, k1=N_x) + + max_freq = SFT.f[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))] + + assert X.shape[1] == SFT.f_pts + assert np.all(SFT.f >= 0.) + assert np.abs(max_freq - f_c) < 1. + assert_allclose(x, xp, atol=atol) + + # check L2-norm squared (i.e., energy) conservation: + E_x = np.sum(x**2, axis=-1) * SFT.T # numerical integration + aX2 = X.real**2 + X.imag.real**2 + E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f + assert_allclose(E_X, E_x, atol=atol) + + # Test with random signal + np.random.seed(2392795) + x = np.random.randn(2, N_x) + X = SFT.stft(x) + xp = SFT.istft(X, k1=N_x) + + assert X.shape[1] == SFT.f_pts + assert np.all(SFT.f >= 0.) + assert np.abs(max_freq - f_c) < 1. 
+ assert_allclose(x, xp, atol=atol) + + # check L2-norm squared (i.e., energy) conservation: + E_x = np.sum(x**2, axis=-1) * SFT.T # numeric integration + aX2 = X.real ** 2 + X.imag.real ** 2 + E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f + assert_allclose(E_X, E_x, atol=atol) + + # Try with empty array + x = np.zeros((0, N_x)) + X = SFT.stft(x) + xp = SFT.istft(X, k1=N_x) + assert xp.shape == x.shape diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..8be2308c47f92070b4c6e8f27212641efb9a4060 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py @@ -0,0 +1,3695 @@ +import sys + +from concurrent.futures import ThreadPoolExecutor, as_completed +from decimal import Decimal +from itertools import product +from math import gcd + +import pytest +from pytest import raises as assert_raises +from numpy.testing import ( + assert_equal, + assert_almost_equal, assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_, assert_array_less, + suppress_warnings) +from numpy import array, arange +import numpy as np + +from scipy.fft import fft +from scipy.ndimage import correlate1d +from scipy.optimize import fmin, linear_sum_assignment +from scipy import signal +from scipy.signal import ( + correlate, correlate2d, correlation_lags, convolve, convolve2d, + fftconvolve, oaconvolve, choose_conv_method, + hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, + invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, + sosfilt_zi, tf2zpk, BadCoefficients, detrend, unique_roots, residue, + residuez) +from scipy.signal.windows import hann +from scipy.signal._signaltools import (_filtfilt_gust, _compute_factors, + _group_poles) +from scipy.signal._upfirdn import _upfirdn_modes 
+from scipy._lib import _testutils +from scipy._lib._util import ComplexWarning, np_long, np_ulong + + +class _TestConvolve: + + def test_basic(self): + a = [3, 4, 5, 6, 5, 4] + b = [1, 2, 3] + c = convolve(a, b) + assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) + + def test_same(self): + a = [3, 4, 5] + b = [1, 2, 3, 4] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 34])) + + def test_same_eq(self): + a = [3, 4, 5] + b = [1, 2, 3] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 22])) + + def test_complex(self): + x = array([1 + 1j, 2 + 1j, 3 + 1j]) + y = array([1 + 1j, 2 + 1j]) + z = convolve(x, y) + assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) + + def test_zero_rank(self): + a = 1289 + b = 4567 + c = convolve(a, b) + assert_equal(c, a * b) + + def test_broadcastable(self): + a = np.arange(27).reshape(3, 3, 3) + b = np.arange(3) + for i in range(3): + b_shape = [1]*3 + b_shape[i] = 3 + x = convolve(a, b.reshape(b_shape), method='direct') + y = convolve(a, b.reshape(b_shape), method='fft') + assert_allclose(x, y) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + c = convolve(a, b) + assert_equal(c, a * b) + + def test_2d_arrays(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve(a, b) + d = array([[2, 7, 16, 17, 12], + [10, 30, 62, 58, 38], + [12, 31, 58, 49, 30]]) + assert_array_equal(c, d) + + def test_input_swapping(self): + small = arange(8).reshape(2, 2, 2) + big = 1j * arange(27).reshape(3, 3, 3) + big += arange(27)[::-1].reshape(3, 3, 3) + + out_array = array( + [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], + [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], + [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], + [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], + + [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], + [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], + [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], + [142 + 66j, 307 + 161j, 289 + 179j, 153 + 
107j]], + + [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], + [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], + [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], + [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], + + [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], + [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], + [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], + [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) + + assert_array_equal(convolve(small, big, 'full'), out_array) + assert_array_equal(convolve(big, small, 'full'), out_array) + assert_array_equal(convolve(small, big, 'same'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'same'), + out_array[0:3, 0:3, 0:3]) + assert_array_equal(convolve(small, big, 'valid'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'valid'), + out_array[1:3, 1:3, 1:3]) + + def test_invalid_params(self): + a = [3, 4, 5] + b = [1, 2, 3] + assert_raises(ValueError, convolve, a, b, mode='spam') + assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft') + assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') + assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') + assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') + + +class TestConvolve(_TestConvolve): + + def test_valid_mode2(self): + # See gh-5897 + a = [1, 2, 3, 6, 5, 3] + b = [2, 3, 4, 5, 3, 4, 2, 2, 1] + expected = [70, 78, 73, 65] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + a = [1 + 5j, 2 - 1j, 3 + 0j] + b = [2 - 3j, 1 + 0j] + expected = [2 - 3j, 8 - 10j] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + def test_same_mode(self): + a = [1, 2, 3, 3, 1, 2] + b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] + c = convolve(a, b, 'same') + d = array([57, 61, 63, 57, 45, 36]) + 
assert_array_equal(c, d) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) + + def test_convolve_method(self, n=100): + # this types data structure was manually encoded instead of + # using custom filters on the soon-to-be-removed np.sctypes + types = {'uint16', 'uint64', 'int64', 'int32', + 'complex128', 'float64', 'float16', + 'complex64', 'float32', 'int16', + 'uint8', 'uint32', 'int8', 'bool'} + args = [(t1, t2, mode) for t1 in types for t2 in types + for mode in ['valid', 'full', 'same']] + + # These are random arrays, which means test is much stronger than + # convolving testing by convolving two np.ones arrays + np.random.seed(42) + array_types = {'i': np.random.choice([0, 1], size=n), + 'f': np.random.randn(n)} + array_types['b'] = array_types['u'] = array_types['i'] + array_types['c'] = array_types['f'] + 0.5j*array_types['f'] + + for t1, t2, mode in args: + x1 = array_types[np.dtype(t1).kind].astype(t1) + x2 = array_types[np.dtype(t2).kind].astype(t2) + + results = {key: convolve(x1, x2, method=key, mode=mode) + for key in ['fft', 'direct']} + + assert_equal(results['fft'].dtype, results['direct'].dtype) + + if 'bool' in t1 and 'bool' in t2: + assert_equal(choose_conv_method(x1, x2), 'direct') + continue + + # Found by experiment. Found approx smallest value for (rtol, atol) + # threshold to have tests pass. 
+ if any([t in {'complex64', 'float32'} for t in [t1, t2]]): + kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} + elif 'float16' in [t1, t2]: + # atol is default for np.allclose + kwargs = {'rtol': 1e-3, 'atol': 1e-3} + else: + # defaults for np.allclose (different from assert_allclose) + kwargs = {'rtol': 1e-5, 'atol': 1e-8} + + assert_allclose(results['fft'], results['direct'], **kwargs) + + def test_convolve_method_large_input(self): + # This is really a test that convolving two large integers goes to the + # direct method even if they're in the fft method. + for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: + z = np.array([2**n], dtype=np.int64) + fft = convolve(z, z, method='fft') + direct = convolve(z, z, method='direct') + + # this is the case when integer precision gets to us + # issue #6076 has more detail, hopefully more tests after resolved + if n < 50: + assert_equal(fft, direct) + assert_equal(fft, 2**(2*n)) + assert_equal(direct, 2**(2*n)) + + def test_mismatched_dims(self): + # Input arrays should have the same number of dimensions + assert_raises(ValueError, convolve, [1], 2, method='direct') + assert_raises(ValueError, convolve, 1, [2], method='direct') + assert_raises(ValueError, convolve, [1], 2, method='fft') + assert_raises(ValueError, convolve, 1, [2], method='fft') + assert_raises(ValueError, convolve, [1], [[2]]) + assert_raises(ValueError, convolve, [3], 2) + + +class _TestConvolve2d: + + def test_2d_arrays(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + d = array([[2, 7, 16, 17, 12], + [10, 30, 62, 58, 38], + [12, 31, 58, 49, 30]]) + e = convolve2d(a, b) + assert_array_equal(e, d) + + def test_valid_mode(self): + e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + f = [[1, 2, 3], [3, 4, 5]] + h = array([[62, 80, 98, 116, 134]]) + + g = convolve2d(e, f, 'valid') + assert_array_equal(g, h) + + # See gh-5897 + g = convolve2d(f, e, 'valid') + assert_array_equal(g, h) + + def test_valid_mode_complx(self): + e = [[2, 3, 4, 5, 6, 7, 8], 
[4, 5, 6, 7, 8, 9, 10]] + f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j + h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) + + g = convolve2d(e, f, 'valid') + assert_array_almost_equal(g, h) + + # See gh-5897 + g = convolve2d(f, e, 'valid') + assert_array_equal(g, h) + + def test_fillvalue(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + fillval = 1 + c = convolve2d(a, b, 'full', 'fill', fillval) + d = array([[24, 26, 31, 34, 32], + [28, 40, 62, 64, 52], + [32, 46, 67, 62, 48]]) + assert_array_equal(c, d) + + def test_fillvalue_errors(self): + msg = "could not cast `fillvalue` directly to the output " + with np.testing.suppress_warnings() as sup: + sup.filter(ComplexWarning, "Casting complex values") + with assert_raises(ValueError, match=msg): + convolve2d([[1]], [[1, 2]], fillvalue=1j) + + msg = "`fillvalue` must be scalar or an array with " + with assert_raises(ValueError, match=msg): + convolve2d([[1]], [[1, 2]], fillvalue=[1, 2]) + + def test_fillvalue_empty(self): + # Check that fillvalue being empty raises an error: + assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], + fillvalue=[]) + + def test_wrap_boundary(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve2d(a, b, 'full', 'wrap') + d = array([[80, 80, 74, 80, 80], + [68, 68, 62, 68, 68], + [80, 80, 74, 80, 80]]) + assert_array_equal(c, d) + + def test_sym_boundary(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve2d(a, b, 'full', 'symm') + d = array([[34, 30, 44, 62, 66], + [52, 48, 62, 80, 84], + [82, 78, 92, 110, 114]]) + assert_array_equal(c, d) + + @pytest.mark.parametrize('func', [convolve2d, correlate2d]) + @pytest.mark.parametrize('boundary, expected', + [('symm', [[37.0, 42.0, 44.0, 45.0]]), + ('wrap', [[43.0, 44.0, 42.0, 39.0]])]) + def test_same_with_boundary(self, func, boundary, expected): + # Test boundary='symm' and boundary='wrap' with a "long" kernel. 
+ # The size of the kernel requires that the values in the "image" + # be extended more than once to handle the requested boundary method. + # This is a regression test for gh-8684 and gh-8814. + image = np.array([[2.0, -1.0, 3.0, 4.0]]) + kernel = np.ones((1, 21)) + result = func(image, kernel, mode='same', boundary=boundary) + # The expected results were calculated "by hand". Because the + # kernel is all ones, the same result is expected for convolve2d + # and correlate2d. + assert_array_equal(result, expected) + + def test_boundary_extension_same(self): + # Regression test for gh-12686. + # Use ndimage.convolve with appropriate arguments to create the + # expected result. + import scipy.ndimage as ndi + a = np.arange(1, 10*3+1, dtype=float).reshape(10, 3) + b = np.arange(1, 10*10+1, dtype=float).reshape(10, 10) + c = convolve2d(a, b, mode='same', boundary='wrap') + assert_array_equal(c, ndi.convolve(a, b, mode='wrap', origin=(-1, -1))) + + def test_boundary_extension_full(self): + # Regression test for gh-12686. + # Use ndimage.convolve with appropriate arguments to create the + # expected result. + import scipy.ndimage as ndi + a = np.arange(1, 3*3+1, dtype=float).reshape(3, 3) + b = np.arange(1, 6*6+1, dtype=float).reshape(6, 6) + c = convolve2d(a, b, mode='full', boundary='wrap') + apad = np.pad(a, ((3, 3), (3, 3)), 'wrap') + assert_array_equal(c, ndi.convolve(apad, b, mode='wrap')[:-1, :-1]) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. 
+ a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) + + +class TestConvolve2d(_TestConvolve2d): + + def test_same_mode(self): + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + g = convolve2d(e, f, 'same') + h = array([[22, 28, 34], + [80, 98, 116]]) + assert_array_equal(g, h) + + def test_valid_mode2(self): + # See gh-5897 + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + expected = [[62, 80, 98, 116, 134]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] + f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] + expected = [[27 - 1j, 46. + 2j]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + # See gh-5897 + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + def test_consistency_convolve_funcs(self): + # Compare np.convolve, signal.convolve, signal.convolve2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.convolve(a, b, mode=mode), + signal.convolve(a, b, mode=mode)) + assert_almost_equal(np.squeeze( + signal.convolve2d([a], [b], mode=mode)), + signal.convolve(a, b, mode=mode)) + + def test_invalid_dims(self): + assert_raises(ValueError, convolve2d, 3, 4) + assert_raises(ValueError, convolve2d, [3], [4]) + assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) + + @pytest.mark.slow + @pytest.mark.xfail_on_32bit("Can't create large array for test") + def test_large_array(self): + # Test indexing doesn't overflow an int (gh-10761) + n = 2**31 // (1000 * np.int64().itemsize) + _testutils.check_free_memory(2 * n * 1001 * np.int64().itemsize / 1e6) + + # Create a chequered pattern 
of 1s and 0s + a = np.zeros(1001 * n, dtype=np.int64) + a[::2] = 1 + a = np.lib.stride_tricks.as_strided(a, shape=(n, 1000), strides=(8008, 8)) + + count = signal.convolve2d(a, [[1, 1]]) + fails = np.where(count > 1) + assert fails[0].size == 0 + + +class TestFFTConvolve: + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_real_axes(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_complex(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_complex_axes(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 73, 60, 36]]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + 
@pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same_axes(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 73, 60, 36]]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same_axes(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real_same_mode(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + if axes == '': + out = fftconvolve(a, b, 'same') + else: + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + if axes == '': + out = 
fftconvolve(b, a, 'same') + else: + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) + def test_real_same_mode_axes(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected_1 = np.tile(expected_1, [2, 1]) + expected_2 = np.tile(expected_2, [2, 1]) + + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_real(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1]]) + def test_valid_mode_real_axes(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_complex(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_valid_mode_complex_axes(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + def test_valid_mode_ignore_nonaxes(self): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [1, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=1) + assert_array_almost_equal(out, expected) + + def test_empty(self): + # Regression test for #1745: crashes with 0-length input. 
+ assert_(fftconvolve([], []).size == 0) + assert_(fftconvolve([5, 6], []).size == 0) + assert_(fftconvolve([], [7]).size == 0) + + def test_zero_rank(self): + a = array(4967) + b = array(3920) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_random_data(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + if axes == '': + out = fftconvolve(a, b, 'full') + else: + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_random_data_axes(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [[1, 4], + [4, 1], + [1, -1], + [-1, 1], + [-4, 4], + [4, -4], + [-4, -1], + [-1, -4]]) + def test_random_data_multidim_axes(self, axes): + a_shape, b_shape = (123, 22), (132, 11) + np.random.seed(1234) + a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape) + b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape) + expected = convolve2d(a, b, 'full') + + a = a[:, :, None, None, None] + b = b[:, :, None, None, None] + expected = expected[:, :, None, None, None] + + a = np.moveaxis(a.swapaxes(0, 2), 1, 4) + b = np.moveaxis(b.swapaxes(0, 2), 1, 4) + expected = np.moveaxis(expected.swapaxes(0, 2), 1, 4) + + # use 1 for dimension 2 in a and 3 in b to test broadcasting + a = 
np.tile(a, [2, 1, 3, 1, 1]) + b = np.tile(b, [2, 1, 1, 4, 1]) + expected = np.tile(expected, [2, 1, 3, 4, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + assert_allclose(out, expected, rtol=1e-10, atol=1e-10) + + @pytest.mark.slow + @pytest.mark.parametrize( + 'n', + list(range(1, 100)) + + list(range(1000, 1500)) + + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) + def test_many_sizes(self, n): + a = np.random.rand(n) + 1j * np.random.rand(n) + b = np.random.rand(n) + 1j * np.random.rand(n) + expected = np.convolve(a, b, 'full') + + out = fftconvolve(a, b, 'full') + assert_allclose(out, expected, atol=1e-10) + + out = fftconvolve(a, b, 'full', axes=[0]) + assert_allclose(out, expected, atol=1e-10) + + def test_fft_nan(self): + n = 1000 + rng = np.random.default_rng(43876432987) + sig_nan = rng.standard_normal(n) + + for val in [np.nan, np.inf]: + sig_nan[100] = val + coeffs = signal.firwin(200, 0.2) + + msg = "Use of fft convolution.*|invalid value encountered.*" + with pytest.warns(RuntimeWarning, match=msg): + signal.convolve(sig_nan, coeffs, mode='same', method='fft') + +def fftconvolve_err(*args, **kwargs): + raise RuntimeError('Fell back to fftconvolve') + + +def gen_oa_shapes(sizes): + return [(a, b) for a, b in product(sizes, repeat=2) + if abs(a - b) > 3] + + +def gen_oa_shapes_2d(sizes): + shapes0 = gen_oa_shapes(sizes) + shapes1 = gen_oa_shapes(sizes) + shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in + zip(shapes0, shapes1)] + + modes = ['full', 'valid', 'same'] + return [ishapes+(imode,) for ishapes, imode in product(shapes, modes) + if imode != 'valid' or + (ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or + (ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])] + + +def gen_oa_shapes_eq(sizes): + return [(a, b) for a, b in product(sizes, repeat=2) + if a >= b] + + +class TestOAConvolve: + @pytest.mark.slow() + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes_eq(list(range(100)) + + list(range(100, 1000, 
23))) + ) + def test_real_manylens(self, shape_a_0, shape_b_0): + a = np.random.rand(shape_a_0) + b = np.random.rand(shape_b_0) + + expected = fftconvolve(a, b) + out = oaconvolve(a, b) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes([50, 47, 6, 4, 1])) + @pytest.mark.parametrize('is_complex', [True, False]) + @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) + def test_1d_noaxes(self, shape_a_0, shape_b_0, + is_complex, mode, monkeypatch): + a = np.random.rand(shape_a_0) + b = np.random.rand(shape_b_0) + if is_complex: + a = a + 1j*np.random.rand(shape_a_0) + b = b + 1j*np.random.rand(shape_b_0) + + expected = fftconvolve(a, b, mode=mode) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [0, 1]) + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes([50, 47, 6, 4])) + @pytest.mark.parametrize('shape_a_extra', [1, 3]) + @pytest.mark.parametrize('shape_b_extra', [1, 3]) + @pytest.mark.parametrize('is_complex', [True, False]) + @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) + def test_1d_axes(self, axes, shape_a_0, shape_b_0, + shape_a_extra, shape_b_extra, + is_complex, mode, monkeypatch): + ax_a = [shape_a_extra]*2 + ax_b = [shape_b_extra]*2 + ax_a[axes] = shape_a_0 + ax_b[axes] = shape_b_0 + + a = np.random.rand(*ax_a) + b = np.random.rand(*ax_b) + if is_complex: + a = a + 1j*np.random.rand(*ax_a) + b = b + 1j*np.random.rand(*ax_b) + + expected = fftconvolve(a, b, mode=mode, axes=axes) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('shape_a_0, shape_b_0, ' + 'shape_a_1, shape_b_1, mode', + gen_oa_shapes_2d([50, 47, 6, 4])) + @pytest.mark.parametrize('is_complex', [True, 
False]) + def test_2d_noaxes(self, shape_a_0, shape_b_0, + shape_a_1, shape_b_1, mode, + is_complex, monkeypatch): + a = np.random.rand(shape_a_0, shape_a_1) + b = np.random.rand(shape_b_0, shape_b_1) + if is_complex: + a = a + 1j*np.random.rand(shape_a_0, shape_a_1) + b = b + 1j*np.random.rand(shape_b_0, shape_b_1) + + expected = fftconvolve(a, b, mode=mode) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[0, 1], [0, 2], [1, 2]]) + @pytest.mark.parametrize('shape_a_0, shape_b_0, ' + 'shape_a_1, shape_b_1, mode', + gen_oa_shapes_2d([50, 47, 6, 4])) + @pytest.mark.parametrize('shape_a_extra', [1, 3]) + @pytest.mark.parametrize('shape_b_extra', [1, 3]) + @pytest.mark.parametrize('is_complex', [True, False]) + def test_2d_axes(self, axes, shape_a_0, shape_b_0, + shape_a_1, shape_b_1, mode, + shape_a_extra, shape_b_extra, + is_complex, monkeypatch): + ax_a = [shape_a_extra]*3 + ax_b = [shape_b_extra]*3 + ax_a[axes[0]] = shape_a_0 + ax_b[axes[0]] = shape_b_0 + ax_a[axes[1]] = shape_a_1 + ax_b[axes[1]] = shape_b_1 + + a = np.random.rand(*ax_a) + b = np.random.rand(*ax_b) + if is_complex: + a = a + 1j*np.random.rand(*ax_a) + b = b + 1j*np.random.rand(*ax_b) + + expected = fftconvolve(a, b, mode=mode, axes=axes) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode, axes=axes) + + assert_array_almost_equal(out, expected) + + def test_empty(self): + # Regression test for #1745: crashes with 0-length input. 
+ assert_(oaconvolve([], []).size == 0) + assert_(oaconvolve([5, 6], []).size == 0) + assert_(oaconvolve([], [7]).size == 0) + + def test_zero_rank(self): + a = array(4967) + b = array(3920) + out = oaconvolve(a, b) + assert_equal(out, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + out = oaconvolve(a, b) + assert_equal(out, a * b) + + +class TestAllFreqConvolves: + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_shapes(self, convapproach): + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + with assert_raises(ValueError, + match="For 'valid' mode, one must be at least " + "as large as the other in every dimension"): + convapproach(a, b, mode='valid') + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_shapes_axes(self, convapproach): + a = np.zeros([5, 6, 2, 1]) + b = np.zeros([5, 6, 3, 1]) + with assert_raises(ValueError, + match=r"incompatible shapes for in1 and in2:" + r" \(5L?, 6L?, 2L?, 1L?\) and" + r" \(5L?, 6L?, 3L?, 1L?\)"): + convapproach(a, b, axes=[0, 1]) + + @pytest.mark.parametrize('a,b', + [([1], 2), + (1, [2]), + ([3], [[2]])]) + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_mismatched_dims(self, a, b, convapproach): + with assert_raises(ValueError, + match="in1 and in2 should have the same" + " dimensionality"): + convapproach(a, b) + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_flags(self, convapproach): + with assert_raises(ValueError, + match="acceptable mode flags are 'valid'," + " 'same', or 'full'"): + convapproach([1], [2], mode='chips') + + with assert_raises(ValueError, + match="when provided, axes cannot be empty"): + convapproach([1], [2], axes=[]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + convapproach([1], [2], axes=[[1, 2], [3, 4]]) + + with 
assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + convapproach([1], [2], axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + convapproach([1], [2], axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + convapproach([1], [2], axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + convapproach([1], [2], axes=[0, 0]) + + @pytest.mark.parametrize('dtype', [np.longdouble, np.clongdouble]) + def test_longdtype_input(self, dtype): + x = np.random.random((27, 27)).astype(dtype) + y = np.random.random((4, 4)).astype(dtype) + if np.iscomplexobj(dtype()): + x += .1j + y -= .1j + + res = fftconvolve(x, y) + assert_allclose(res, convolve(x, y, method='direct')) + assert res.dtype == dtype + + +class TestMedFilt: + + IN = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], + [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], + [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], + [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], + [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], + [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], + [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], + [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], + [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], + [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] + + OUT = [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], + [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], + [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], + [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], + [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], + [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], + [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], + [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], + [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], + [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]] + + KERNEL_SIZE = [7,3] + + def test_basic(self): + d = signal.medfilt(self.IN, self.KERNEL_SIZE) + e = signal.medfilt2d(np.array(self.IN, float), self.KERNEL_SIZE) + assert_array_equal(d, self.OUT) + assert_array_equal(d, e) + + 
@pytest.mark.parametrize('dtype', [np.ubyte, np.byte, np.ushort, np.short, + np_ulong, np_long, np.ulonglong, np.ulonglong, + np.float32, np.float64]) + def test_types(self, dtype): + # volume input and output types match + in_typed = np.array(self.IN, dtype=dtype) + assert_equal(signal.medfilt(in_typed).dtype, dtype) + assert_equal(signal.medfilt2d(in_typed).dtype, dtype) + + def test_types_deprecated(self): + dtype = np.longdouble + in_typed = np.array(self.IN, dtype=dtype) + msg = "Using medfilt with arrays of dtype" + with pytest.deprecated_call(match=msg): + assert_equal(signal.medfilt(in_typed).dtype, dtype) + with pytest.deprecated_call(match=msg): + assert_equal(signal.medfilt2d(in_typed).dtype, dtype) + + + @pytest.mark.parametrize('dtype', [np.bool_, np.complex64, np.complex128, + np.clongdouble, np.float16,]) + def test_invalid_dtypes(self, dtype): + in_typed = np.array(self.IN, dtype=dtype) + with pytest.raises(ValueError, match="not supported"): + signal.medfilt(in_typed) + + with pytest.raises(ValueError, match="not supported"): + signal.medfilt2d(in_typed) + + def test_none(self): + # gh-1651, trac #1124. Ensure this does not segfault. + msg = "kernel_size exceeds volume.*|Using medfilt with arrays of dtype.*" + with pytest.warns((UserWarning, DeprecationWarning), match=msg): + assert_raises(TypeError, signal.medfilt, None) + + def test_odd_strides(self): + # Avoid a regression with possible contiguous + # numpy arrays that have odd strides. The stride value below gets + # us into wrong memory if used (but it does not need to be used) + dummy = np.arange(10, dtype=np.float64) + a = dummy[5:6] + a.strides = 16 + assert_(signal.medfilt(a, 1) == 5.) 
+ + def test_refcounting(self): + # Check a refcounting-related crash + a = Decimal(123) + x = np.array([a, a], dtype=object) + if hasattr(sys, 'getrefcount'): + n = 2 * sys.getrefcount(a) + else: + n = 10 + # Shouldn't segfault: + msg = "kernel_size exceeds volume.*|Using medfilt with arrays of dtype.*" + with pytest.warns((UserWarning, DeprecationWarning), match=msg): + for j in range(n): + signal.medfilt(x) + if hasattr(sys, 'getrefcount'): + assert_(sys.getrefcount(a) < n) + assert_equal(x, [a, a]) + + def test_object(self,): + msg = "Using medfilt with arrays of dtype" + with pytest.deprecated_call(match=msg): + in_object = np.array(self.IN, dtype=object) + out_object = np.array(self.OUT, dtype=object) + assert_array_equal(signal.medfilt(in_object, self.KERNEL_SIZE), + out_object) + + @pytest.mark.parametrize("dtype", [np.ubyte, np.float32, np.float64]) + def test_medfilt2d_parallel(self, dtype): + in_typed = np.array(self.IN, dtype=dtype) + expected = np.array(self.OUT, dtype=dtype) + + # This is used to simplify the indexing calculations. + assert in_typed.shape == expected.shape + + # We'll do the calculation in four chunks. M1 and N1 are the dimensions + # of the first output chunk. We have to extend the input by half the + # kernel size to be able to calculate the full output chunk. + M1 = expected.shape[0] // 2 + N1 = expected.shape[1] // 2 + offM = self.KERNEL_SIZE[0] // 2 + 1 + offN = self.KERNEL_SIZE[1] // 2 + 1 + + def apply(chunk): + # in = slice of in_typed to use. + # sel = slice of output to crop it to the correct region. + # out = slice of output array to store in. 
+ M, N = chunk + if M == 0: + Min = slice(0, M1 + offM) + Msel = slice(0, -offM) + Mout = slice(0, M1) + else: + Min = slice(M1 - offM, None) + Msel = slice(offM, None) + Mout = slice(M1, None) + if N == 0: + Nin = slice(0, N1 + offN) + Nsel = slice(0, -offN) + Nout = slice(0, N1) + else: + Nin = slice(N1 - offN, None) + Nsel = slice(offN, None) + Nout = slice(N1, None) + + # Do the calculation, but do not write to the output in the threads. + chunk_data = in_typed[Min, Nin] + med = signal.medfilt2d(chunk_data, self.KERNEL_SIZE) + return med[Msel, Nsel], Mout, Nout + + # Give each chunk to a different thread. + output = np.zeros_like(expected) + with ThreadPoolExecutor(max_workers=4) as pool: + chunks = {(0, 0), (0, 1), (1, 0), (1, 1)} + futures = {pool.submit(apply, chunk) for chunk in chunks} + + # Store each result in the output as it arrives. + for future in as_completed(futures): + data, Mslice, Nslice = future.result() + output[Mslice, Nslice] = data + + assert_array_equal(output, expected) + + +class TestWiener: + + def test_basic(self): + g = array([[5, 6, 4, 3], + [3, 5, 6, 2], + [2, 3, 5, 6], + [1, 6, 9, 7]], 'd') + h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], + [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], + [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], + [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) + assert_array_almost_equal(signal.wiener(g), h, decimal=6) + assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) + + +padtype_options = ["mean", "median", "minimum", "maximum", "line"] +padtype_options += _upfirdn_modes + + +class TestResample: + def test_basic(self): + # Some basic tests + + # Regression test for issue #3603. 
+ # window.shape must equal to sig.shape[0] + sig = np.arange(128) + num = 256 + win = signal.get_window(('kaiser', 8.0), 160) + assert_raises(ValueError, signal.resample, sig, num, window=win) + + # Other degenerate conditions + assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) + assert_raises(ValueError, signal.resample_poly, sig, 1, 0) + assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='') + assert_raises(ValueError, signal.resample_poly, sig, 2, 1, + padtype='mean', cval=10) + + # test for issue #6505 - should not modify window.shape when axis ≠ 0 + sig2 = np.tile(np.arange(160), (2, 1)) + signal.resample(sig2, num, axis=-1, window=win) + assert_(win.shape == (160,)) + + @pytest.mark.parametrize('window', (None, 'hamming')) + @pytest.mark.parametrize('N', (20, 19)) + @pytest.mark.parametrize('num', (100, 101, 10, 11)) + def test_rfft(self, N, num, window): + # Make sure the speed up using rfft gives the same result as the normal + # way using fft + x = np.linspace(0, 10, N, endpoint=False) + y = np.cos(-x**2/6.0) + assert_allclose(signal.resample(y, num, window=window), + signal.resample(y + 0j, num, window=window).real) + + y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)]) + y_complex = y + 0j + assert_allclose( + signal.resample(y, num, axis=1, window=window), + signal.resample(y_complex, num, axis=1, window=window).real, + atol=1e-9) + + def test_input_domain(self): + # Test if both input domain modes produce the same results. 
+ tsig = np.arange(256) + 0j + fsig = fft(tsig) + num = 256 + assert_allclose( + signal.resample(fsig, num, domain='freq'), + signal.resample(tsig, num, domain='time'), + atol=1e-9) + + @pytest.mark.parametrize('nx', (1, 2, 3, 5, 8)) + @pytest.mark.parametrize('ny', (1, 2, 3, 5, 8)) + @pytest.mark.parametrize('dtype', ('float', 'complex')) + def test_dc(self, nx, ny, dtype): + x = np.array([1] * nx, dtype) + y = signal.resample(x, ny) + assert_allclose(y, [1] * ny) + + @pytest.mark.parametrize('padtype', padtype_options) + def test_mutable_window(self, padtype): + # Test that a mutable window is not modified + impulse = np.zeros(3) + window = np.random.RandomState(0).randn(2) + window_orig = window.copy() + signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype) + assert_array_equal(window, window_orig) + + @pytest.mark.parametrize('padtype', padtype_options) + def test_output_float32(self, padtype): + # Test that float32 inputs yield a float32 output + x = np.arange(10, dtype=np.float32) + h = np.array([1, 1, 1], dtype=np.float32) + y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype) + assert y.dtype == np.float32 + + @pytest.mark.parametrize('padtype', padtype_options) + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_output_match_dtype(self, padtype, dtype): + # Test that the dtype of x is preserved per issue #14733 + x = np.arange(10, dtype=dtype) + y = signal.resample_poly(x, 1, 2, padtype=padtype) + assert y.dtype == x.dtype + + @pytest.mark.parametrize( + "method, ext, padtype", + [("fft", False, None)] + + list( + product( + ["polyphase"], [False, True], padtype_options, + ) + ), + ) + def test_resample_methods(self, method, ext, padtype): + # Test resampling of sinusoids and random noise (1-sec) + rate = 100 + rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] + + # Sinusoids, windowed to avoid edge artifacts + t = np.arange(rate) / float(rate) + freqs = np.array((1., 10., 40.))[:, np.newaxis] + x = np.sin(2 * 
np.pi * freqs * t) * hann(rate) + + for rate_to in rates_to: + t_to = np.arange(rate_to) / float(rate_to) + y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) + if method == 'fft': + y_resamps = signal.resample(x, rate_to, axis=-1) + else: + if ext and rate_to != rate: + # Match default window design + g = gcd(rate_to, rate) + up = rate_to // g + down = rate // g + max_rate = max(up, down) + f_c = 1. / max_rate + half_len = 10 * max_rate + window = signal.firwin(2 * half_len + 1, f_c, + window=('kaiser', 5.0)) + polyargs = {'window': window, 'padtype': padtype} + else: + polyargs = {'padtype': padtype} + + y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, + **polyargs) + + for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): + if freq >= 0.5 * rate_to: + y_to.fill(0.) # mostly low-passed away + if padtype in ['minimum', 'maximum']: + assert_allclose(y_resamp, y_to, atol=3e-1) + else: + assert_allclose(y_resamp, y_to, atol=1e-3) + else: + assert_array_equal(y_to.shape, y_resamp.shape) + corr = np.corrcoef(y_to, y_resamp)[0, 1] + assert_(corr > 0.99, msg=(corr, rate, rate_to)) + + # Random data + rng = np.random.RandomState(0) + x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind + for rate_to in rates_to: + # random data + t_to = np.arange(rate_to) / float(rate_to) + y_to = np.interp(t_to, t, x) + if method == 'fft': + y_resamp = signal.resample(x, rate_to) + else: + y_resamp = signal.resample_poly(x, rate_to, rate, + padtype=padtype) + assert_array_equal(y_to.shape, y_resamp.shape) + corr = np.corrcoef(y_to, y_resamp)[0, 1] + assert_(corr > 0.99, msg=corr) + + # More tests of fft method (Master 0.18.1 fails these) + if method == 'fft': + x1 = np.array([1.+0.j, 0.+0.j]) + y1_test = signal.resample(x1, 4) + # upsampling a complex array + y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j]) + assert_allclose(y1_test, y1_true, atol=1e-12) + x2 = np.array([1., 0.5, 0., 0.5]) + y2_test = signal.resample(x2, 2) # downsampling a real array + 
y2_true = np.array([1., 0.]) + assert_allclose(y2_test, y2_true, atol=1e-12) + + def test_poly_vs_filtfilt(self): + # Check that up=1.0 gives same answer as filtfilt + slicing + random_state = np.random.RandomState(17) + try_types = (int, np.float32, np.complex64, float, complex) + size = 10000 + down_factors = [2, 11, 79] + + for dtype in try_types: + x = random_state.randn(size).astype(dtype) + if dtype in (np.complex64, np.complex128): + x += 1j * random_state.randn(size) + + # resample_poly assumes zeros outside of signl, whereas filtfilt + # can only constant-pad. Make them equivalent: + x[0] = 0 + x[-1] = 0 + + for down in down_factors: + h = signal.firwin(31, 1. / down, window='hamming') + yf = filtfilt(h, 1.0, x, padtype='constant')[::down] + + # Need to pass convolved version of filter to resample_poly, + # since filtfilt does forward and backward, but resample_poly + # only goes forward + hc = convolve(h, h[::-1]) + y = signal.resample_poly(x, 1, down, window=hc) + assert_allclose(yf, y, atol=1e-7, rtol=1e-7) + + def test_correlate1d(self): + for down in [2, 4]: + for nx in range(1, 40, down): + for nweights in (32, 33): + x = np.random.random((nx,)) + weights = np.random.random((nweights,)) + y_g = correlate1d(x, weights[::-1], mode='constant') + y_s = signal.resample_poly( + x, up=1, down=down, window=weights) + assert_allclose(y_g[::down], y_s) + + +class TestCSpline1DEval: + + def test_basic(self): + y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) + x = arange(len(y)) + dx = x[1] - x[0] + cj = signal.cspline1d(y) + + x2 = arange(len(y) * 10.0) / 10.0 + y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) + + # make sure interpolated values are on knot points + assert_array_almost_equal(y2[::10], y, decimal=5) + + def test_complex(self): + # create some smoothly varying complex signal to interpolate + x = np.arange(2) + y = np.zeros(x.shape, dtype=np.complex64) + T = 10.0 + f = 1.0 / T + y = np.exp(2.0J * np.pi * f * x) + + # get the cspline transform + cy = 
signal.cspline1d(y) + + # determine new test x value and interpolate + xnew = np.array([0.5]) + ynew = signal.cspline1d_eval(cy, xnew) + + assert_equal(ynew.dtype, y.dtype) + +class TestOrderFilt: + + def test_basic(self): + assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), + [2, 3, 2]) + + +class _TestLinearFilter: + + def generate(self, shape): + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + return self.convert_dtype(x) + + def convert_dtype(self, arr): + if self.dtype == np.dtype('O'): + arr = np.asarray(arr) + out = np.empty(arr.shape, self.dtype) + iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], + [['readonly'],['writeonly']]) + for x, y in iter: + y[...] = self.type(x[()]) + return out + else: + return np.asarray(arr, dtype=self.dtype) + + def test_rank_1_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, -0.5]) + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_IIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([0.5, -0.5]) + zi = self.convert_dtype([1, 2]) + y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) + zf_r = self.convert_dtype([13, -10]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_1_FIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1, 1]) + a = self.convert_dtype([1]) + zi = self.convert_dtype([1, 1]) + y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) + zf_r = self.convert_dtype([9, 5]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def 
test_rank_2_IIR_axis_0(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], + [6, 4, 2]]) + y = lfilter(b, a, x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + def test_rank_2_IIR_axis_1(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]]) + y = lfilter(b, a, x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank_2_IIR_axis_0_init_cond(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((4,1))) + + y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], + [19, -17, 19]]) + zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] + y, zf = lfilter(b, a, x, axis=1, zi=zi) + assert_array_almost_equal(y_r2_a0_1, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_2_IIR_axis_1_init_cond(self): + x = self.generate((4,3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((1,3))) + + y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], + [1, 3, 5], [5, 3, 1]]) + zf_r = self.convert_dtype([[-23, -23, -23]]) + y, zf = lfilter(b, a, x, axis=0, zi=zi) + assert_array_almost_equal(y_r2_a0_0, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_IIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_IIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 1 + 
zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1]) + y, zf = lfilter(b, a, x, axis, zi) + def lf0(w): + return lfilter(b, a, w, zi=zi1)[0] + def lf1(w): + return lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_FIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_FIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 2 + zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1, 1]) + y, zf = lfilter(b, a, x, axis, zi) + def lf0(w): + return lfilter(b, a, w, zi=zi1)[0] + def lf1(w): + return lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_zi_pseudobroadcast(self): + x = self.generate((4, 5, 20)) + b,a = signal.butter(8, 0.2, output='ba') + b = self.convert_dtype(b) + a = self.convert_dtype(a) + zi_size = b.shape[0] - 1 + + # lfilter requires x.ndim == zi.ndim exactly. However, zi can have + # length 1 dimensions. 
+ zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) + zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) + + y_full, zf_full = lfilter(b, a, x, zi=zi_full) + y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) + + assert_array_almost_equal(y_sing, y_full) + assert_array_almost_equal(zf_full, zf_sing) + + # lfilter does not prepend ones + assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) + + def test_scalar_a(self): + # a can be a scalar. + x = self.generate(6) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) + + y = lfilter(b, a[0], x) + assert_array_almost_equal(y, y_r) + + def test_zi_some_singleton_dims(self): + # lfilter doesn't really broadcast (no prepending of 1's). But does + # do singleton expansion if x and zi have the same ndim. This was + # broken only if a subset of the axes were singletons (gh-4681). + x = self.convert_dtype(np.zeros((3,2,5), 'l')) + b = self.convert_dtype(np.ones(5, 'l')) + a = self.convert_dtype(np.array([1,0,0])) + zi = np.ones((3,1,4), 'l') + zi[1,:,:] *= 2 + zi[2,:,:] *= 3 + zi = self.convert_dtype(zi) + + zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) + y_expected = np.zeros((3,2,5), 'l') + y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] + y_expected = self.convert_dtype(y_expected) + + # IIR + y_iir, zf_iir = lfilter(b, a, x, -1, zi) + assert_array_almost_equal(y_iir, y_expected) + assert_array_almost_equal(zf_iir, zf_expected) + + # FIR + y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) + assert_array_almost_equal(y_fir, y_expected) + assert_array_almost_equal(zf_fir, zf_expected) + + def base_bad_size_zi(self, b, a, x, axis, zi): + b = self.convert_dtype(b) + a = self.convert_dtype(a) + x = self.convert_dtype(x) + zi = self.convert_dtype(zi) + assert_raises(ValueError, lfilter, b, a, x, axis, zi) + + def test_bad_size_zi(self): + # rank 1 + x1 = np.arange(6) + self.base_bad_size_zi([1], [1], x1, -1, [1]) + self.base_bad_size_zi([1, 1], [1], 
x1, -1, [0, 1]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) + + # rank 2 + x2 = np.arange(12).reshape((4,3)) + # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) + self.base_bad_size_zi([1], [1], x2, 0, [0]) + + # for each of these there are 5 cases tested (in this order): + # 1. not deep enough, right # elements + # 2. too deep, right # elements + # 3. right depth, right # elements, transposed + # 4. right depth, too few elements + # 5. 
right depth, too many elements + + self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) + self.base_bad_size_zi([1], [1], x2, 1, [0]) + + self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1, 1], [1], x2, 
1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + def test_empty_zi(self): + # Regression test for #880: empty array for zi crashes. 
+ x = self.generate((5,)) + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + zi = self.convert_dtype([]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, x) + assert_equal(zf.dtype, self.dtype) + assert_equal(zf.size, 0) + + def test_lfiltic_bad_zi(self): + # Regression test for #3699: bad initial conditions + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + # "y" sets the datatype of zi, so it truncates if int + zi = lfiltic(b, a, [1., 0]) + zi_1 = lfiltic(b, a, [1, 0]) + zi_2 = lfiltic(b, a, [True, False]) + assert_array_equal(zi, zi_1) + assert_array_equal(zi, zi_2) + + def test_short_x_FIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([7, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_short_x_IIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1, 1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([-67, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_do_not_modify_a_b_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + b0 = b.copy() + a = self.convert_dtype([0.5, -0.5]) + a0 = a.copy() + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + y_f = lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + def test_do_not_modify_a_b_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, 1]) + b0 = b.copy() + a = self.convert_dtype([2]) + a0 = a.copy() + y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) + y_f = 
lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + @pytest.mark.parametrize("a", [1.0, [1.0], np.array(1.0)]) + @pytest.mark.parametrize("b", [1.0, [1.0], np.array(1.0)]) + def test_scalar_input(self, a, b): + data = np.random.randn(10) + assert_allclose( + lfilter(np.array([1.0]), np.array([1.0]), data), + lfilter(b, a, data)) + + +class TestLinearFilterFloat32(_TestLinearFilter): + dtype = np.dtype('f') + + +class TestLinearFilterFloat64(_TestLinearFilter): + dtype = np.dtype('d') + + +class TestLinearFilterFloatExtended(_TestLinearFilter): + dtype = np.dtype('g') + + +class TestLinearFilterComplex64(_TestLinearFilter): + dtype = np.dtype('F') + + +class TestLinearFilterComplex128(_TestLinearFilter): + dtype = np.dtype('D') + + +class TestLinearFilterComplexExtended(_TestLinearFilter): + dtype = np.dtype('G') + +class TestLinearFilterDecimal(_TestLinearFilter): + dtype = np.dtype('O') + + def type(self, x): + return Decimal(str(x)) + + +class TestLinearFilterObject(_TestLinearFilter): + dtype = np.dtype('O') + type = float + + +def test_lfilter_bad_object(): + # lfilter: object arrays with non-numeric objects raise TypeError. + # Regression test for ticket #1452. 
+ if hasattr(sys, 'abiflags') and 'd' in sys.abiflags: + pytest.skip('test is flaky when run with python3-dbg') + assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) + assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) + assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) + + +def test_lfilter_notimplemented_input(): + # Should not crash, gh-7991 + assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) + + +@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, + np_ulong, np_long, np.ulonglong, np.ulonglong, + np.float32, np.float64, np.longdouble, + Decimal]) +class TestCorrelateReal: + def _setup_rank1(self, dt): + a = np.linspace(0, 3, 4).astype(dt) + b = np.linspace(1, 2, 2).astype(dt) + + y_r = np.array([0, 2, 5, 8, 3]).astype(dt) + return a, b, y_r + + def equal_tolerance(self, res_dt): + # default value of keyword + decimal = 6 + try: + dt_info = np.finfo(res_dt) + if hasattr(dt_info, 'resolution'): + decimal = int(-0.5*np.log10(dt_info.resolution)) + except Exception: + pass + return decimal + + def equal_tolerance_fft(self, res_dt): + # FFT implementations convert longdouble arguments down to + # double so don't expect better precision, see gh-9520 + if res_dt == np.longdouble: + return self.equal_tolerance(np.float64) + else: + return self.equal_tolerance(res_dt) + + def test_method(self, dt): + if dt == Decimal: + method = choose_conv_method([Decimal(4)], [Decimal(3)]) + assert_equal(method, 'direct') + else: + a, b, y_r = self._setup_rank3(dt) + y_fft = correlate(a, b, method='fft') + y_direct = correlate(a, b, method='direct') + + assert_array_almost_equal(y_r, + y_fft, + decimal=self.equal_tolerance_fft(y_fft.dtype),) + assert_array_almost_equal(y_r, + y_direct, + decimal=self.equal_tolerance(y_direct.dtype),) + assert_equal(y_fft.dtype, dt) + assert_equal(y_direct.dtype, dt) + + def test_rank1_valid(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 
'valid') + assert_array_almost_equal(y, y_r[1:4]) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, 'valid') + assert_array_almost_equal(y, y_r[1:4][::-1]) + assert_equal(y.dtype, dt) + + def test_rank1_same(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'same') + assert_array_almost_equal(y, y_r[:-1]) + assert_equal(y.dtype, dt) + + def test_rank1_full(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r) + assert_equal(y.dtype, dt) + + def _setup_rank3(self, dt): + a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( + dt) + b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( + dt) + + y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], + [46., 432., 1062., 1840., 2672., 1698., 864., 266.], + [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], + [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], + [202., 664., 1290., 1984., 2688., 1590., 712., 150.], + [114., 344., 642., 960., 1280., 726., 296., 38.]], + + [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], + [134., 920., 2166., 3680., 5280., 3306., 1640., 474.], + [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], + [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], + [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], + [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], + + [[22., 214., 528., 916., 1332., 846., 430., 132.], + [86., 484., 1098., 1832., 2600., 1602., 772., 206.], + [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], + [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], + [230., 692., 1290., 1928., 2568., 1458., 596., 78.], + [126., 354., 636., 924., 1212., 654., 234., 0.]]], + dtype=np.float64).astype(dt) + + return a, b, y_r + + def test_rank3_valid(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b, "valid") + assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = 
correlate(b, a, "valid") + assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) + assert_equal(y.dtype, dt) + + def test_rank3_same(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b, "same") + assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) + assert_equal(y.dtype, dt) + + def test_rank3_all(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b) + assert_array_almost_equal(y, y_r) + assert_equal(y.dtype, dt) + + +class TestCorrelate: + # Tests that don't depend on dtype + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) + + def test_invalid_params(self): + a = [3, 4, 5] + b = [1, 2, 3] + assert_raises(ValueError, correlate, a, b, mode='spam') + assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') + assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') + assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') + assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') + + def test_mismatched_dims(self): + # Input arrays should have the same number of dimensions + assert_raises(ValueError, correlate, [1], 2, method='direct') + assert_raises(ValueError, correlate, 1, [2], method='direct') + assert_raises(ValueError, correlate, [1], 2, method='fft') + assert_raises(ValueError, correlate, 1, [2], method='fft') + assert_raises(ValueError, correlate, [1], [[2]]) + assert_raises(ValueError, correlate, [3], 2) + + def test_numpy_fastpath(self): + a = [1, 2, 3] + b = [4, 5] + assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) + + a = [1, 2, 3] + b = [4, 5, 6] + 
assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) + assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) + assert_allclose(correlate(a, b, mode='valid'), [32]) + + +@pytest.mark.parametrize("mode", ["valid", "same", "full"]) +@pytest.mark.parametrize("behind", [True, False]) +@pytest.mark.parametrize("input_size", [100, 101, 1000, 1001, 10000, 10001]) +def test_correlation_lags(mode, behind, input_size): + # generate random data + rng = np.random.RandomState(0) + in1 = rng.standard_normal(input_size) + offset = int(input_size/10) + # generate offset version of array to correlate with + if behind: + # y is behind x + in2 = np.concatenate([rng.standard_normal(offset), in1]) + expected = -offset + else: + # y is ahead of x + in2 = in1[offset:] + expected = offset + # cross correlate, returning lag information + correlation = correlate(in1, in2, mode=mode) + lags = correlation_lags(in1.size, in2.size, mode=mode) + # identify the peak + lag_index = np.argmax(correlation) + # Check as expected + assert_equal(lags[lag_index], expected) + # Correlation and lags shape should match + assert_equal(lags.shape, correlation.shape) + + +@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble]) +class TestCorrelateComplex: + # The decimal precision to be used for comparing results. + # This value will be passed as the 'decimal' keyword argument of + # assert_array_almost_equal(). + # Since correlate may chose to use FFT method which converts + # longdoubles to doubles internally don't expect better precision + # for longdouble than for double (see gh-9520). 
+ + def decimal(self, dt): + if dt == np.clongdouble: + dt = np.cdouble + return int(2 * np.finfo(dt).precision / 3) + + def _setup_rank1(self, dt, mode): + np.random.seed(9) + a = np.random.randn(10).astype(dt) + a += 1j * np.random.randn(10).astype(dt) + b = np.random.randn(8).astype(dt) + b += 1j * np.random.randn(8).astype(dt) + + y_r = (correlate(a.real, b.real, mode=mode) + + correlate(a.imag, b.imag, mode=mode)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + + correlate(a.imag, b.real, mode=mode)) + return a, b, y_r + + def test_rank1_valid(self, dt): + a, b, y_r = self._setup_rank1(dt, 'valid') + y = correlate(a, b, 'valid') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, 'valid') + assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_same(self, dt): + a, b, y_r = self._setup_rank1(dt, 'same') + y = correlate(a, b, 'same') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_full(self, dt): + a, b, y_r = self._setup_rank1(dt, 'full') + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_swap_full(self, dt): + d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) + k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) + y = correlate(d, k) + assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) + + def test_swap_same(self, dt): + d = [0.+0.j, 1.+1.j, 2.+2.j] + k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] + y = correlate(d, k, mode="same") + assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) + + def test_rank3(self, dt): + a = np.random.randn(10, 8, 6).astype(dt) + a += 1j * np.random.randn(10, 8, 6).astype(dt) + b = np.random.randn(8, 6, 4).astype(dt) + b += 1j * np.random.randn(8, 6, 4).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, 
b.imag)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + assert_equal(y.dtype, dt) + + def test_rank0(self, dt): + a = np.array(np.random.randn()).astype(dt) + a += 1j * np.array(np.random.randn()).astype(dt) + b = np.array(np.random.randn()).astype(dt) + b += 1j * np.array(np.random.randn()).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, b.imag)).astype(dt) + y_r += 1j * np.array(-correlate(a.real, b.imag) + + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + assert_equal(y.dtype, dt) + + assert_equal(correlate([1], [2j]), correlate(1, 2j)) + assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) + assert_equal(correlate([3j], [4]), correlate(3j, 4)) + + +class TestCorrelate2d: + + def test_consistency_correlate_funcs(self): + # Compare np.correlate, signal.correlate, signal.correlate2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.correlate(a, b, mode=mode), + signal.correlate(a, b, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], + mode=mode)), + signal.correlate(a, b, mode=mode)) + + # See gh-5897 + if mode == 'valid': + assert_almost_equal(np.correlate(b, a, mode=mode), + signal.correlate(b, a, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], + mode=mode)), + signal.correlate(b, a, mode=mode)) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. 
+ a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) + + def test_complex_input(self): + assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) + assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) + assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) + + +class TestLFilterZI: + + def test_basic(self): + a = np.array([1.0, -1.0, 0.5]) + b = np.array([1.0, 0.0, 2.0]) + zi_expected = np.array([5.0, -1.0]) + zi = lfilter_zi(b, a) + assert_array_almost_equal(zi, zi_expected) + + def test_scale_invariance(self): + # Regression test. There was a bug in which b was not correctly + # rescaled when a[0] was nonzero. + b = np.array([2, 8, 5]) + a = np.array([1, 1, 8]) + zi1 = lfilter_zi(b, a) + zi2 = lfilter_zi(2*b, 2*a) + assert_allclose(zi2, zi1, rtol=1e-12) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_types(self, dtype): + b = np.zeros((8), dtype=dtype) + a = np.array([1], dtype=dtype) + assert_equal(np.real(signal.lfilter_zi(b, a)).dtype, dtype) + + +class TestFiltFilt: + filtfilt_kind = 'tf' + + def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, + method='pad', irlen=None): + if self.filtfilt_kind == 'tf': + b, a = zpk2tf(*zpk) + return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) + elif self.filtfilt_kind == 'sos': + sos = zpk2sos(*zpk) + return sosfiltfilt(sos, x, axis, padtype, padlen) + + def test_basic(self): + zpk = tf2zpk([1, 2, 3], [1, 2, 3]) + out = self.filtfilt(zpk, np.arange(12)) + assert_allclose(out, arange(12), atol=5.28e-11) + + def test_sine(self): + rate = 2000 + t = np.linspace(0, 1.0, rate + 1) + # A signal with low frequency and a high frequency. 
+ xlow = np.sin(5 * 2 * np.pi * t) + xhigh = np.sin(250 * 2 * np.pi * t) + x = xlow + xhigh + + zpk = butter(8, 0.125, output='zpk') + # r is the magnitude of the largest pole. + r = np.abs(zpk[1]).max() + eps = 1e-5 + # n estimates the number of steps for the + # transient to decay by a factor of eps. + n = int(np.ceil(np.log(eps) / np.log(r))) + + # High order lowpass filter... + y = self.filtfilt(zpk, x, padlen=n) + # Result should be just xlow. + err = np.abs(y - xlow).max() + assert_(err < 1e-4) + + # A 2D case. + x2d = np.vstack([xlow, xlow + xhigh]) + y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) + assert_equal(y2d.shape, x2d.shape) + err = np.abs(y2d - xlow).max() + assert_(err < 1e-4) + + # Use the previous result to check the use of the axis keyword. + # (Regression test for ticket #1620) + y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) + assert_equal(y2d, y2dt.T) + + def test_axis(self): + # Test the 'axis' keyword on a 3D array. + x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) + zpk = butter(3, 0.125, output='zpk') + y0 = self.filtfilt(zpk, x, padlen=0, axis=0) + y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) + assert_array_equal(y0, np.swapaxes(y1, 0, 1)) + y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) + assert_array_equal(y0, np.swapaxes(y2, 0, 2)) + + def test_acoeff(self): + if self.filtfilt_kind != 'tf': + return # only necessary for TF + # test for 'a' coefficient as single number + out = signal.filtfilt([.5, .5], 1, np.arange(10)) + assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) + + def test_gust_simple(self): + if self.filtfilt_kind != 'tf': + pytest.skip('gust only implemented for TF systems') + # The input array has length 2. The exact solution for this case + # was computed "by hand". 
+ x = np.array([1.0, 2.0]) + b = np.array([0.5]) + a = np.array([1.0, -0.5]) + y, z1, z2 = _filtfilt_gust(b, a, x) + assert_allclose([z1[0], z2[0]], + [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) + assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], + 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) + + def test_gust_scalars(self): + if self.filtfilt_kind != 'tf': + pytest.skip('gust only implemented for TF systems') + # The filter coefficients are both scalars, so the filter simply + # multiplies its input by b/a. When it is used in filtfilt, the + # factor is (b/a)**2. + x = np.arange(12) + b = 3.0 + a = 2.0 + y = filtfilt(b, a, x, method="gust") + expected = (b/a)**2 * x + assert_allclose(y, expected) + + +class TestSOSFiltFilt(TestFiltFilt): + filtfilt_kind = 'sos' + + def test_equivalence(self): + """Test equivalence between sosfiltfilt and filtfilt""" + x = np.random.RandomState(0).randn(1000) + for order in range(1, 6): + zpk = signal.butter(order, 0.35, output='zpk') + b, a = zpk2tf(*zpk) + sos = zpk2sos(*zpk) + y = filtfilt(b, a, x) + y_sos = sosfiltfilt(sos, x) + assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order) + + +def filtfilt_gust_opt(b, a, x): + """ + An alternative implementation of filtfilt with Gustafsson edges. + + This function computes the same result as + `scipy.signal._signaltools._filtfilt_gust`, but only 1-d arrays + are accepted. The problem is solved using `fmin` from `scipy.optimize`. + `_filtfilt_gust` is significantly faster than this implementation. 
+ """ + def filtfilt_gust_opt_func(ics, b, a, x): + """Objective function used in filtfilt_gust_opt.""" + m = max(len(a), len(b)) - 1 + z0f = ics[:m] + z0b = ics[m:] + y_f = lfilter(b, a, x, zi=z0f)[0] + y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] + + y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] + y_bf = lfilter(b, a, y_b, zi=z0f)[0] + value = np.sum((y_fb - y_bf)**2) + return value + + m = max(len(a), len(b)) - 1 + zi = lfilter_zi(b, a) + ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) + result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), + xtol=1e-10, ftol=1e-12, + maxfun=10000, maxiter=10000, + full_output=True, disp=False) + opt, fopt, niter, funcalls, warnflag = result + if warnflag > 0: + raise RuntimeError("minimization failed in filtfilt_gust_opt: " + "warnflag=%d" % warnflag) + z0f = opt[:m] + z0b = opt[m:] + + # Apply the forward-backward filter using the computed initial + # conditions. + y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] + y = lfilter(b, a, y_b, zi=z0f)[0] + + return y, z0f, z0b + + +def check_filtfilt_gust(b, a, shape, axis, irlen=None): + # Generate x, the data to be filtered. + np.random.seed(123) + x = np.random.randn(*shape) + + # Apply filtfilt to x. This is the main calculation to be checked. + y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) + + # Also call the private function so we can test the ICs. + yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + + # filtfilt_gust_opt is an independent implementation that gives the + # expected result, but it only handles 1-D arrays, so use some looping + # and reshaping shenanigans to create the expected output arrays. 
    # Move the filtered axis to the end so each 1-D slice can be passed to
    # the reference implementation.
    xx = np.swapaxes(x, axis, -1)
    out_shape = xx.shape[:-1]
    yo = np.empty_like(xx)
    m = max(len(a), len(b)) - 1
    zo1 = np.empty(out_shape + (m,))
    zo2 = np.empty(out_shape + (m,))
    # Run the slow reference implementation on every 1-D slice.
    for indx in product(*[range(d) for d in out_shape]):
        yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx])
    yo = np.swapaxes(yo, -1, axis)
    zo1 = np.swapaxes(zo1, -1, axis)
    zo2 = np.swapaxes(zo2, -1, axis)

    # Outputs and both sets of initial conditions must match the reference.
    assert_allclose(y, yo, rtol=1e-8, atol=1e-9)
    assert_allclose(yg, yo, rtol=1e-8, atol=1e-9)
    assert_allclose(zg1, zo1, rtol=1e-8, atol=1e-9)
    assert_allclose(zg2, zo2, rtol=1e-8, atol=1e-9)


def test_choose_conv_method():
    for mode in ['valid', 'same', 'full']:
        for ndim in [1, 2]:
            # Small arrays: 'direct' is expected to win.
            n, k, true_method = 8, 6, 'direct'
            x = np.random.randn(*((n,) * ndim))
            h = np.random.randn(*((k,) * ndim))

            method = choose_conv_method(x, h, mode=mode)
            assert_equal(method, true_method)

            # With measure=True a timing dict for both candidates is returned.
            method_try, times = choose_conv_method(x, h, mode=mode, measure=True)
            assert_(method_try in {'fft', 'direct'})
            assert_(isinstance(times, dict))
            assert_('fft' in times.keys() and 'direct' in times.keys())

        n = 10
        # Extended-precision complex dtypes are not supported by fftconvolve,
        # so 'direct' must be chosen when they exist on this platform.
        for not_fft_conv_supp in ["complex256", "complex192"]:
            if hasattr(np, not_fft_conv_supp):
                x = np.ones(n, dtype=not_fft_conv_supp)
                h = x.copy()
                assert_equal(choose_conv_method(x, h, mode=mode), 'direct')

        # Large integers can't be represented exactly in float: 'direct'.
        x = np.array([2**51], dtype=np.int64)
        h = x.copy()
        assert_equal(choose_conv_method(x, h, mode=mode), 'direct')

        # Object arrays (e.g. Decimal) have no FFT path: 'direct'.
        x = [Decimal(3), Decimal(2)]
        h = [Decimal(1), Decimal(4)]
        assert_equal(choose_conv_method(x, h, mode=mode), 'direct')


def test_filtfilt_gust():
    # Design a filter.
    z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk')

    # Find the approximate impulse response length of the filter.
+ eps = 1e-10 + r = np.max(np.abs(p)) + approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + + np.random.seed(123) + + b, a = zpk2tf(z, p, k) + for irlen in [None, approx_impulse_len]: + signal_len = 5 * approx_impulse_len + + # 1-d test case + check_filtfilt_gust(b, a, (signal_len,), 0, irlen) + + # 3-d test case; test each axis. + for axis in range(3): + shape = [2, 2, 2] + shape[axis] = signal_len + check_filtfilt_gust(b, a, shape, axis, irlen) + + # Test case with length less than 2*approx_impulse_len. + # In this case, `filtfilt_gust` should behave the same as if + # `irlen=None` was given. + length = 2*approx_impulse_len - 50 + check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) + + +class TestDecimate: + def test_bad_args(self): + x = np.arange(12) + assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) + assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) + + def test_basic_IIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_basic_FIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_shape(self): + # Regression test for ticket #1480. 
+ z = np.zeros((30, 30)) + d0 = signal.decimate(z, 2, axis=0, zero_phase=False) + assert_equal(d0.shape, (15, 30)) + d1 = signal.decimate(z, 2, axis=1, zero_phase=False) + assert_equal(d1.shape, (30, 15)) + + def test_phaseshift_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=False) + + def test_zero_phase_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=True) + + def test_phaseshift_IIR(self): + self._test_phaseshift(method='iir', zero_phase=False) + + def test_zero_phase_IIR(self): + self._test_phaseshift(method='iir', zero_phase=True) + + def _test_phaseshift(self, method, zero_phase): + rate = 120 + rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 + + t_tot = 100 # Need to let antialiasing filters settle + t = np.arange(rate*t_tot+1) / float(rate) + + # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts + freqs = np.array(rates_to) * 0.8 / 2 + d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) + * signal.windows.tukey(t.size, 0.1)) + + for rate_to in rates_to: + q = rate // rate_to + t_to = np.arange(rate_to*t_tot+1) / float(rate_to) + d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) + * signal.windows.tukey(t_to.size, 0.1)) + + # Set up downsampling filters, match v0.17 defaults + if method == 'fir': + n = 30 + system = signal.dlti(signal.firwin(n + 1, 1. / q, + window='hamming'), 1.) 
+ elif method == 'iir': + n = 8 + wc = 0.8*np.pi/q + system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) + + # Calculate expected phase response, as unit complex vector + if zero_phase is False: + _, h_resps = signal.freqz(system.num, system.den, + freqs/rate*2*np.pi) + h_resps /= np.abs(h_resps) + else: + h_resps = np.ones_like(freqs) + + y_resamps = signal.decimate(d.real, q, n, ftype=system, + zero_phase=zero_phase) + + # Get phase from complex inner product, like CSD + h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) + h_resamps /= np.abs(h_resamps) + subnyq = freqs < 0.5*rate_to + + # Complex vectors should be aligned, only compare below nyquist + assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, + atol=1e-3, rtol=1e-3) + + def test_auto_n(self): + # Test that our value of n is a reasonable choice (depends on + # the downsampling factor) + sfreq = 100. + n = 1000 + t = np.arange(n) / sfreq + # will alias for decimations (>= 15) + x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t) + assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) + x_out = signal.decimate(x, 30, ftype='fir') + assert_array_less(np.linalg.norm(x_out), 0.01) + + def test_long_float32(self): + # regression: gh-15072. 
With 32-bit float and either lfilter + # or filtfilt, this is numerically unstable + x = signal.decimate(np.ones(10_000, dtype=np.float32), 10) + assert not any(np.isnan(x)) + + def test_float16_upcast(self): + # float16 must be upcast to float64 + x = signal.decimate(np.ones(100, dtype=np.float16), 10) + assert x.dtype.type == np.float64 + + def test_complex_iir_dlti(self): + # regression: gh-17845 + # centre frequency for filter [Hz] + fcentre = 50 + # filter passband width [Hz] + fwidth = 5 + # sample rate [Hz] + fs = 1e3 + + z, p, k = signal.butter(2, 2*np.pi*fwidth/2, output='zpk', fs=fs) + z = z.astype(complex) * np.exp(2j * np.pi * fcentre/fs) + p = p.astype(complex) * np.exp(2j * np.pi * fcentre/fs) + system = signal.dlti(z, p, k) + + t = np.arange(200) / fs + + # input + u = (np.exp(2j * np.pi * fcentre * t) + + 0.5 * np.exp(-2j * np.pi * fcentre * t)) + + ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) + ynzpref = signal.lfilter(*signal.zpk2tf(z, p, k), + u)[::2] + + assert_equal(ynzp, ynzpref) + + yzp = signal.decimate(u, 2, ftype=system, zero_phase=True) + yzpref = signal.filtfilt(*signal.zpk2tf(z, p, k), + u)[::2] + + assert_allclose(yzp, yzpref, rtol=1e-10, atol=1e-13) + + def test_complex_fir_dlti(self): + # centre frequency for filter [Hz] + fcentre = 50 + # filter passband width [Hz] + fwidth = 5 + # sample rate [Hz] + fs = 1e3 + numtaps = 20 + + # FIR filter about 0Hz + bbase = signal.firwin(numtaps, fwidth/2, fs=fs) + + # rotate these to desired frequency + zbase = np.roots(bbase) + zrot = zbase * np.exp(2j * np.pi * fcentre/fs) + # FIR filter about 50Hz, maintaining passband gain of 0dB + bz = bbase[0] * np.poly(zrot) + + system = signal.dlti(bz, 1) + + t = np.arange(200) / fs + + # input + u = (np.exp(2j * np.pi * fcentre * t) + + 0.5 * np.exp(-2j * np.pi * fcentre * t)) + + ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) + ynzpref = signal.upfirdn(bz, u, up=1, down=2)[:100] + + assert_equal(ynzp, ynzpref) + + yzp = 
signal.decimate(u, 2, ftype=system, zero_phase=True) + yzpref = signal.resample_poly(u, 1, 2, window=bz) + + assert_equal(yzp, yzpref) + + +class TestHilbert: + + def test_bad_args(self): + x = np.array([1.0 + 0.0j]) + assert_raises(ValueError, hilbert, x) + x = np.arange(8.0) + assert_raises(ValueError, hilbert, x, N=0) + + def test_hilbert_theoretical(self): + # test cases by Ariel Rokem + decimal = 14 + + pi = np.pi + t = np.arange(0, 2 * pi, pi / 256) + a0 = np.sin(t) + a1 = np.cos(t) + a2 = np.sin(2 * t) + a3 = np.cos(2 * t) + a = np.vstack([a0, a1, a2, a3]) + + h = hilbert(a) + h_abs = np.abs(h) + h_angle = np.angle(h) + h_real = np.real(h) + + # The real part should be equal to the original signals: + assert_almost_equal(h_real, a, decimal) + # The absolute value should be one everywhere, for this input: + assert_almost_equal(h_abs, np.ones(a.shape), decimal) + # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in + # the first 256 bins: + assert_almost_equal(h_angle[0, :256], + np.arange(-pi / 2, pi / 2, pi / 256), + decimal) + # For the 'slow' cosine - the phase should go from 0 to pi in the + # same interval: + assert_almost_equal( + h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) + # The 'fast' sine should make this phase transition in half the time: + assert_almost_equal(h_angle[2, :128], + np.arange(-pi / 2, pi / 2, pi / 128), + decimal) + # Ditto for the 'fast' cosine: + assert_almost_equal( + h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) + + # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia + assert_almost_equal(h[1].imag, a0, decimal) + + def test_hilbert_axisN(self): + # tests for axis and N arguments + a = np.arange(18).reshape(3, 6) + # test axis + aa = hilbert(a, axis=-1) + assert_equal(hilbert(a.T, axis=0), aa.T) + # test 1d + assert_almost_equal(hilbert(a[0]), aa[0], 14) + + # test N + aan = hilbert(a, N=20, axis=-1) + assert_equal(aan.shape, [3, 20]) + assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 
3]) + # the next test is just a regression test, + # no idea whether numbers make sense + a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, + 1.000000000000000e+00 - 2.047794505137069j, + 1.999999999999999e+00 - 2.244055555687583j, + 3.000000000000000e+00 - 1.262750302935009j, + 4.000000000000000e+00 - 1.066489252384493j, + 5.000000000000000e+00 + 2.918022706971047j, + 8.881784197001253e-17 + 3.845658908989067j, + -9.444121133484362e-17 + 0.985044202202061j, + -1.776356839400251e-16 + 1.332257797702019j, + -3.996802888650564e-16 + 0.501905089898885j, + 1.332267629550188e-16 + 0.668696078880782j, + -1.192678053963799e-16 + 0.235487067862679j, + -1.776356839400251e-16 + 0.286439612812121j, + 3.108624468950438e-16 + 0.031676888064907j, + 1.332267629550188e-16 - 0.019275656884536j, + -2.360035624836702e-16 - 0.1652588660287j, + 0.000000000000000e+00 - 0.332049855010597j, + 3.552713678800501e-16 - 0.403810179797771j, + 8.881784197001253e-17 - 0.751023775297729j, + 9.444121133484362e-17 - 0.79252210110103j]) + assert_almost_equal(aan[0], a0hilb, 14, 'N regression') + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_hilbert_types(self, dtype): + in_typed = np.zeros(8, dtype=dtype) + assert_equal(np.real(signal.hilbert(in_typed)).dtype, dtype) + + +class TestHilbert2: + + def test_bad_args(self): + # x must be real. + x = np.array([[1.0 + 0.0j]]) + assert_raises(ValueError, hilbert2, x) + + # x must be rank 2. + x = np.arange(24).reshape(2, 3, 4) + assert_raises(ValueError, hilbert2, x) + + # Bad value for N. 
+ x = np.arange(16).reshape(4, 4) + assert_raises(ValueError, hilbert2, x, N=0) + assert_raises(ValueError, hilbert2, x, N=(2, 0)) + assert_raises(ValueError, hilbert2, x, N=(2,)) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_hilbert2_types(self, dtype): + in_typed = np.zeros((2, 32), dtype=dtype) + assert_equal(np.real(signal.hilbert2(in_typed)).dtype, dtype) + + +class TestPartialFractionExpansion: + @staticmethod + def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7): + r_true = np.asarray(r_true) + p_true = np.asarray(p_true) + + distance = np.hypot(abs(p[:, None] - p_true), + abs(r[:, None] - r_true)) + + rows, cols = linear_sum_assignment(distance) + assert_almost_equal(p[rows], p_true[cols], decimal=decimal) + assert_almost_equal(r[rows], r_true[cols], decimal=decimal) + + def test_compute_factors(self): + factors, poly = _compute_factors([1, 2, 3], [3, 2, 1]) + assert_equal(len(factors), 3) + assert_almost_equal(factors[0], np.poly([2, 2, 3])) + assert_almost_equal(factors[1], np.poly([1, 1, 1, 3])) + assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2])) + assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) + + factors, poly = _compute_factors([1, 2, 3], [3, 2, 1], + include_powers=True) + assert_equal(len(factors), 6) + assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3])) + assert_almost_equal(factors[1], np.poly([1, 2, 2, 3])) + assert_almost_equal(factors[2], np.poly([2, 2, 3])) + assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3])) + assert_almost_equal(factors[4], np.poly([1, 1, 1, 3])) + assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2])) + assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) + + def test_group_poles(self): + unique, multiplicity = _group_poles( + [1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min') + assert_equal(unique, [1.0, 2.0, 3.0]) + assert_equal(multiplicity, [3, 2, 1]) + + def test_residue_general(self): + # Test are taken from issue #4464, note that poles in scipy are 
        # in increasing by absolute value order, opposite to MATLAB.
        # Each stanza: run `residue` on one transfer function and check the
        # residues r, poles p, and direct polynomial term k.
        r, p, k = residue([5, 3, -2, 7], [-4, 0, 8, 3])
        assert_almost_equal(r, [1.3320, -0.6653, -1.4167], decimal=4)
        assert_almost_equal(p, [-0.4093, -1.1644, 1.5737], decimal=4)
        assert_almost_equal(k, [-1.2500], decimal=4)

        r, p, k = residue([-4, 8], [1, 6, 8])
        assert_almost_equal(r, [8, -12])
        assert_almost_equal(p, [-2, -4])
        assert_equal(k.size, 0)

        r, p, k = residue([4, 1], [1, -1, -2])
        assert_almost_equal(r, [1, 3])
        assert_almost_equal(p, [-1, 2])
        assert_equal(k.size, 0)

        # Complex-conjugate pole pairs: use the order-insensitive helper.
        r, p, k = residue([4, 3], [2, -3.4, 1.98, -0.406])
        self.assert_rp_almost_equal(
            r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25],
            [0.5 - 0.2j, 0.5 + 0.2j, 0.7])
        assert_equal(k.size, 0)

        # Repeated pole at -2.
        r, p, k = residue([2, 1], [1, 5, 8, 4])
        self.assert_rp_almost_equal(r, p, [-1, 1, 3], [-1, -2, -2])
        assert_equal(k.size, 0)

        # Improper fraction: nonzero direct term k.
        r, p, k = residue([3, -1.1, 0.88, -2.396, 1.348],
                          [1, -0.7, -0.14, 0.048])
        assert_almost_equal(r, [-3, 4, 1])
        assert_almost_equal(p, [0.2, -0.3, 0.8])
        assert_almost_equal(k, [3, 1])

        r, p, k = residue([1], [1, 2, -3])
        assert_almost_equal(r, [0.25, -0.25])
        assert_almost_equal(p, [1, -3])
        assert_equal(k.size, 0)

        r, p, k = residue([1, 0, -5], [1, 0, 0, 0, -1])
        self.assert_rp_almost_equal(r, p,
                                    [1, 1.5j, -1.5j, -1], [-1, -1j, 1j, 1])
        assert_equal(k.size, 0)

        # Triple pole at -1.
        r, p, k = residue([3, 8, 6], [1, 3, 3, 1])
        self.assert_rp_almost_equal(r, p, [1, 2, 3], [-1, -1, -1])
        assert_equal(k.size, 0)

        r, p, k = residue([3, -1], [1, -3, 2])
        assert_almost_equal(r, [-2, 5])
        assert_almost_equal(p, [1, 2])
        assert_equal(k.size, 0)

        r, p, k = residue([2, 3, -1], [1, -3, 2])
        assert_almost_equal(r, [-4, 13])
        assert_almost_equal(p, [1, 2])
        assert_almost_equal(k, [2])

        r, p, k = residue([7, 2, 3, -1], [1, -3, 2])
        assert_almost_equal(r, [-11, 69])
        assert_almost_equal(p, [1, 2])
        assert_almost_equal(k, [7, 23])

        r, p, k = residue([2, 3, -1], [1, -3, 4, -2])
+ self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j], + [1, 1 - 1j, 1 + 1j]) + assert_almost_equal(k.size, 0) + + def test_residue_leading_zeros(self): + # Leading zeros in numerator or denominator must not affect the answer. + r0, p0, k0 = residue([5, 3, -2, 7], [-4, 0, 8, 3]) + r1, p1, k1 = residue([0, 5, 3, -2, 7], [-4, 0, 8, 3]) + r2, p2, k2 = residue([5, 3, -2, 7], [0, -4, 0, 8, 3]) + r3, p3, k3 = residue([0, 0, 5, 3, -2, 7], [0, 0, 0, -4, 0, 8, 3]) + assert_almost_equal(r0, r1) + assert_almost_equal(r0, r2) + assert_almost_equal(r0, r3) + assert_almost_equal(p0, p1) + assert_almost_equal(p0, p2) + assert_almost_equal(p0, p3) + assert_almost_equal(k0, k1) + assert_almost_equal(k0, k2) + assert_almost_equal(k0, k3) + + def test_resiude_degenerate(self): + # Several tests for zero numerator and denominator. + r, p, k = residue([0, 0], [1, 6, 8]) + assert_almost_equal(r, [0, 0]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residue(0, 1) + assert_equal(r.size, 0) + assert_equal(p.size, 0) + assert_equal(k.size, 0) + + with pytest.raises(ValueError, match="Denominator `a` is zero."): + residue(1, 0) + + def test_residuez_general(self): + r, p, k = residuez([1, 6, 6, 2], [1, -(2 + 1j), (1 + 2j), -1j]) + self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j], + [1j, 1, 1]) + assert_almost_equal(k, [2j]) + + r, p, k = residuez([1, 2, 1], [1, -1, 0.3561]) + self.assert_rp_almost_equal(r, p, + [-0.9041 - 5.9928j, -0.9041 + 5.9928j], + [0.5 + 0.3257j, 0.5 - 0.3257j], + decimal=4) + assert_almost_equal(k, [2.8082], decimal=4) + + r, p, k = residuez([1, -1], [1, -5, 6]) + assert_almost_equal(r, [-1, 2]) + assert_almost_equal(p, [2, 3]) + assert_equal(k.size, 0) + + r, p, k = residuez([2, 3, 4], [1, 3, 3, 1]) + self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, -10, -4, 4], [2, -2, -4]) + assert_almost_equal(r, [0.5, -1.5]) + assert_almost_equal(p, [-1, 2]) + 
        assert_almost_equal(k, [1.5, -1])

        r, p, k = residuez([18], [18, 3, -4, -1])
        self.assert_rp_almost_equal(r, p,
                                    [0.36, 0.24, 0.4], [0.5, -1/3, -1/3])
        assert_equal(k.size, 0)

        r, p, k = residuez([2, 3], np.polymul([1, -1/2], [1, 1/4]))
        assert_almost_equal(r, [-10/3, 16/3])
        assert_almost_equal(p, [-0.25, 0.5])
        assert_equal(k.size, 0)

        r, p, k = residuez([1, -2, 1], [1, -1])
        assert_almost_equal(r, [0])
        assert_almost_equal(p, [1])
        assert_almost_equal(k, [1, -1])

        # Scalar numerator with a complex pole.
        r, p, k = residuez(1, [1, -1j])
        assert_almost_equal(r, [1])
        assert_almost_equal(p, [1j])
        assert_equal(k.size, 0)

        # Repeated pole at 0.5.
        r, p, k = residuez(1, [1, -1, 0.25])
        assert_almost_equal(r, [0, 1])
        assert_almost_equal(p, [0.5, 0.5])
        assert_equal(k.size, 0)

        r, p, k = residuez(1, [1, -0.75, .125])
        assert_almost_equal(r, [-1, 2])
        assert_almost_equal(p, [0.25, 0.5])
        assert_equal(k.size, 0)

        r, p, k = residuez([1, 6, 2], [1, -2, 1])
        assert_almost_equal(r, [-10, 9])
        assert_almost_equal(p, [1, 1])
        assert_almost_equal(k, [2])

        r, p, k = residuez([6, 2], [1, -2, 1])
        assert_almost_equal(r, [-2, 8])
        assert_almost_equal(p, [1, 1])
        assert_equal(k.size, 0)

        r, p, k = residuez([1, 6, 6, 2], [1, -2, 1])
        assert_almost_equal(r, [-24, 15])
        assert_almost_equal(p, [1, 1])
        assert_almost_equal(k, [10, 2])

        # Fifth roots of unity as poles.
        r, p, k = residuez([1, 0, 1], [1, 0, 0, 0, 0, -1])
        self.assert_rp_almost_equal(r, p,
                                    [0.2618 + 0.1902j, 0.2618 - 0.1902j,
                                     0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j],
                                    [-0.8090 + 0.5878j, -0.8090 - 0.5878j,
                                     1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j],
                                    decimal=4)
        assert_equal(k.size, 0)

    def test_residuez_trailing_zeros(self):
        # Trailing zeros in numerator or denominator must not affect the
        # answer.
+ r0, p0, k0 = residuez([5, 3, -2, 7], [-4, 0, 8, 3]) + r1, p1, k1 = residuez([5, 3, -2, 7, 0], [-4, 0, 8, 3]) + r2, p2, k2 = residuez([5, 3, -2, 7], [-4, 0, 8, 3, 0]) + r3, p3, k3 = residuez([5, 3, -2, 7, 0, 0], [-4, 0, 8, 3, 0, 0, 0]) + assert_almost_equal(r0, r1) + assert_almost_equal(r0, r2) + assert_almost_equal(r0, r3) + assert_almost_equal(p0, p1) + assert_almost_equal(p0, p2) + assert_almost_equal(p0, p3) + assert_almost_equal(k0, k1) + assert_almost_equal(k0, k2) + assert_almost_equal(k0, k3) + + def test_residuez_degenerate(self): + r, p, k = residuez([0, 0], [1, 6, 8]) + assert_almost_equal(r, [0, 0]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residuez(0, 1) + assert_equal(r.size, 0) + assert_equal(p.size, 0) + assert_equal(k.size, 0) + + with pytest.raises(ValueError, match="Denominator `a` is zero."): + residuez(1, 0) + + with pytest.raises(ValueError, + match="First coefficient of determinant `a` must " + "be non-zero."): + residuez(1, [0, 1, 2, 3]) + + def test_inverse_unique_roots_different_rtypes(self): + # This test was inspired by github issue 2496. + r = [3 / 10, -1 / 6, -2 / 15] + p = [0, -2, -5] + k = [] + b_expected = [0, 1, 3] + a_expected = [1, 7, 10, 0] + + # With the default tolerance, the rtype does not matter + # for this example. 
+ for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): + b, a = invres(r, p, k, rtype=rtype) + assert_allclose(b, b_expected) + assert_allclose(a, a_expected) + + b, a = invresz(r, p, k, rtype=rtype) + assert_allclose(b, b_expected) + assert_allclose(a, a_expected) + + def test_inverse_repeated_roots_different_rtypes(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + b_expected = [0, 0, 1, 3] + b_expected_z = [-1/6, -2/3, 11/6, 3] + a_expected = [1, 9, 24, 20, 0] + + for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): + b, a = invres(r, p, k, rtype=rtype) + assert_allclose(b, b_expected, atol=1e-14) + assert_allclose(a, a_expected) + + b, a = invresz(r, p, k, rtype=rtype) + assert_allclose(b, b_expected_z, atol=1e-14) + assert_allclose(a, a_expected) + + def test_inverse_bad_rtype(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + with pytest.raises(ValueError, match="`rtype` must be one of"): + invres(r, p, k, rtype='median') + with pytest.raises(ValueError, match="`rtype` must be one of"): + invresz(r, p, k, rtype='median') + + def test_invresz_one_coefficient_bug(self): + # Regression test for issue in gh-4646. 
+ r = [1] + p = [2] + k = [0] + b, a = invresz(r, p, k) + assert_allclose(b, [1.0]) + assert_allclose(a, [1.0, -2.0]) + + def test_invres(self): + b, a = invres([1], [1], []) + assert_almost_equal(b, [1]) + assert_almost_equal(a, [1, -1]) + + b, a = invres([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) + assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) + assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) + + b, a = invres([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) + assert_almost_equal(b, [1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10]) + assert_almost_equal(a, [1, -3 - 1j, 4]) + + b, a = invres([-1, 2, 1j, 3 - 1j, 4, -2], + [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) + assert_almost_equal(b, [4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j, + -292 + 219j, 192 - 268j]) + assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, + 108 - 54j, -81 + 108j]) + + b, a = invres([-1, 1j], [1, 1], [1, 2]) + assert_almost_equal(b, [1, 0, -4, 3 + 1j]) + assert_almost_equal(a, [1, -2, 1]) + + def test_invresz(self): + b, a = invresz([1], [1], []) + assert_almost_equal(b, [1]) + assert_almost_equal(a, [1, -1]) + + b, a = invresz([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) + assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) + assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) + + b, a = invresz([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) + assert_almost_equal(b, [2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12]) + assert_almost_equal(a, [1, -3 - 1j, 4]) + + b, a = invresz([-1, 2, 1j, 3 - 1j, 4, -2], + [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) + assert_almost_equal(b, [6, -50 + 11j, 100 - 72j, 80 + 58j, + -354 + 228j, 234 - 297j]) + assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, + 108 - 54j, -81 + 108j]) + + b, a = invresz([-1, 1j], [1, 1], [1, 2]) + assert_almost_equal(b, [1j, 1, -3, 2]) + assert_almost_equal(a, [1, -2, 1]) + + def test_inverse_scalar_arguments(self): + b, a = invres(1, 1, 1) + assert_almost_equal(b, [1, 0]) + assert_almost_equal(a, [1, -1]) + + 
b, a = invresz(1, 1, 1) + assert_almost_equal(b, [2, -1]) + assert_almost_equal(a, [1, -1]) + + +class TestVectorstrength: + + def test_single_1dperiod(self): + events = np.array([.5]) + period = 5. + targ_strength = 1. + targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_single_2dperiod(self): + events = np.array([.5]) + period = [1, 2, 5.] + targ_strength = [1.] * 3 + targ_phase = np.array([.5, .25, .1]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_array_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_1dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = 2 + targ_strength = 1. + targ_phase = .125 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_2dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = [1, 2, ] + targ_strength = [1.] * 2 + targ_phase = np.array([.25, .125]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_1dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = 1 + targ_strength = 1. 
+ targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_2dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = [1, .5] + targ_strength = [1.] * 2 + targ_phase = np.array([.1, .2]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_1dperiod(self): + events = np.array([.25, .5, .75]) + period = 1 + targ_strength = 1. / 3. + targ_phase = .5 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_2dperiod(self): + events = np.array([.25, .5, .75]) + period = [1., 1., 1., 1.] + targ_strength = [1. / 3.] * 4 + targ_phase = np.array([.5, .5, .5, .5]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_opposite_1dperiod(self): + events = np.array([0, .25, .5, .75]) + period = 1. + targ_strength = 0 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + + def test_opposite_2dperiod(self): + events = np.array([0, .25, .5, .75]) + period = [1.] * 10 + targ_strength = [0.] 
* 10 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + + def test_2d_events_ValueError(self): + events = np.array([[1, 2]]) + period = 1. + assert_raises(ValueError, vectorstrength, events, period) + + def test_2d_period_ValueError(self): + events = 1. + period = np.array([[1]]) + assert_raises(ValueError, vectorstrength, events, period) + + def test_zero_period_ValueError(self): + events = 1. + period = 0 + assert_raises(ValueError, vectorstrength, events, period) + + def test_negative_period_ValueError(self): + events = 1. + period = -1 + assert_raises(ValueError, vectorstrength, events, period) + + +def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0): + """Wrap assert_allclose while casting object arrays.""" + if actual.dtype.kind == 'O': + dtype = np.array(actual.flat[0]).dtype + actual, desired = actual.astype(dtype), desired.astype(dtype) + assert_allclose(actual, desired, rtol, atol) + + +@pytest.mark.parametrize('func', (sosfilt, lfilter)) +def test_nonnumeric_dtypes(func): + x = [Decimal(1), Decimal(2), Decimal(3)] + b = [Decimal(1), Decimal(2), Decimal(3)] + a = [Decimal(1), Decimal(2), Decimal(3)] + x = np.array(x) + assert x.dtype.kind == 'O' + desired = lfilter(np.array(b, float), np.array(a, float), x.astype(float)) + if func is sosfilt: + actual = sosfilt([b + a], x) + else: + actual = lfilter(b, a, x) + assert all(isinstance(x, Decimal) for x in actual) + assert_allclose(actual.astype(float), desired.astype(float)) + # Degenerate cases + if func is lfilter: + args = [1., 1.] + else: + args = [tf2sos(1., 1.)] + + with pytest.raises(ValueError, match='must be at least 1-D'): + func(*args, x=1.) 
+ + +@pytest.mark.parametrize('dt', 'fdFD') +class TestSOSFilt: + + # The test_rank* tests are pulled from _TestLinearFilter + def test_rank1(self, dt): + x = np.linspace(0, 5, 6).astype(dt) + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, -0.5]).astype(dt) + + # Test simple IIR + y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(dt) + sos = tf2sos(b, a) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + # Test simple FIR + b = np.array([1, 1]).astype(dt) + # NOTE: This was changed (rel. to TestLinear...) to add a pole @zero: + a = np.array([1, 0]).astype(dt) + y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(dt) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + b = [1, 1, 0] + a = [1, 0, 0] + x = np.ones(8) + sos = np.concatenate((b, a)) + sos.shape = (1, 6) + y = sosfilt(sos, x) + assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) + + def test_rank2(self, dt): + shape = (4, 3) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + x = x.astype(dt) + + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, 0.5]).astype(dt) + + y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], + dtype=dt) + + y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]], dtype=dt) + + y = sosfilt(tf2sos(b, a), x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + y = sosfilt(tf2sos(b, a), x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank3(self, dt): + shape = (4, 3, 2) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, 0.5]).astype(dt) + + # Test last axis + y = sosfilt(tf2sos(b, a), x) + for i in range(x.shape[0]): + for j in range(x.shape[1]): + assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) + + def test_initial_conditions(self, dt): + b1, a1 = signal.butter(2, 0.25, 'low') + b2, a2 = signal.butter(2, 0.75, 'low') + b3, a3 = signal.butter(2, 0.75, 'low') + b = np.convolve(np.convolve(b1, b2), b3) + a = 
np.convolve(np.convolve(a1, a2), a3) + sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) + + x = np.random.rand(50).astype(dt) + + # Stopping filtering and continuing + y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) + y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] + assert_allclose_cast(y_true, lfilter(b, a, x)) + + y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) + y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] + assert_allclose_cast(y_true, y_sos) + + # Use a step function + zi = sosfilt_zi(sos) + x = np.ones(8, dt) + y, zf = sosfilt(sos, x, zi=zi) + + assert_allclose_cast(y, np.ones(8)) + assert_allclose_cast(zf, zi) + + # Initial condition shape matching + x.shape = (1, 1) + x.shape # 3D + assert_raises(ValueError, sosfilt, sos, x, zi=zi) + zi_nd = zi.copy() + zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) + assert_raises(ValueError, sosfilt, sos, x, + zi=zi_nd[:, :, :, [0, 1, 1]]) + y, zf = sosfilt(sos, x, zi=zi_nd) + assert_allclose_cast(y[0, 0], np.ones(8)) + assert_allclose_cast(zf[:, 0, 0, :], zi) + + def test_initial_conditions_3d_axis1(self, dt): + # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. + + # Input array is x. + x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) + x = x.astype(dt) + + # Design a filter in ZPK format and convert to SOS + zpk = signal.butter(6, 0.35, output='zpk') + sos = zpk2sos(*zpk) + nsections = sos.shape[0] + + # Filter along this axis. + axis = 1 + + # Initial conditions, all zeros. + shp = list(x.shape) + shp[axis] = 2 + shp = [nsections] + shp + z0 = np.zeros(shp) + + # Apply the filter to x. + yf, zf = sosfilt(sos, x, axis=axis, zi=z0) + + # Apply the filter to x in two stages. + y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) + y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) + + # y should equal yf, and z2 should equal zf. 
+ y = np.concatenate((y1, y2), axis=axis) + assert_allclose_cast(y, yf, rtol=1e-10, atol=1e-13) + assert_allclose_cast(z2, zf, rtol=1e-10, atol=1e-13) + + # let's try the "step" initial condition + zi = sosfilt_zi(sos) + zi.shape = [nsections, 1, 2, 1] + zi = zi * x[:, 0:1, :] + y = sosfilt(sos, x, axis=axis, zi=zi)[0] + # check it against the TF form + b, a = zpk2tf(*zpk) + zi = lfilter_zi(b, a) + zi.shape = [1, zi.size, 1] + zi = zi * x[:, 0:1, :] + y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] + assert_allclose_cast(y, y_tf, rtol=1e-10, atol=1e-13) + + def test_bad_zi_shape(self, dt): + # The shape of zi is checked before using any values in the + # arguments, so np.empty is fine for creating the arguments. + x = np.empty((3, 15, 3), dt) + sos = np.zeros((4, 6)) + zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) + with pytest.raises(ValueError, match='should be all ones'): + sosfilt(sos, x, zi=zi, axis=1) + sos[:, 3] = 1. + with pytest.raises(ValueError, match='Invalid zi shape'): + sosfilt(sos, x, zi=zi, axis=1) + + def test_sosfilt_zi(self, dt): + sos = signal.butter(6, 0.2, output='sos') + zi = sosfilt_zi(sos) + + y, zf = sosfilt(sos, np.ones(40, dt), zi=zi) + assert_allclose_cast(zf, zi, rtol=1e-13) + + # Expected steady state value of the step response of this filter: + ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) + assert_allclose_cast(y, ss, rtol=1e-13) + + # zi as array-like + _, zf = sosfilt(sos, np.ones(40, dt), zi=zi.tolist()) + assert_allclose_cast(zf, zi, rtol=1e-13) + + +class TestDeconvolve: + + def test_basic(self): + # From docstring example + original = [0, 1, 0, 0, 1, 1, 0, 0] + impulse_response = [2, 1] + recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] + recovered, remainder = signal.deconvolve(recorded, impulse_response) + assert_allclose(recovered, original) + + def test_n_dimensional_signal(self): + recorded = [[0, 0], [0, 0]] + impulse_response = [0, 0] + with pytest.raises(ValueError, match="signal must be 1-D."): + 
quotient, remainder = signal.deconvolve(recorded, impulse_response) + + def test_n_dimensional_divisor(self): + recorded = [0, 0] + impulse_response = [[0, 0], [0, 0]] + with pytest.raises(ValueError, match="divisor must be 1-D."): + quotient, remainder = signal.deconvolve(recorded, impulse_response) + + +class TestDetrend: + + def test_basic(self): + detrended = detrend(array([1, 2, 3])) + detrended_exact = array([0, 0, 0]) + assert_array_almost_equal(detrended, detrended_exact) + + def test_copy(self): + x = array([1, 1.2, 1.5, 1.6, 2.4]) + copy_array = detrend(x, overwrite_data=False) + inplace = detrend(x, overwrite_data=True) + assert_array_almost_equal(copy_array, inplace) + + @pytest.mark.parametrize('kind', ['linear', 'constant']) + @pytest.mark.parametrize('axis', [0, 1, 2]) + def test_axis(self, axis, kind): + data = np.arange(5*6*7).reshape(5, 6, 7) + detrended = detrend(data, type=kind, axis=axis) + assert detrended.shape == data.shape + + def test_bp(self): + data = [0, 1, 2] + [5, 0, -5, -10] + detrended = detrend(data, type='linear', bp=3) + assert_allclose(detrended, 0, atol=1e-14) + + # repeat with ndim > 1 and axis + data = np.asarray(data)[None, :, None] + + detrended = detrend(data, type="linear", bp=3, axis=1) + assert_allclose(detrended, 0, atol=1e-14) + + # breakpoint index > shape[axis]: raises + with assert_raises(ValueError): + detrend(data, type="linear", bp=3) + + @pytest.mark.parametrize('bp', [np.array([0, 2]), [0, 2]]) + def test_detrend_array_bp(self, bp): + # regression test for https://github.com/scipy/scipy/issues/18675 + rng = np.random.RandomState(12345) + x = rng.rand(10) + # bp = np.array([0, 2]) + + res = detrend(x, bp=bp) + res_scipy_191 = np.array([-4.44089210e-16, -2.22044605e-16, + -1.11128506e-01, -1.69470553e-01, 1.14710683e-01, 6.35468419e-02, + 3.53533144e-01, -3.67877935e-02, -2.00417675e-02, -1.94362049e-01]) + + assert_allclose(res, res_scipy_191, atol=1e-14) + + +class TestUniqueRoots: + def 
test_real_no_repeat(self): + p = [-1.0, -0.5, 0.3, 1.2, 10.0] + unique, multiplicity = unique_roots(p) + assert_almost_equal(unique, p, decimal=15) + assert_equal(multiplicity, np.ones(len(p))) + + def test_real_repeat(self): + p = [-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05] + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') + assert_almost_equal(unique, [-1.0, -0.89, 0.5, 1.0], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') + assert_almost_equal(unique, [-0.95, -0.8, 0.5, 1.05], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') + assert_almost_equal(unique, [-0.975, -0.845, 0.5, 1.025], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + def test_complex_no_repeat(self): + p = [-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j] + unique, multiplicity = unique_roots(p) + assert_almost_equal(unique, p, decimal=15) + assert_equal(multiplicity, np.ones(len(p))) + + def test_complex_repeat(self): + p = [-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0, + 0.5 + 0.5j, 0.45 + 0.55j] + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') + assert_almost_equal(unique, [-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') + assert_almost_equal(unique, + [-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') + assert_almost_equal( + unique, [-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + def test_gh_4915(self): + p = np.roots(np.convolve(np.ones(5), np.ones(5))) + true_roots = [-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)] + + unique, multiplicity = unique_roots(p) + unique = np.sort(unique) + + 
assert_almost_equal(np.sort(unique), true_roots, decimal=7) + assert_equal(multiplicity, [2, 2, 2, 2]) + + def test_complex_roots_extra(self): + unique, multiplicity = unique_roots([1.0, 1.0j, 1.0]) + assert_almost_equal(unique, [1.0, 1.0j], decimal=15) + assert_equal(multiplicity, [2, 1]) + + unique, multiplicity = unique_roots([1, 1 + 2e-9, 1e-9 + 1j], tol=0.1) + assert_almost_equal(unique, [1.0, 1e-9 + 1.0j], decimal=15) + assert_equal(multiplicity, [2, 1]) + + def test_single_unique_root(self): + p = np.random.rand(100) + 1j * np.random.rand(100) + unique, multiplicity = unique_roots(p, 2) + assert_almost_equal(unique, [np.min(p)], decimal=15) + assert_equal(multiplicity, [100]) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_spectral.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..ed0af49b2ef8901f3c8b073f4d19def5578d0dbe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_spectral.py @@ -0,0 +1,1713 @@ +import sys + +import numpy as np +from numpy.testing import (assert_, assert_approx_equal, + assert_allclose, assert_array_equal, assert_equal, + assert_array_almost_equal_nulp, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +from scipy import signal +from scipy.fft import fftfreq, rfftfreq, fft, irfft +from scipy.integrate import trapezoid +from scipy.signal import (periodogram, welch, lombscargle, coherence, + spectrogram, check_COLA, check_NOLA) +from scipy.signal.windows import hann +from scipy.signal._spectral_py import _spectral_helper + +# Compare ShortTimeFFT.stft() / ShortTimeFFT.istft() with stft() / istft(): +from scipy.signal.tests._scipy_spectral_test_shim import stft_compare as stft +from scipy.signal.tests._scipy_spectral_test_shim import istft_compare as istft +from scipy.signal.tests._scipy_spectral_test_shim import csd_compare as csd + + +class 
TestPeriodogram: + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_real_onesided_odd(self): + x = np.zeros(15) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8) + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-15) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 1/16.0) + q[0] = 0 + assert_allclose(p, q) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, scaling='spectrum') + g, q = periodogram(x, scaling='density') + assert_allclose(f, np.linspace(0, 0.5, 9)) + assert_allclose(p, q/16.0) + + def test_integer_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_integer_odd(self): + x = np.zeros(15, dtype=int) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8) + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-15) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 1/16.0) + q[0] = 0 + assert_allclose(p, q) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 5.0/16.0) + q[0] = 0 + assert_allclose(p, q) + + def test_unk_scaling(self): + assert_raises(ValueError, periodogram, np.zeros(4, np.complex128), + scaling='foo') + + @pytest.mark.skipif( + sys.maxsize <= 2**32, + reason="On some 32-bit tolerance issue" + ) + def 
test_nd_axis_m1(self): + x = np.zeros(20, dtype=np.float64) + x = x.reshape((2,1,10)) + x[:,:,0] = 1.0 + f, p = periodogram(x) + assert_array_equal(p.shape, (2, 1, 6)) + assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60) + f0, p0 = periodogram(x[0,0,:]) + assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60) + + @pytest.mark.skipif( + sys.maxsize <= 2**32, + reason="On some 32-bit tolerance issue" + ) + def test_nd_axis_0(self): + x = np.zeros(20, dtype=np.float64) + x = x.reshape((10,2,1)) + x[0,:,:] = 1.0 + f, p = periodogram(x, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60) + f0, p0 = periodogram(x[:,0,0]) + assert_array_almost_equal_nulp(p0, p[:,1,0]) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, 10, 'hann') + win = signal.get_window('hann', 16) + fe, pe = periodogram(x, 10, win) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, periodogram, x, + 10, win_err) # win longer than signal + + def test_padded_fft(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x) + fp, pp = periodogram(x, nfft=32) + assert_allclose(f, fp[::2]) + assert_allclose(p, pp[::2]) + assert_array_equal(pp.shape, (17,)) + + def test_empty_input(self): + f, p = periodogram([]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + for shape in [(0,), (3,0), (0,5,2)]: + f, p = periodogram(np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = periodogram(np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_short_nfft(self): + x = np.zeros(18) + x[0] = 1 + f, p = periodogram(x, nfft=16) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + 
assert_allclose(p, q) + + def test_nfft_is_xshape(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, nfft=16) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9, 'f') + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(15, 'f') + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8, 'f') + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 1/16.0, 'f') + q[0] = 0 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 5.0/16.0, 'f') + q[0] = 0 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + def test_shorter_window_error(self): + x = np.zeros(16) + x[0] = 1 + win = signal.get_window('hann', 10) + expected_msg = ('the size of the window must be the same size ' + 'of the input on the specified axis') + with assert_raises(ValueError, match=expected_msg): + periodogram(x, window=win) + + +class TestWelch: + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_onesided_odd(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, 
np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, scaling='spectrum') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, + 0.02083333]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_odd(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, + 0.55555556, 0.55555556, 0.55555556, 0.38194444]) + 
assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_unk_scaling(self): + assert_raises(ValueError, welch, np.zeros(4, np.complex128), + scaling='foo', nperseg=4) + + def test_detrend_linear(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = welch(x, nperseg=10, detrend='linear') + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_no_detrending(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f1, p1 = welch(x, nperseg=10, detrend=False) + f2, p2 = welch(x, nperseg=10, detrend=lambda x: x) + assert_allclose(f1, f2, atol=1e-15) + assert_allclose(p1, p2, atol=1e-15) + + def test_detrend_external(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = welch(x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_m1(self): + x = np.arange(40, dtype=np.float64) + 0.04 + x = x.reshape((2,2,10)) + f, p = welch(x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + x = np.moveaxis(x, 2, 0) + f, p = welch(x, nperseg=10, axis=0, + detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_nd_axis_m1(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + f, p = welch(x, nperseg=10) + assert_array_equal(p.shape, (2, 1, 6)) + assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) + f0, p0 = welch(x[0,0,:], nperseg=10) + assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) + + def test_nd_axis_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((10,2,1)) + f, p = welch(x, nperseg=10, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) + f0, p0 = welch(x[:,0,0], nperseg=10) + assert_allclose(p0, 
p[:,1,0], atol=1e-13, rtol=1e-13) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, 10, 'hann', nperseg=8) + win = signal.get_window('hann', 8) + fe, pe = welch(x, 10, win, nperseg=None) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + assert_array_equal(fe.shape, (5,)) # because win length used as nperseg + assert_array_equal(pe.shape, (5,)) + assert_raises(ValueError, welch, x, + 10, win, nperseg=4) # because nperseg != win.shape[-1] + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, welch, x, + 10, win_err, nperseg=None) # win longer than signal + + def test_empty_input(self): + f, p = welch([]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + for shape in [(0,), (3,0), (0,5,2)]: + f, p = welch(np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = welch(np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_short_data(self): + x = np.zeros(8) + x[0] = 1 + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + with suppress_warnings() as sup: + msg = "nperseg = 256 is greater than input length = 8, using nperseg = 8" + sup.filter(UserWarning, msg) + f, p = welch(x,window='hann') # default nperseg + f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg + f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f2) + assert_allclose(p1, p2) + + def test_window_long_or_nd(self): + assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1])) + assert_raises(ValueError, welch, np.zeros(4), 1, + np.arange(6).reshape((2,3))) + + def test_nondefault_noverlap(self): + x = np.zeros(64) + x[::8] = 1 + f, p = 
welch(x, nperseg=16, noverlap=4) + q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., + 1./6.]) + assert_allclose(p, q, atol=1e-12) + + def test_bad_noverlap(self): + assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7) + + def test_nfft_too_short(self): + assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, + 0.17072113], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.11111111, + 0.07638889], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, + 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype, + f'dtype mismatch, {p.dtype}, {q.dtype}') + + def test_padded_freqs(self): + x = np.zeros(12) + + nfft = 24 + f = fftfreq(nfft, 1.0)[:nfft//2+1] + f[-1] *= -1 + fodd, _ = welch(x, nperseg=5, nfft=nfft) + feven, _ = welch(x, nperseg=6, nfft=nfft) + 
assert_allclose(f, fodd) + assert_allclose(f, feven) + + nfft = 25 + f = fftfreq(nfft, 1.0)[:(nfft + 1)//2] + fodd, _ = welch(x, nperseg=5, nfft=nfft) + feven, _ = welch(x, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + def test_window_correction(self): + A = 20 + fs = 1e4 + nperseg = int(fs//10) + fsig = 300 + ii = int(fsig*nperseg//fs) # Freq index of fsig + + tt = np.arange(fs)/fs + x = A*np.sin(2*np.pi*fsig*tt) + + for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']: + _, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window, + scaling='spectrum') + freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window, + scaling='density') + + # Check peak height at signal frequency for 'spectrum' + assert_allclose(p_spec[ii], A**2/2.0) + # Check integrated spectrum RMS for 'density' + assert_allclose(np.sqrt(trapezoid(p_dens, freq)), A*np.sqrt(2)/2, + rtol=1e-3) + + def test_axis_rolling(self): + np.random.seed(1234) + + x_flat = np.random.randn(1024) + _, p_flat = welch(x_flat) + + for a in range(3): + newshape = [1,]*3 + newshape[a] = -1 + x = x_flat.reshape(newshape) + + _, p_plus = welch(x, axis=a) # Positive axis index + _, p_minus = welch(x, axis=a-x.ndim) # Negative axis index + + assert_equal(p_flat, p_plus.squeeze(), err_msg=a) + assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim) + + def test_average(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, average='median') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([.1, .05, 0., 1.54074396e-33, 0.]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + assert_raises(ValueError, welch, x, nperseg=8, + average='unrecognised-average') + + +class TestCSD: + def test_pad_shorter_x(self): + x = np.zeros(8) + y = np.zeros(12) + + f = np.linspace(0, 0.5, 7) + c = np.zeros(7,dtype=np.complex128) + f1, c1 = csd(x, y, nperseg=12) + + assert_allclose(f, f1) + assert_allclose(c, c1) + + def test_pad_shorter_y(self): + x = np.zeros(12) + 
y = np.zeros(8) + + f = np.linspace(0, 0.5, 7) + c = np.zeros(7,dtype=np.complex128) + f1, c1 = csd(x, y, nperseg=12) + + assert_allclose(f, f1) + assert_allclose(c, c1) + + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_onesided_odd(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, scaling='spectrum') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, + 0.02083333]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_odd(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + 
x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, + 0.55555556, 0.55555556, 0.55555556, 0.38194444]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_unk_scaling(self): + assert_raises(ValueError, csd, np.zeros(4, np.complex128), + np.ones(4, np.complex128), scaling='foo', nperseg=4) + + def test_detrend_linear(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = csd(x, x, nperseg=10, detrend='linear') + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_no_detrending(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f1, p1 = csd(x, x, nperseg=10, detrend=False) + f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x) + assert_allclose(f1, f2, atol=1e-15) + assert_allclose(p1, p2, atol=1e-15) + + def test_detrend_external(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = csd(x, x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_m1(self): + x = np.arange(40, dtype=np.float64) + 0.04 + x = x.reshape((2,2,10)) + f, p = csd(x, x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + x = np.moveaxis(x, 2, 0) + f, p = csd(x, x, nperseg=10, axis=0, + detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_nd_axis_m1(self): 
+ x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + f, p = csd(x, x, nperseg=10) + assert_array_equal(p.shape, (2, 1, 6)) + assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) + f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10) + assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) + + def test_nd_axis_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((10,2,1)) + f, p = csd(x, x, nperseg=10, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) + f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10) + assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, 10, 'hann', 8) + win = signal.get_window('hann', 8) + fe, pe = csd(x, x, 10, win, nperseg=None) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + assert_array_equal(fe.shape, (5,)) # because win length used as nperseg + assert_array_equal(pe.shape, (5,)) + assert_raises(ValueError, csd, x, x, + 10, win, nperseg=256) # because nperseg != win.shape[-1] + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, csd, x, x, + 10, win_err, nperseg=None) # because win longer than signal + + def test_empty_input(self): + f, p = csd([],np.zeros(10)) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + + f, p = csd(np.zeros(10),[]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + + for shape in [(0,), (3,0), (0,5,2)]: + f, p = csd(np.empty(shape), np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + f, p = csd(np.ones(10), np.empty((5,0))) + assert_array_equal(f.shape, (5,0)) + assert_array_equal(p.shape, (5,0)) + + f, p = csd(np.empty((5,0)), np.ones(10)) + assert_array_equal(f.shape, (5,0)) + assert_array_equal(p.shape, (5,0)) + + def test_empty_input_other_axis(self): + for shape in [(3,0), 
(0,5,2)]: + f, p = csd(np.empty(shape), np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1) + assert_array_equal(f.shape, (10,0,3)) + assert_array_equal(p.shape, (10,0,3)) + + f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1) + assert_array_equal(f.shape, (10,0,3)) + assert_array_equal(p.shape, (10,0,3)) + + def test_short_data(self): + x = np.zeros(8) + x[0] = 1 + + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + with suppress_warnings() as sup: + msg = "nperseg = 256 is greater than input length = 8, using nperseg = 8" + sup.filter(UserWarning, msg) + f, p = csd(x, x, window='hann') # default nperseg + f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg + f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f2) + assert_allclose(p1, p2) + + def test_window_long_or_nd(self): + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, + np.array([1,1,1,1,1])) + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, + np.arange(6).reshape((2,3))) + + def test_nondefault_noverlap(self): + x = np.zeros(64) + x[::8] = 1 + f, p = csd(x, x, nperseg=16, noverlap=4) + q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., + 1./6.]) + assert_allclose(p, q, atol=1e-12) + + def test_bad_noverlap(self): + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann', + 2, 7) + + def test_nfft_too_short(self): + assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3, + nperseg=4) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111], 'f') + assert_allclose(p, q, atol=1e-7, 
rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, + 0.17072113], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.11111111, + 0.07638889], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, + 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype, + f'dtype mismatch, {p.dtype}, {q.dtype}') + + def test_padded_freqs(self): + x = np.zeros(12) + y = np.ones(12) + + nfft = 24 + f = fftfreq(nfft, 1.0)[:nfft//2+1] + f[-1] *= -1 + fodd, _ = csd(x, y, nperseg=5, nfft=nfft) + feven, _ = csd(x, y, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + nfft = 25 + f = fftfreq(nfft, 1.0)[:(nfft + 1)//2] + fodd, _ = csd(x, y, nperseg=5, nfft=nfft) + feven, _ = csd(x, y, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + def test_copied_data(self): + x = np.random.randn(64) + y = x.copy() + + _, p_same = csd(x, x, nperseg=8, average='mean', + return_onesided=False) + _, p_copied = csd(x, y, nperseg=8, average='mean', + return_onesided=False) + assert_allclose(p_same, p_copied) + + _, p_same = csd(x, x, nperseg=8, average='median', + return_onesided=False) + _, p_copied = 
csd(x, y, nperseg=8, average='median', + return_onesided=False) + assert_allclose(p_same, p_copied) + + +class TestCoherence: + def test_identical_input(self): + x = np.random.randn(20) + y = np.copy(x) # So `y is x` -> False + + f = np.linspace(0, 0.5, 6) + C = np.ones(6) + f1, C1 = coherence(x, y, nperseg=10) + + assert_allclose(f, f1) + assert_allclose(C, C1) + + def test_phase_shifted_input(self): + x = np.random.randn(20) + y = -x + + f = np.linspace(0, 0.5, 6) + C = np.ones(6) + f1, C1 = coherence(x, y, nperseg=10) + + assert_allclose(f, f1) + assert_allclose(C, C1) + + +class TestSpectrogram: + def test_average_all_segments(self): + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg = 16 + noverlap = 2 + + f, _, P = spectrogram(x, fs, window, nperseg, noverlap) + fw, Pw = welch(x, fs, window, nperseg, noverlap) + assert_allclose(f, fw) + assert_allclose(np.mean(P, axis=-1), Pw) + + def test_window_external(self): + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg = 16 + noverlap = 2 + f, _, P = spectrogram(x, fs, window, nperseg, noverlap) + + win = signal.get_window(('tukey', 0.25), 16) + fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2) + assert_array_equal(fe.shape, (9,)) # because win length used as nperseg + assert_array_equal(Pe.shape, (9,73)) + assert_raises(ValueError, spectrogram, x, + fs, win, nperseg=8) # because nperseg != win.shape[-1] + win_err = signal.get_window(('tukey', 0.25), 2048) + assert_raises(ValueError, spectrogram, x, + fs, win_err, nperseg=None) # win longer than signal + + def test_short_data(self): + x = np.random.randn(1024) + fs = 1.0 + + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg + with suppress_warnings() as sup: + sup.filter(UserWarning, + "nperseg = 1025 is greater than input length = 1024, " + "using nperseg = 1024",) + 
f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25), + nperseg=1025) # user-specified nperseg + f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default + f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f3) + assert_allclose(p1, p3) + +class TestLombscargle: + def test_frequency(self): + """Test if frequency location of peak corresponds to frequency of + generated input signal. + """ + + # Input parameters + ampl = 2. + w = 1. + phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + np.random.seed(2353425) + r = np.random.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + x = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + P = lombscargle(t, x, f) + + # Check if difference between found frequency maximum and input + # frequency is less than accuracy + delta = f[1] - f[0] + assert_(w - f[np.argmax(P)] < (delta/2.)) + + def test_amplitude(self): + # Test if height of peak in normalized Lomb-Scargle periodogram + # corresponds to amplitude of the generated input signal. + + # Input parameters + ampl = 2. + w = 1. 
+ phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + np.random.seed(2353425) + r = np.random.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + x = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, x, f) + + # Normalize + pgram = np.sqrt(4 * pgram / t.shape[0]) + + # Check if difference between found frequency maximum and input + # frequency is less than accuracy + assert_approx_equal(np.max(pgram), ampl, significant=2) + + def test_precenter(self): + # Test if precenter gives the same result as manually precentering. + + # Input parameters + ampl = 2. + w = 1. + phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + offset = 0.15 # Offset to be subtracted in pre-centering + + # Randomly select a fraction of an array with timesteps + np.random.seed(2353425) + r = np.random.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + x = ampl * np.sin(w*t + phi) + offset + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, x, f, precenter=True) + pgram2 = lombscargle(t, x - x.mean(), f, precenter=False) + + # check if centering worked + assert_allclose(pgram, pgram2) + + def test_normalize(self): + # Test normalize option of Lomb-Scarge. + + # Input parameters + ampl = 2. + w = 1. 
+ phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + np.random.seed(2353425) + r = np.random.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + x = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, x, f) + pgram2 = lombscargle(t, x, f, normalize=True) + + # check if normalization works as expected + assert_allclose(pgram * 2 / np.dot(x, x), pgram2) + assert_approx_equal(np.max(pgram2), 1.0, significant=2) + + def test_wrong_shape(self): + t = np.linspace(0, 1, 1) + x = np.linspace(0, 1, 2) + f = np.linspace(0, 1, 3) + assert_raises(ValueError, lombscargle, t, x, f) + + def test_zero_division(self): + t = np.zeros(1) + x = np.zeros(1) + f = np.zeros(1) + assert_raises(ZeroDivisionError, lombscargle, t, x, f) + + def test_lombscargle_atan_vs_atan2(self): + # https://github.com/scipy/scipy/issues/3787 + # This raised a ZeroDivisionError. + t = np.linspace(0, 10, 1000, endpoint=False) + x = np.sin(4*t) + f = np.linspace(0, 50, 500, endpoint=False) + 0.1 + lombscargle(t, x, f*2*np.pi) + + +class TestSTFT: + def test_input_validation(self): + + def chk_VE(match): + """Assert for a ValueError matching regexp `match`. + + This little wrapper allows a more concise code layout. 
+ """ + return pytest.raises(ValueError, match=match) + + # Checks for check_COLA(): + with chk_VE('nperseg must be a positive integer'): + check_COLA('hann', -10, 0) + with chk_VE('noverlap must be less than nperseg.'): + check_COLA('hann', 10, 20) + with chk_VE('window must be 1-D'): + check_COLA(np.ones((2, 2)), 10, 0) + with chk_VE('window must have length of nperseg'): + check_COLA(np.ones(20), 10, 0) + + # Checks for check_NOLA(): + with chk_VE('nperseg must be a positive integer'): + check_NOLA('hann', -10, 0) + with chk_VE('noverlap must be less than nperseg'): + check_NOLA('hann', 10, 20) + with chk_VE('window must be 1-D'): + check_NOLA(np.ones((2, 2)), 10, 0) + with chk_VE('window must have length of nperseg'): + check_NOLA(np.ones(20), 10, 0) + with chk_VE('noverlap must be a nonnegative integer'): + check_NOLA('hann', 64, -32) + + x = np.zeros(1024) + z = stft(x)[2] + + # Checks for stft(): + with chk_VE('window must be 1-D'): + stft(x, window=np.ones((2, 2))) + with chk_VE('value specified for nperseg is different ' + + 'from length of window'): + stft(x, window=np.ones(10), nperseg=256) + with chk_VE('nperseg must be a positive integer'): + stft(x, nperseg=-256) + with chk_VE('noverlap must be less than nperseg.'): + stft(x, nperseg=256, noverlap=1024) + with chk_VE('nfft must be greater than or equal to nperseg.'): + stft(x, nperseg=256, nfft=8) + + # Checks for istft(): + with chk_VE('Input stft must be at least 2d!'): + istft(x) + with chk_VE('window must be 1-D'): + istft(z, window=np.ones((2, 2))) + with chk_VE('window must have length of 256'): + istft(z, window=np.ones(10), nperseg=256) + with chk_VE('nperseg must be a positive integer'): + istft(z, nperseg=-256) + with chk_VE('noverlap must be less than nperseg.'): + istft(z, nperseg=256, noverlap=1024) + with chk_VE('nfft must be greater than or equal to nperseg.'): + istft(z, nperseg=256, nfft=8) + with pytest.warns(UserWarning, match="NOLA condition failed, " + + "STFT may not be 
invertible"): + istft(z, nperseg=256, noverlap=0, window='hann') + with chk_VE('Must specify differing time and frequency axes!'): + istft(z, time_axis=0, freq_axis=0) + + # Checks for _spectral_helper(): + with chk_VE("Unknown value for mode foo, must be one of: " + + r"\{'psd', 'stft'\}"): + _spectral_helper(x, x, mode='foo') + with chk_VE("x and y must be equal if mode is 'stft'"): + _spectral_helper(x[:512], x[512:], mode='stft') + with chk_VE("Unknown boundary option 'foo', must be one of: " + + r"\['even', 'odd', 'constant', 'zeros', None\]"): + _spectral_helper(x, x, boundary='foo') + + scaling = "not_valid" + with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"): + stft(x, scaling=scaling) + with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"): + istft(z, scaling=scaling) + + def test_check_COLA(self): + settings = [ + ('boxcar', 10, 0), + ('boxcar', 10, 9), + ('bartlett', 51, 26), + ('hann', 256, 128), + ('hann', 256, 192), + ('blackman', 300, 200), + (('tukey', 0.5), 256, 64), + ('hann', 256, 255), + ] + + for setting in settings: + msg = '{}, {}, {}'.format(*setting) + assert_equal(True, check_COLA(*setting), err_msg=msg) + + def test_check_NOLA(self): + settings_pass = [ + ('boxcar', 10, 0), + ('boxcar', 10, 9), + ('boxcar', 10, 7), + ('bartlett', 51, 26), + ('bartlett', 51, 10), + ('hann', 256, 128), + ('hann', 256, 192), + ('hann', 256, 37), + ('blackman', 300, 200), + ('blackman', 300, 123), + (('tukey', 0.5), 256, 64), + (('tukey', 0.5), 256, 38), + ('hann', 256, 255), + ('hann', 256, 39), + ] + for setting in settings_pass: + msg = '{}, {}, {}'.format(*setting) + assert_equal(True, check_NOLA(*setting), err_msg=msg) + + w_fail = np.ones(16) + w_fail[::2] = 0 + settings_fail = [ + (w_fail, len(w_fail), len(w_fail) // 2), + ('hann', 64, 0), + ] + for setting in settings_fail: + msg = '{}, {}, {}'.format(*setting) + assert_equal(False, check_NOLA(*setting), err_msg=msg) + + def test_average_all_segments(self): + 
np.random.seed(1234) + x = np.random.randn(1024) + + fs = 1.0 + window = 'hann' + nperseg = 16 + noverlap = 8 + + # Compare twosided, because onesided welch doubles non-DC terms to + # account for power at negative frequencies. stft doesn't do this, + # because it breaks invertibility. + f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False, + return_onesided=False, boundary=None) + fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False, + scaling='spectrum', detrend=False) + + assert_allclose(f, fw) + assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw) + + def test_permute_axes(self): + np.random.seed(1234) + x = np.random.randn(1024) + + fs = 1.0 + window = 'hann' + nperseg = 16 + noverlap = 8 + + f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap) + f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap, + axis=0) + + t3, x1 = istft(Z1, fs, window, nperseg, noverlap) + t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0, + freq_axis=-1) + + assert_allclose(f1, f2) + assert_allclose(t1, t2) + assert_allclose(t3, t4) + assert_allclose(Z1, Z2[:, 0, 0, :]) + assert_allclose(x1, x2[:, 0, 0]) + + @pytest.mark.parametrize('scaling', ['spectrum', 'psd']) + def test_roundtrip_real(self, scaling): + np.random.seed(1234) + + settings = [ + ('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ('bartlett', 101, 51, 26), # Test odd nperseg + ('hann', 1024, 256, 128), # Test defaults + (('tukey', 0.5), 1152, 256, 64), # Test Tukey + ('hann', 1024, 256, 255), # Test overlapped hann + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False, + scaling=scaling) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, scaling=scaling) + + msg = f'{window}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, 
xr, err_msg=msg) + + def test_roundtrip_not_nola(self): + np.random.seed(1234) + + w_fail = np.ones(16) + w_fail[::2] = 0 + settings = [ + (w_fail, 256, len(w_fail), len(w_fail) // 2), + ('hann', 256, 64, 0), + ] + + for window, N, nperseg, noverlap in settings: + msg = f'{window}, {N}, {nperseg}, {noverlap}' + assert not check_NOLA(window, nperseg, noverlap), msg + + t = np.arange(N) + x = 10 * np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary='zeros') + with pytest.warns(UserWarning, match='NOLA'): + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, boundary=True) + + assert np.allclose(t, tr[:len(t)]), msg + assert not np.allclose(x, xr[:len(x)]), msg + + def test_roundtrip_nola_not_cola(self): + np.random.seed(1234) + + settings = [ + ('boxcar', 100, 10, 3), # NOLA True, COLA False + ('bartlett', 101, 51, 37), # NOLA True, COLA False + ('hann', 1024, 256, 127), # NOLA True, COLA False + (('tukey', 0.5), 1152, 256, 14), # NOLA True, COLA False + ('hann', 1024, 256, 5), # NOLA True, COLA False + ] + + for window, N, nperseg, noverlap in settings: + msg = f'{window}, {nperseg}, {noverlap}' + assert check_NOLA(window, nperseg, noverlap), msg + assert not check_COLA(window, nperseg, noverlap), msg + + t = np.arange(N) + x = 10 * np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary='zeros') + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, boundary=True) + + msg = f'{window}, {noverlap}' + assert_allclose(t, tr[:len(t)], err_msg=msg) + assert_allclose(x, xr[:len(x)], err_msg=msg) + + def test_roundtrip_float32(self): + np.random.seed(1234) + + settings = [('hann', 1024, 256, 128)] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + x = x.astype(np.float32) + + _, _, zz = stft(x, nperseg=nperseg, 
noverlap=noverlap, + window=window, detrend=None, padded=False) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window) + + msg = f'{window}, {noverlap}' + assert_allclose(t, t, err_msg=msg) + assert_allclose(x, xr, err_msg=msg, rtol=1e-4, atol=1e-5) + assert_(x.dtype == xr.dtype) + + @pytest.mark.parametrize('scaling', ['spectrum', 'psd']) + def test_roundtrip_complex(self, scaling): + np.random.seed(1234) + + settings = [ + ('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ('bartlett', 101, 51, 26), # Test odd nperseg + ('hann', 1024, 256, 128), # Test defaults + (('tukey', 0.5), 1152, 256, 64), # Test Tukey + ('hann', 1024, 256, 255), # Test overlapped hann + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + 10j*np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False, + return_onesided=False, scaling=scaling) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, input_onesided=False, + scaling=scaling) + + msg = f'{window}, {nperseg}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + + # Check that asking for onesided switches to twosided + with suppress_warnings() as sup: + sup.filter(UserWarning, + "Input data is complex, switching to return_onesided=False") + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False, + return_onesided=True, scaling=scaling) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, input_onesided=False, scaling=scaling) + + msg = f'{window}, {nperseg}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + + def test_roundtrip_boundary_extension(self): + np.random.seed(1234) + + # Test against boxcar, since window is all ones, and thus can be fully + # recovered with no boundary extension + 
+ settings = [ + ('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary=None) + + _, xr = istft(zz, noverlap=noverlap, window=window, boundary=False) + + for boundary in ['even', 'odd', 'constant', 'zeros']: + _, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary=boundary) + + _, xr_ext = istft(zz_ext, noverlap=noverlap, window=window, + boundary=True) + + msg = f'{window}, {noverlap}, {boundary}' + assert_allclose(x, xr, err_msg=msg) + assert_allclose(x, xr_ext, err_msg=msg) + + def test_roundtrip_padded_signal(self): + np.random.seed(1234) + + settings = [ + ('boxcar', 101, 10, 0), + ('hann', 1000, 256, 128), + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True) + + tr, xr = istft(zz, noverlap=noverlap, window=window) + + msg = f'{window}, {noverlap}' + # Account for possible zero-padding at the end + assert_allclose(t, tr[:t.size], err_msg=msg) + assert_allclose(x, xr[:x.size], err_msg=msg) + + def test_roundtrip_padded_FFT(self): + np.random.seed(1234) + + settings = [ + ('hann', 1024, 256, 128, 512), + ('hann', 1024, 256, 128, 501), + ('boxcar', 100, 10, 0, 33), + (('tukey', 0.5), 1152, 256, 64, 1024), + ] + + for window, N, nperseg, noverlap, nfft in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + xc = x*np.exp(1j*np.pi/4) + + # real signal + _, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, detrend=None, padded=True) + + # complex signal + _, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, detrend=None, padded=True, + 
return_onesided=False) + + tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window) + + tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, input_onesided=False) + + msg = f'{window}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + assert_allclose(xc, xcr, err_msg=msg) + + def test_axis_rolling(self): + np.random.seed(1234) + + x_flat = np.random.randn(1024) + _, _, z_flat = stft(x_flat) + + for a in range(3): + newshape = [1,]*3 + newshape[a] = -1 + x = x_flat.reshape(newshape) + + _, _, z_plus = stft(x, axis=a) # Positive axis index + _, _, z_minus = stft(x, axis=a-x.ndim) # Negative axis index + + assert_equal(z_flat, z_plus.squeeze(), err_msg=a) + assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim) + + # z_flat has shape [n_freq, n_time] + + # Test vs. transpose + _, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1) + _, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1) + + assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus') + assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus') + + def test_roundtrip_scaling(self): + """Verify behavior of scaling parameter. """ + # Create 1024 sample cosine signal with amplitude 2: + X = np.zeros(513, dtype=complex) + X[256] = 1024 + x = np.fft.irfft(X) + power_x = sum(x**2) / len(x) # power of signal x is 2 + + # Calculate magnitude-scaled STFT: + Zs = stft(x, boundary='even', scaling='spectrum')[2] + + # Test round trip: + x1 = istft(Zs, boundary=True, scaling='spectrum')[1] + assert_allclose(x1, x) + + # For a Hann-windowed 256 sample length FFT, we expect a peak at + # frequency 64 (since it is 1/4 the length of X) with a height of 1 + # (half the amplitude). A Hann window of a perfectly centered sine has + # the magnitude [..., 0, 0, 0.5, 1, 0.5, 0, 0, ...]. + # Note that in this case the 'even' padding works for the beginning + # but not for the end of the STFT. 
+ assert_allclose(abs(Zs[63, :-1]), 0.5) + assert_allclose(abs(Zs[64, :-1]), 1) + assert_allclose(abs(Zs[65, :-1]), 0.5) + # All other values should be zero: + Zs[63:66, :-1] = 0 + # Note since 'rtol' does not have influence here, atol needs to be set: + assert_allclose(Zs[:, :-1], 0, atol=np.finfo(Zs.dtype).resolution) + + # Calculate two-sided psd-scaled STFT: + # - using 'even' padding since signal is axis symmetric - this ensures + # stationary behavior on the boundaries + # - using the two-sided transform allows determining the spectral + # power by `sum(abs(Zp[:, k])**2) / len(f)` for the k-th time slot. + Zp = stft(x, return_onesided=False, boundary='even', scaling='psd')[2] + + # Calculate spectral power of Zd by summing over the frequency axis: + psd_Zp = np.sum(Zp.real**2 + Zp.imag**2, axis=0) / Zp.shape[0] + # Spectral power of Zp should be equal to the signal's power: + assert_allclose(psd_Zp, power_x) + + # Test round trip: + x1 = istft(Zp, input_onesided=False, boundary=True, scaling='psd')[1] + assert_allclose(x1, x) + + # The power of the one-sided psd-scaled STFT can be determined + # analogously (note that the two sides are not of equal shape): + Zp0 = stft(x, return_onesided=True, boundary='even', scaling='psd')[2] + + # Since x is real, its Fourier transform is conjugate symmetric, i.e., + # the missing 'second side' can be expressed through the 'first side': + Zp1 = np.conj(Zp0[-2:0:-1, :]) # 'second side' is conjugate reversed + assert_allclose(Zp[:129, :], Zp0) + assert_allclose(Zp[129:, :], Zp1) + + # Calculate the spectral power: + s2 = (np.sum(Zp0.real ** 2 + Zp0.imag ** 2, axis=0) + + np.sum(Zp1.real ** 2 + Zp1.imag ** 2, axis=0)) + psd_Zp01 = s2 / (Zp0.shape[0] + Zp1.shape[0]) + assert_allclose(psd_Zp01, power_x) + + # Test round trip: + x1 = istft(Zp0, input_onesided=True, boundary=True, scaling='psd')[1] + assert_allclose(x1, x) + + +class TestSampledSpectralRepresentations: + """Check energy/power relations from `Spectral Analysis` 
section in the user guide. + + A 32 sample cosine signal is used to compare the numerical to the expected results + stated in :ref:`tutorial_SpectralAnalysis` in + file ``doc/source/tutorial/signal.rst`` + """ + n: int = 32 #: number of samples + T: float = 1/16 #: sampling interval + a_ref: float = 3 #: amplitude of reference + l_a: int = 3 #: index in fft for defining frequency of test signal + + x_ref: np.ndarray #: reference signal + X_ref: np.ndarray #: two-sided FFT of x_ref + E_ref: float #: energy of signal + P_ref: float #: power of signal + + def setup_method(self): + """Create Cosine signal with amplitude a from spectrum. """ + f = rfftfreq(self.n, self.T) + X_ref = np.zeros_like(f) + self.l_a = 3 + X_ref[self.l_a] = self.a_ref/2 * self.n # set amplitude + self.x_ref = irfft(X_ref) + self.X_ref = fft(self.x_ref) + + # Closed form expression for continuous-time signal: + self.E_ref = self.tau * self.a_ref**2 / 2 # energy of signal + self.P_ref = self.a_ref**2 / 2 # power of signal + + @property + def tau(self) -> float: + """Duration of signal. """ + return self.n * self.T + + @property + def delta_f(self) -> float: + """Bin width """ + return 1 / (self.n * self.T) + + def test_reference_signal(self): + """Test energy and power formulas. """ + # Verify that amplitude is a: + assert_allclose(2*self.a_ref, np.ptp(self.x_ref), rtol=0.1) + # Verify that energy expression for sampled signal: + assert_allclose(self.T * sum(self.x_ref ** 2), self.E_ref) + + # Verify that spectral energy and power formulas are correct: + sum_X_ref_squared = sum(self.X_ref.real**2 + self.X_ref.imag**2) + assert_allclose(self.T/self.n * sum_X_ref_squared, self.E_ref) + assert_allclose(1/self.n**2 * sum_X_ref_squared, self.P_ref) + + def test_windowed_DFT(self): + """Verify spectral representations of windowed DFT. + + Furthermore, the scalings of `periodogram` and `welch` are verified. 
+ """ + w = hann(self.n, sym=False) + c_amp, c_rms = abs(sum(w)), np.sqrt(sum(w.real**2 + w.imag**2)) + Xw = fft(self.x_ref*w) # unnormalized windowed DFT + + # Verify that the *spectrum* peak is consistent: + assert_allclose(self.tau * Xw[self.l_a] / c_amp, self.a_ref * self.tau / 2) + # Verify that the *amplitude spectrum* peak is consistent: + assert_allclose(Xw[self.l_a] / c_amp, self.a_ref/2) + + # Verify spectral power/energy equals signal's power/energy: + X_ESD = self.tau * self.T * abs(Xw / c_rms)**2 # Energy Spectral Density + X_PSD = self.T * abs(Xw / c_rms)**2 # Power Spectral Density + assert_allclose(self.delta_f * sum(X_ESD), self.E_ref) + assert_allclose(self.delta_f * sum(X_PSD), self.P_ref) + + # Verify scalings of periodogram: + kw = dict(fs=1/self.T, window=w, detrend=False, return_onesided=False) + _, P_mag = periodogram(self.x_ref, scaling='spectrum', **kw) + _, P_psd = periodogram(self.x_ref, scaling='density', **kw) + + # Verify that periodogram calculates a squared magnitude spectrum: + float_res = np.finfo(P_mag.dtype).resolution + assert_allclose(P_mag, abs(Xw/c_amp)**2, atol=float_res*max(P_mag)) + # Verify that periodogram calculates a PSD: + assert_allclose(P_psd, X_PSD, atol=float_res*max(P_psd)) + + # Ensure that scaling of welch is the same as of periodogram: + kw = dict(nperseg=len(self.x_ref), noverlap=0, **kw) + assert_allclose(welch(self.x_ref, scaling='spectrum', **kw)[1], P_mag, + atol=float_res*max(P_mag)) + assert_allclose(welch(self.x_ref, scaling='density', **kw)[1], P_psd, + atol=float_res*max(P_psd)) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..7f84a804b52fc70b19bfe8e9d731c086f34179b2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py @@ -0,0 +1,351 @@ +import numpy as np +from 
numpy.testing import (assert_almost_equal, assert_equal, + assert_, assert_allclose, assert_array_equal) +from pytest import raises as assert_raises + +import scipy.signal._waveforms as waveforms + + +# These chirp_* functions are the instantaneous frequencies of the signals +# returned by chirp(). + +def chirp_linear(t, f0, f1, t1): + f = f0 + (f1 - f0) * t / t1 + return f + + +def chirp_quadratic(t, f0, f1, t1, vertex_zero=True): + if vertex_zero: + f = f0 + (f1 - f0) * t**2 / t1**2 + else: + f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2 + return f + + +def chirp_geometric(t, f0, f1, t1): + f = f0 * (f1/f0)**(t/t1) + return f + + +def chirp_hyperbolic(t, f0, f1, t1): + f = f0*f1*t1 / ((f0 - f1)*t + f1*t1) + return f + + +def compute_frequency(t, theta): + """ + Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t). + """ + # Assume theta and t are 1-D NumPy arrays. + # Assume that t is uniformly spaced. + dt = t[1] - t[0] + f = np.diff(theta)/(2*np.pi) / dt + tf = 0.5*(t[1:] + t[:-1]) + return tf, f + + +class TestChirp: + + def test_linear_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear') + assert_almost_equal(w, 1.0) + + def test_linear_freq_01(self): + method = 'linear' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_linear_freq_02(self): + method = 'linear' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_quadratic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic') + assert_almost_equal(w, 1.0) + + def test_quadratic_at_zero2(self): + w = waveforms.chirp(t=0, f0=1.0, 
f1=2.0, t1=1.0, method='quadratic', + vertex_zero=False) + assert_almost_equal(w, 1.0) + + def test_quadratic_freq_01(self): + method = 'quadratic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_quadratic_freq_02(self): + method = 'quadratic' + f0 = 20.0 + f1 = 10.0 + t1 = 10.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_logarithmic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic') + assert_almost_equal(w, 1.0) + + def test_logarithmic_freq_01(self): + method = 'logarithmic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_logarithmic_freq_02(self): + method = 'logarithmic' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_logarithmic_freq_03(self): + method = 'logarithmic' + f0 = 100.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_hyperbolic_at_zero(self): + w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic') + assert_almost_equal(w, 1.0) + + def test_hyperbolic_freq_01(self): + method = 'hyperbolic' + 
t1 = 1.0 + t = np.linspace(0, t1, 10000) + # f0 f1 + cases = [[10.0, 1.0], + [1.0, 10.0], + [-10.0, -1.0], + [-1.0, -10.0]] + for f0, f1 in cases: + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + expected = chirp_hyperbolic(tf, f0, f1, t1) + assert_allclose(f, expected) + + def test_hyperbolic_zero_freq(self): + # f0=0 or f1=0 must raise a ValueError. + method = 'hyperbolic' + t1 = 1.0 + t = np.linspace(0, t1, 5) + assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method) + assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method) + + def test_unknown_method(self): + method = "foo" + f0 = 10.0 + f1 = 20.0 + t1 = 1.0 + t = np.linspace(0, t1, 10) + assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) + + def test_integer_t1(self): + f0 = 10.0 + f1 = 20.0 + t = np.linspace(-1, 1, 11) + t1 = 3.0 + float_result = waveforms.chirp(t, f0, t1, f1) + t1 = 3 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 't1=3' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f0(self): + f1 = 20.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f0 = 10.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f0 = 10 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f1(self): + f0 = 10.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f1 = 20.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f1 = 20 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f1=20' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_all(self): + f0 = 10 + t1 = 3 + f1 = 20 + t = np.linspace(-1, 1, 11) + float_result = waveforms.chirp(t, float(f0), float(t1), float(f1)) + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result" + 
assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestSweepPoly: + + def test_sweep_poly_quad1(self): + p = np.poly1d([1.0, 0.0, 1.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_const(self): + p = np.poly1d(2.0) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_linear(self): + p = np.poly1d([-1.0, 10.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_quad2(self): + p = np.poly1d([1.0, 0.0, -2.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_cubic(self): + p = np.poly1d([2.0, 1.0, 0.0, -2.0]) + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_cubic2(self): + """Use an array of coefficients instead of a poly1d.""" + p = np.array([2.0, 1.0, 0.0, -2.0]) + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_cubic3(self): + """Use a list of coefficients instead of a poly1d.""" + p = [2.0, 1.0, 0.0, -2.0] + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, 
phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + +class TestGaussPulse: + + def test_integer_fc(self): + float_result = waveforms.gausspulse('cutoff', fc=1000.0) + int_result = waveforms.gausspulse('cutoff', fc=1000) + err_msg = "Integer input 'fc=1000' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bw(self): + float_result = waveforms.gausspulse('cutoff', bw=1.0) + int_result = waveforms.gausspulse('cutoff', bw=1) + err_msg = "Integer input 'bw=1' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bwr(self): + float_result = waveforms.gausspulse('cutoff', bwr=-6.0) + int_result = waveforms.gausspulse('cutoff', bwr=-6) + err_msg = "Integer input 'bwr=-6' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_tpr(self): + float_result = waveforms.gausspulse('cutoff', tpr=-60.0) + int_result = waveforms.gausspulse('cutoff', tpr=-60) + err_msg = "Integer input 'tpr=-60' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestUnitImpulse: + + def test_no_index(self): + assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0]) + assert_array_equal(waveforms.unit_impulse((3, 3)), + [[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + def test_index(self): + assert_array_equal(waveforms.unit_impulse(10, 3), + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]) + assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)), + [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + + # Broadcasting + imp = waveforms.unit_impulse((4, 4), 2) + assert_array_equal(imp, np.array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 0]])) + + def test_mid(self): + assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'), + [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_array_equal(waveforms.unit_impulse(9, 'mid'), + [0, 0, 0, 0, 1, 0, 0, 0, 0]) + + def test_dtype(self): + imp = 
waveforms.unit_impulse(7) + assert_(np.issubdtype(imp.dtype, np.floating)) + + imp = waveforms.unit_impulse(5, 3, dtype=int) + assert_(np.issubdtype(imp.dtype, np.integer)) + + imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex) + assert_(np.issubdtype(imp.dtype, np.complexfloating)) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..e83e6918429bfc539a44fc9a627deabafe2852a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py @@ -0,0 +1,161 @@ +import numpy as np +from numpy.testing import (assert_equal, + assert_array_equal, assert_array_almost_equal, assert_array_less, assert_,) +import pytest + +import scipy.signal._wavelets as wavelets + + +class TestWavelets: + def test_qmf(self): + with pytest.deprecated_call(): + assert_array_equal(wavelets.qmf([1, 1]), [1, -1]) + + def test_daub(self): + with pytest.deprecated_call(): + for i in range(1, 15): + assert_equal(len(wavelets.daub(i)), i * 2) + + def test_cascade(self): + with pytest.deprecated_call(): + for J in range(1, 7): + for i in range(1, 5): + lpcoef = wavelets.daub(i) + k = len(lpcoef) + x, phi, psi = wavelets.cascade(lpcoef, J) + assert_(len(x) == len(phi) == len(psi)) + assert_equal(len(x), (k - 1) * 2 ** J) + + def test_morlet(self): + with pytest.deprecated_call(): + x = wavelets.morlet(50, 4.1, complete=True) + y = wavelets.morlet(50, 4.1, complete=False) + # Test if complete and incomplete wavelet have same lengths: + assert_equal(len(x), len(y)) + # Test if complete wavelet is less than incomplete wavelet: + assert_array_less(x, y) + + x = wavelets.morlet(10, 50, complete=False) + y = wavelets.morlet(10, 50, complete=True) + # For large widths complete and incomplete wavelets should be + # identical within numerical precision: + assert_equal(x, y) + + # 
miscellaneous tests: + x = np.array([1.73752399e-09 + 9.84327394e-25j, + 6.49471756e-01 + 0.00000000e+00j, + 1.73752399e-09 - 9.84327394e-25j]) + y = wavelets.morlet(3, w=2, complete=True) + assert_array_almost_equal(x, y) + + x = np.array([2.00947715e-09 + 9.84327394e-25j, + 7.51125544e-01 + 0.00000000e+00j, + 2.00947715e-09 - 9.84327394e-25j]) + y = wavelets.morlet(3, w=2, complete=False) + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, s=4, complete=True) + y = wavelets.morlet(20000, s=8, complete=True)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, s=4, complete=False) + assert_array_almost_equal(y, x, decimal=2) + y = wavelets.morlet(20000, s=8, complete=False)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=3, s=5, complete=True) + y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=3, s=5, complete=False) + assert_array_almost_equal(y, x, decimal=2) + y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=7, s=10, complete=True) + y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=7, s=10, complete=False) + assert_array_almost_equal(x, y, decimal=2) + y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + def test_morlet2(self): + with pytest.deprecated_call(): + w = wavelets.morlet2(1.0, 0.5) + expected = (np.pi**(-0.25) * np.sqrt(1/0.5)).astype(complex) + assert_array_equal(w, expected) + + lengths = [5, 11, 15, 51, 101] + for length in lengths: + w = wavelets.morlet2(length, 1.0) + assert_(len(w) == length) + max_loc = np.argmax(w) + assert_(max_loc == (length // 2)) + + points = 100 + w = abs(wavelets.morlet2(points, 
2.0)) + half_vec = np.arange(0, points // 2) + assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) + + x = np.array([5.03701224e-09 + 2.46742437e-24j, + 1.88279253e+00 + 0.00000000e+00j, + 5.03701224e-09 - 2.46742437e-24j]) + y = wavelets.morlet2(3, s=1/(2*np.pi), w=2) + assert_array_almost_equal(x, y) + + def test_ricker(self): + with pytest.deprecated_call(): + w = wavelets.ricker(1.0, 1) + expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25)) + assert_array_equal(w, expected) + + lengths = [5, 11, 15, 51, 101] + for length in lengths: + w = wavelets.ricker(length, 1.0) + assert_(len(w) == length) + max_loc = np.argmax(w) + assert_(max_loc == (length // 2)) + + points = 100 + w = wavelets.ricker(points, 2.0) + half_vec = np.arange(0, points // 2) + #Wavelet should be symmetric + assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) + + #Check zeros + aas = [5, 10, 15, 20, 30] + points = 99 + for a in aas: + w = wavelets.ricker(points, a) + vec = np.arange(0, points) - (points - 1.0) / 2 + exp_zero1 = np.argmin(np.abs(vec - a)) + exp_zero2 = np.argmin(np.abs(vec + a)) + assert_array_almost_equal(w[exp_zero1], 0) + assert_array_almost_equal(w[exp_zero2], 0) + + def test_cwt(self): + with pytest.deprecated_call(): + widths = [1.0] + def delta_wavelet(s, t): + return np.array([1]) + len_data = 100 + test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0) + + #Test delta function input gives same data as output + cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths) + assert_(cwt_dat.shape == (len(widths), len_data)) + assert_array_almost_equal(test_data, cwt_dat.flatten()) + + #Check proper shape on output + widths = [1, 3, 4, 5, 10] + cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths) + assert_(cwt_dat.shape == (len(widths), len_data)) + + widths = [len_data * 10] + #Note: this wavelet isn't defined quite right, but is fine for this test + def flat_wavelet(l, w): + return np.full(w, 1 / w) + cwt_dat = wavelets.cwt(test_data, flat_wavelet, 
widths) + assert_array_almost_equal(cwt_dat, np.mean(test_data)) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/waveforms.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..2fa191bef7febde46310e25a8886037539370881 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/waveforms.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', + 'unit_impulse', 'place', 'nan', 'mod', 'extract', 'log', 'exp', + 'polyval', 'polyint' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="waveforms", + private_modules=["_waveforms"], all=__all__, + attribute=name)