[FEAT] Add colab badges to docs (#165)
AzulGarza authored Nov 9, 2023
2 parents bb46813 + 7e1cd4f commit e0dcc35
Showing 18 changed files with 563 additions and 45 deletions.
1 change: 1 addition & 0 deletions .fernignore
@@ -16,6 +16,7 @@ settings.ini
setup.py
pyproject.toml
nixtlats/timegpt.py
nixtlats/utils.py
nixtlats/_modidx.py
nixtlats/__init__.py
nixtlats/distributed/
20 changes: 10 additions & 10 deletions action_files/create_sdk_reference.py
@@ -8,12 +8,12 @@


def create_sdk_reference(
save_dir,
slug_number,
host_url=os.environ['README_HOST_URL'],
category=os.environ['README_CATEGORY'],
):
file_path = f'{save_dir}/{slug_number}_sdk_reference.md'
save_dir,
slug_number,
host_url=os.environ["README_HOST_URL"],
category=os.environ["README_CATEGORY"],
):
file_path = f"{save_dir}/{slug_number}_sdk_reference.md"
header = f"""---
title: "SDK Reference"
slug: "sdk_reference"
@@ -25,10 +25,10 @@ def create_sdk_reference(
---
"""
with open(file_path, 'w', encoding='utf-8') as file:

with open(file_path, "w", encoding="utf-8") as file:
file.write(header)

if __name__=="__main__":
fire.Fire(create_sdk_reference)

if __name__ == "__main__":
fire.Fire(create_sdk_reference)
75 changes: 41 additions & 34 deletions action_files/modify_markdown.py
@@ -9,26 +9,28 @@

def to_snake_case(s):
s = s.lower()
s = re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
s = re.sub(r'\W', '_', s)
s = re.sub(r'_+', '_', s)
s = re.sub(r"(?<!^)(?=[A-Z])", "_", s).lower()
s = re.sub(r"\W", "_", s)
s = re.sub(r"_+", "_", s)
return s


def merge_lines(md_text):
code_block_pattern = re.compile(r'``` python([\s\S]*?)```', re.MULTILINE)
code_block_pattern = re.compile(r"``` python([\s\S]*?)```", re.MULTILINE)
code_blocks = code_block_pattern.findall(md_text)
md_text_no_code = code_block_pattern.sub('CODEBLOCK', md_text)
lines = md_text_no_code.split('\n')
md_text_no_code = code_block_pattern.sub("CODEBLOCK", md_text)
lines = md_text_no_code.split("\n")
merged_lines = []
buffer_line = ""
in_div_block = False
for line in lines:
if line.strip().lower().startswith('<div>'):
if line.strip().lower().startswith("<div>"):
in_div_block = True
elif line.strip().lower().endswith('</div>'):
elif line.strip().lower().endswith("</div>"):
in_div_block = False
if (in_div_block or
line.startswith((' ', '> ', '#', '-', '*', '1.', '2.', '3.', 'CODEBLOCK', '!', '['))):
if in_div_block or line.startswith(
(" ", "> ", "#", "-", "*", "1.", "2.", "3.", "CODEBLOCK", "!", "[")
):
if buffer_line:
merged_lines.append(buffer_line.strip())
buffer_line = ""
@@ -37,34 +39,37 @@ def merge_lines(md_text)
buffer_line += line.strip() + " "
if buffer_line:
merged_lines.append(buffer_line.strip())
md_text_merged = '\n'.join(merged_lines)
md_text_merged = "\n".join(merged_lines)
for code_block in code_blocks:
md_text_merged = md_text_merged.replace('CODEBLOCK', f'\n``` python\n{code_block}\n```\n', 1)
md_text_merged = md_text_merged.replace(
"CODEBLOCK", f"\n``` python\n{code_block}\n```\n", 1
)
return md_text_merged


def modify_markdown(
file_path,
slug_number=0,
host_url=os.environ['README_HOST_URL'],
category=os.environ['README_CATEGORY'],
):
with open(file_path, 'r', encoding='utf-8') as file:
file_path,
slug_number=0,
host_url=os.environ["README_HOST_URL"],
category=os.environ["README_CATEGORY"],
):
with open(file_path, "r", encoding="utf-8") as file:
content = file.read()
dir_path = os.path.dirname(file_path)
if not dir_path.endswith("/"):
dir_path += "/"

# Extract and remove the first markdown header
pattern_header = re.compile(r'^#\s+(.*)\n+', re.MULTILINE)
pattern_header = re.compile(r"^#\s+(.*)\n+", re.MULTILINE)
match = pattern_header.search(content)

if match:
title = match.group(1)
content = pattern_header.sub('', content, count=1) # remove the first match
content = pattern_header.sub("", content, count=1) # remove the first match
else:
title = 'Something Amazing'
title = "Something Amazing"
slug = to_snake_case(title)

# Prepare the new header
header = f"""---
title: "{title}"
@@ -76,22 +81,24 @@ def modify_markdown(
---
"""

# Remove parts delimited by ::: :::
pattern_delimited = re.compile(r':::.*?:::', re.DOTALL)
content = pattern_delimited.sub('', content)
pattern_delimited = re.compile(r":::.*?:::", re.DOTALL)
content = pattern_delimited.sub("", content)

# Modify image paths
pattern_image = re.compile(r'!\[\]\((.*?)\)')
content = content.replace('![figure](../../', f'![figure]({host_url}/nbs/')
modified_content = pattern_image.sub(r'![](' + host_url + dir_path + r'\1)', content)
content = content.replace("![figure](../../", f"![figure]({host_url}/nbs/")
pattern_image = re.compile(r"!\[\]\(((?!\.svg)*?)\)")
modified_content = pattern_image.sub(
r"![](" + host_url + dir_path + r"\1)", content
)

# Concatenate new header and modified content
final_content = header + merge_lines(modified_content)
with open(file_path, 'w', encoding='utf-8') as file:

with open(file_path, "w", encoding="utf-8") as file:
file.write(final_content)

if __name__=="__main__":
fire.Fire(modify_markdown)

if __name__ == "__main__":
fire.Fire(modify_markdown)
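
For reference, a quick illustration of how the `to_snake_case` helper above turns a notebook title into a slug; the example titles are hypothetical and not taken from this diff.

```python
# Same logic as the helper in action_files/modify_markdown.py above:
# lowercase the input, then collapse non-word characters into single underscores.
import re

def to_snake_case(s):
    s = s.lower()
    s = re.sub(r"(?<!^)(?=[A-Z])", "_", s).lower()
    s = re.sub(r"\W", "_", s)
    s = re.sub(r"_+", "_", s)
    return s

print(to_snake_case("Anomaly Detection"))    # anomaly_detection
print(to_snake_case("Exogenous Variables"))  # exogenous_variables
```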
35 changes: 35 additions & 0 deletions nbs/docs/getting-started/1_getting_started_short.ipynb
@@ -10,6 +10,41 @@
"> Unlock the power of accurate predictions and confidently navigate uncertainty. Reduce uncertainty and resource limitations. With TimeGPT, you can effortlessly access state-of-the-art models to make data-driven decisions. Whether you're a bank forecasting market trends or a startup predicting product demand, TimeGPT democratizes access to cutting-edge predictive insights."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fb455c30-d11d-49a1-8825-c908328bc63a",
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"from nixtlats.utils import colab_badge"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2820d3a-47dd-4827-925f-63f454701c79",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"[![](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Nixtla/nixtla/blob/main/nbs/docs/getting-started/1_getting_started_short.ipynb)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"#| echo: false\n",
"colab_badge('docs/getting-started/1_getting_started_short')"
]
},
{
"cell_type": "markdown",
"id": "568b7aba-5990-477b-a012-9bd30815cfe3",
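
The badge cells added here import `colab_badge` from `nixtlats.utils`, the module newly excluded in `.fernignore` above; its implementation is not among the hunks shown in this diff. Below is a minimal, non-authoritative sketch of such a helper, reconstructed from the rendered cell output; the signature and the repo/branch defaults are assumptions.

```python
# Hypothetical sketch of a colab_badge helper (the real one lives in
# nixtlats/utils.py, not shown here); URLs mirror the cell output above.
from IPython.display import Markdown, display

def colab_badge(path: str, repo: str = "Nixtla/nixtla", branch: str = "main") -> None:
    """Render an 'Open in Colab' badge linking to nbs/{path}.ipynb on GitHub."""
    badge = "https://colab.research.google.com/assets/colab-badge.svg"
    notebook = (
        f"https://colab.research.google.com/github/{repo}/blob/{branch}/nbs/{path}.ipynb"
    )
    display(Markdown(f"[![]({badge})]({notebook})"))

# colab_badge('docs/getting-started/1_getting_started_short') would reproduce
# the badge shown in the cell output above.
```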
35 changes: 35 additions & 0 deletions nbs/docs/how-to-guides/0_distributed_fcst_spark.ipynb
@@ -11,6 +11,41 @@
"`TimeGPT` works on top of Spark, Dask, and Ray through Fugue. `TimeGPT` will read the input DataFrame and use the corresponding engine. For example, if the input is a Spark DataFrame, StatsForecast will use the existing Spark session to run the forecast.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3119cd0-9b9d-4df9-9779-005847c46048",
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"from nixtlats.utils import colab_badge"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dbd11fae-3219-4ffc-b2de-a96542362d58",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"[![](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Nixtla/nixtla/blob/main/nbs/docs/how-to-guides/0_distributed_fcst_spark.ipynb)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"#| echo: false\n",
"colab_badge('docs/how-to-guides/0_distributed_fcst_spark')"
]
},
{
"cell_type": "markdown",
"id": "361d702c-361f-4321-85d3-2b76fb7b4937",
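
The markdown cell in this notebook explains that `TimeGPT` dispatches on the input DataFrame type through Fugue. A hedged sketch of what that could look like for a Spark DataFrame follows; the `TimeGPT` client, its `forecast(df, h)` call, the expected column names, and the data path are assumptions rather than content of this diff.

```python
# Sketch: the input is a Spark DataFrame, so the forecast would run on the
# active Spark session through Fugue. Column names (unique_id, ds, y), the
# token handling, and the parquet path are placeholders.
from pyspark.sql import SparkSession
from nixtlats import TimeGPT

spark = SparkSession.builder.getOrCreate()
spark_df = spark.read.parquet("s3://my-bucket/series.parquet")  # hypothetical path

timegpt = TimeGPT(token="YOUR_TOKEN")
fcst_df = timegpt.forecast(df=spark_df, h=12)  # forecasts come back as a Spark DataFrame
fcst_df.show()
```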
35 changes: 35 additions & 0 deletions nbs/docs/how-to-guides/1_distributed_cv_spark.ipynb
@@ -11,6 +11,41 @@
"`TimeGPT` works on top of Spark, Dask, and Ray through Fugue. `TimeGPT` will read the input DataFrame and use the corresponding engine. For example, if the input is a Spark DataFrame, StatsForecast will use the existing Spark session to run the forecast.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5051a20b-716a-4e83-ab9a-6472c7e4a4fa",
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"from nixtlats.utils import colab_badge"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ec6d4ad-7514-4ee9-8ca5-2ef027c45e6a",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"[![](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Nixtla/nixtla/blob/main/nbs/docs/how-to-guides/1_distributed_cv_spark.ipynb)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"#| echo: false\n",
"colab_badge('docs/how-to-guides/1_distributed_cv_spark')"
]
},
{
"cell_type": "markdown",
"id": "361d702c-361f-4321-85d3-2b76fb7b4937",
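
For the cross-validation notebook above, the same dispatch idea applies. The sketch below assumes a `cross_validation(df, h, n_windows)` method on the client, inferred from the notebook title rather than shown in this diff.

```python
# Non-authoritative sketch; method name and arguments are assumptions.
from pyspark.sql import SparkSession
from nixtlats import TimeGPT

spark = SparkSession.builder.getOrCreate()
spark_df = spark.read.parquet("s3://my-bucket/series.parquet")  # hypothetical path

timegpt = TimeGPT(token="YOUR_TOKEN")
cv_df = timegpt.cross_validation(df=spark_df, h=12, n_windows=3)  # rolling windows
cv_df.show()
```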
35 changes: 35 additions & 0 deletions nbs/docs/tutorials/0_anomaly_detection.ipynb
@@ -16,6 +16,41 @@
"Anomaly detection in time series data plays a pivotal role in numerous sectors including finance, healthcare, security, and infrastructure. In essence, time series data represents a sequence of data points indexed (or listed or graphed) in time order, often with equal intervals. As systems and processes become increasingly digitized and interconnected, the need to monitor and ensure their normal behavior grows proportionally. Detecting anomalies can indicate potential problems, malfunctions, or even malicious activities. By promptly identifying these deviations from the expected pattern, organizations can take preemptive measures, optimize processes, or protect resources. `TimeGPT` includes the `detect_anomalies` method to detect anomalies automatically."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "db88512e-9e9e-4f01-83a6-bec1ccdbff8e",
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"from nixtlats.utils import colab_badge"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7528cf16-4e82-4ea0-bfe0-4d55fa71db0c",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"[![](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Nixtla/nixtla/blob/main/nbs/docs/tutorials/0_anomaly_detection.ipynb)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"#| echo: false\n",
"colab_badge('docs/tutorials/0_anomaly_detection')"
]
},
{
"cell_type": "code",
"execution_count": null,
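
The notebook's introduction points to the `detect_anomalies` method. A minimal sketch of a call on a toy pandas series follows; the argument names (`time_col`, `target_col`, `freq`) and the token handling are assumptions about the client API.

```python
# Toy example: a flat weekly pattern with one injected spike for the detector to flag.
import pandas as pd
from nixtlats import TimeGPT

df = pd.DataFrame({
    "timestamp": pd.date_range("2023-01-01", periods=200, freq="D"),
    "value": [float(i % 7) for i in range(200)],  # repeating weekly pattern
})
df.loc[100, "value"] = 50.0  # obvious outlier

timegpt = TimeGPT(token="YOUR_TOKEN")
anomalies_df = timegpt.detect_anomalies(
    df, time_col="timestamp", target_col="value", freq="D"
)
print(anomalies_df.head())
```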
35 changes: 35 additions & 0 deletions nbs/docs/tutorials/1_exogenous_variables.ipynb
@@ -20,6 +20,41 @@
"To incorporate exogenous variables in TimeGPT, you'll need to pair each point in your time series data with the corresponding external data."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "44ec954b-63b3-4236-86a5-5a2cd86de934",
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"from nixtlats.utils import colab_badge"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0bc1e229-6c80-463f-b721-465a48fbddf1",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"[![](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Nixtla/nixtla/blob/main/nbs/docs/tutorials/1_exogenous_variables.ipynb)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"#| echo: false\n",
"colab_badge('docs/tutorials/1_exogenous_variables')"
]
},
{
"cell_type": "code",
"execution_count": null,
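
The introduction above notes that each point of the series must be paired with its exogenous values. The sketch below illustrates that pairing and passes future regressor values for the horizon through an `X_df` argument; that name, the `ds`/`y` column convention, and the token handling are assumptions, not taken from this diff.

```python
# Every historical row carries its exogenous column; future values of that
# column for the 14-day horizon are supplied separately.
import pandas as pd
from nixtlats import TimeGPT

df = pd.DataFrame({
    "ds": pd.date_range("2023-01-01", periods=100, freq="D"),
    "y": range(100),
    "temperature": [20.0] * 100,  # exogenous regressor aligned with each point
})
future_exog = pd.DataFrame({
    "ds": pd.date_range("2023-04-11", periods=14, freq="D"),  # day after history ends
    "temperature": [21.0] * 14,  # known or estimated future regressor values
})

timegpt = TimeGPT(token="YOUR_TOKEN")
fcst_df = timegpt.forecast(df=df, X_df=future_exog, h=14)
```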