Skip to content

Commit

Permalink
- branch name based on google sheet
Browse files Browse the repository at this point in the history
  • Loading branch information
DaniloMilenkovic committed Oct 28, 2024
1 parent 2c38f15 commit 37a5b59
Show file tree
Hide file tree
Showing 2 changed files with 166 additions and 16 deletions.
39 changes: 23 additions & 16 deletions .github/workflows/sdkAutomation.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -86,26 +86,33 @@ jobs:
# Create a new branch
- name: Create branch
env:
RELEASES_SPREADSHEET: ${{ secrets.RELEASES_SPREADSHEET }}
run: |
git fetch --all
if [[ `git branch -a | grep "new-feature/automation"` ]]; then
highest=$(git branch -a | grep "new-feature/automation" | grep -E -o '[0-9]+' | sort -n | tail -1)
if ${{ github.event.inputs.new_branch }} == true; then
number=$((highest+1))
branch_name=$(python scripts/release_calendar.py "NECTO DAILY UPDATE" "$RELEASES_SPREADSHEET")
if [ "$branch_name" == "new_branch" ]; then
git fetch --all
if [[ `git branch -a | grep "new-feature/automation"` ]]; then
highest=$(git branch -a | grep "new-feature/automation" | grep -E -o '[0-9]+' | sort -n | tail -1)
if ${{ github.event.inputs.new_branch }} == true; then
number=$((highest+1))
else
number=$((highest))
fi
else
number=$((highest))
number=1
fi
else
number=1
branch_name="new-feature/automation/$number"
fi
echo "Found number: $number"
echo "Found branch: $branch_name"
if ${{ github.event.inputs.new_branch }} == true; then
git branch "new-feature/automation/$number"
git checkout "new-feature/automation/$number"
git push -u origin "new-feature/automation/$number"
git branch $branch_name
git checkout $branch_name
git push -u origin $branch_name
else
git checkout "new-feature/automation/$number"
git checkout $branch_name
git reset --hard $(git rev-list --max-parents=1 )
fi
Expand Down Expand Up @@ -208,16 +215,16 @@ jobs:
GH_TOKEN: ${{ secrets.MIKROE_ACTIONS_KEY }}

# Move the asset we just downloaded to automatization and extract it
- name: Move to the created directory unzip and distribute files
- name: Move to the created directory unzip and distribute files
run: |
sudo mv generated_files.zip automatization/
cd automatization
unzip generated_files.zip
mkdir -p ../${{ github.event.inputs.architecture }}/gcc_clang/def/${{ github.event.inputs.vendor }}/${{ github.event.inputs.mcu_name }}
mkdir -p ../${{ github.event.inputs.architecture }}/gcc_clang/interrupts/include/interrupts_mcu/${{ github.event.inputs.mcu_name }}
mkdir -p ../${{ github.event.inputs.architecture }}/gcc_clang/system/src/${{ github.event.inputs.vendor }}/doc_ds_1
sudo mv 'mcu.h' ../${{ github.event.inputs.architecture }}/gcc_clang/def/${{ github.event.inputs.vendor }}/${{ github.event.inputs.mcu_name }}/
sudo mv "$(find . -maxdepth 1 -name '*.json' | head -n 1)" ../${{ github.event.inputs.architecture }}/gcc_clang/def/
sudo mv 'interrupts_mcu.h' ../${{ github.event.inputs.architecture }}/gcc_clang/interrupts/include/interrupts_mcu/${{ github.event.inputs.mcu_name }}/
Expand Down
143 changes: 143 additions & 0 deletions scripts/release_calendar.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
import os, argparse, json, urllib.request

from datetime import datetime

def get_data(link, calendar_title, saveToFile=None):
    """
    Fetch release CSV data from *link* and merge events sharing a start date.

    :param link: URL of the CSV export to download.
    :param calendar_title: Calendar title (unused here; kept for interface
        compatibility with existing callers that pass it).
    :param saveToFile: Optional; path to save the processed JSON data.
    :return: A list of merged events based on their start date; an empty
        list when the download fails.
    """
    try:
        ## Fetch the data from the provided link
        with urllib.request.urlopen(link) as response:
            html = response.read().decode('utf-8')
    except Exception as e:
        ## Report the failure and return no events. (Previously the code
        ## fell through and crashed reading a temp file that was never
        ## written.)
        print(f"Error fetching data: {e}")
        return []
    ## Normalize line endings the same way a text-mode file read would,
    ## and keep the terminators so parsing behaves exactly like the old
    ## write-to-temp-file / readlines round trip (no filesystem needed).
    all_releases = html.replace('\r\n', '\n').replace('\r', '\n').splitlines(keepends=True)
    ## List to store formatted event data
    formatted_array = []
    for each_line in all_releases:
        ## Split the line into individual parts based on commas
        parts = each_line.split(',')
        ## Skip empty lines or headers
        if parts[0] == '' or parts[0] == 'Product name' or parts[0] == '\n':
            continue
        ## Guard against short/malformed rows lacking a date column
        ## (these used to raise IndexError).
        if len(parts) < 4:
            continue
        ## Extract the board name and release plan date
        board_name = parts[0]
        try:
            ## Parse the release date from the format 'dd.mm.yyyy'
            release_date = datetime.strptime(parts[3], "%d.%m.%Y")
        except ValueError:
            ## Skip lines with incorrect date format
            continue
        ## Create a dictionary for each event and append to the formatted array
        formatted_array.append(
            {
                "notes": f"<ul>\n<li>{board_name}</li>\n</ul>",
                "branch": parts[1],
                "tz": "Europe/Belgrade",
                "start_dt": release_date.strftime("%Y-%m-%d"),
            }
        )
    ## Dictionary to merge nodes based on the start date
    merged_nodes = {}
    ## Merge events with the same start date by combining their notes
    for value in formatted_array:
        start_dt = value['start_dt']
        if start_dt not in merged_nodes:
            ## Add new event if the start date does not exist
            merged_nodes[start_dt] = value
        else:
            ## Combine the notes of events with the same start date
            existing_notes = merged_nodes[start_dt]['notes']
            new_notes = value['notes'].replace('<ul>', '').replace('</ul>', '')  ## Strip outer tags to merge correctly
            merged_nodes[start_dt]['notes'] = existing_notes.replace('</ul>', '') + new_notes + '</ul>'
    ## Convert the merged_nodes dictionary back into a list of merged events
    merged_events = list(merged_nodes.values())
    ## Save the merged events to a file if saveToFile path is provided
    if saveToFile:
        try:
            with open(saveToFile, 'w') as json_file:
                json_file.write(json.dumps(merged_events, indent=4))
        except IOError as e:
            ## Handle errors during file saving
            print(f"Error saving to file: {e}")
    ## Return the list of merged events
    return merged_events

def fetch_data(release_table_link, calendar_title, save_to_file=None):
    """
    Fetches data from the release table link and stores the processed events.

    :param release_table_link: Spreadsheet document id used to build the
        Google Sheets CSV export URL.
    :param calendar_title: Top-level key under which the events are stored.
    :param save_to_file: Optional; path to save the processed JSON data.
    :return: Dict mapping the calendar title to its events list.
    """
    ## Build the CSV export URL for the spreadsheet and fetch its events
    csv_url = f'https://docs.google.com/spreadsheets/d/{release_table_link}/export?format=csv'
    events = get_data(csv_url, calendar_title, save_to_file)

    ## Wrap the event list under the calendar title key
    return {calendar_title: {"events": events}}

def generate_file(file_data, file_out_path):
    """
    Generates a JSON file containing the processed event data.

    :param file_data: JSON-serializable object to persist.
    :param file_out_path: Path to save the final output JSON file.
    """
    try:
        ## Serialize first, then write in one go
        serialized = json.dumps(file_data, indent=4)
        with open(file_out_path, 'w') as out_file:
            out_file.write(serialized)
    except IOError as e:
        ## Report write failures without raising; callers treat generation
        ## as best-effort
        print(f"Error generating file: {e}")

def find_branch(calendar_title='NECTO DAILY UPDATE'):
    """
    Look up today's release branch in the generated ``releases.json``.

    :param calendar_title: Top-level key of the calendar to search;
        defaults to the key the workflow generates.
    :return: The ``branch`` value of the event whose ``start_dt`` matches
        today's date, or ``"new_branch"`` when nothing is scheduled today.
    """
    with open(os.path.join(os.path.dirname(__file__), 'releases.json')) as file:
        json_data = json.load(file)
    ## Zero-padded ISO date so it matches the "%Y-%m-%d" values produced by
    ## get_data. (The old f-string built e.g. "2025-2-5", which could never
    ## match, and the comparison used a hard-coded "2025-02-05" instead of
    ## today's date.)
    current_date = datetime.today().strftime("%Y-%m-%d")

    for release in json_data[calendar_title]["events"]:
        if release["start_dt"] == current_date:
            return release["branch"]

    ## No event scheduled for today
    return "new_branch"

if __name__ == "__main__":
    ## Set up argument parsing. The arguments are positional so the script
    ## matches the workflow invocation:
    ##   python scripts/release_calendar.py "NECTO DAILY UPDATE" <sheet id>
    ## (the previous required --title/--doc_link flags did not match that
    ## call and would make the script exit with a usage error).
    parser = argparse.ArgumentParser(description="Iterate through dates in a range and create calendar events if needed.")
    parser.add_argument("title", type=str, help="Event title for calendar.")
    parser.add_argument("doc_link", type=str, help="Spreadsheet table with release details - link.")

    ## Parse the arguments
    args = parser.parse_args()

    ## Update release calendar values
    fileData = fetch_data(args.doc_link, args.title)
    ## Then generate the input file for teamup API next to this script
    generate_file(fileData, os.path.join(os.path.dirname(__file__), 'releases.json'))
    ## Find the branch name from the json data for the current date and
    ## print it so the workflow can capture it
    print(find_branch())



0 comments on commit 37a5b59

Please sign in to comment.