Private
Public Access
1
0

export improvements

This commit is contained in:
2026-03-16 14:11:25 +01:00
parent db826f27e9
commit 41b0262681
8 changed files with 189 additions and 68 deletions

View File

@@ -828,6 +828,15 @@ class DateRangeWorkoutTypeForm(DateRangeForm):
typeselectchoices.append((wtype, verbose))
workouttype = forms.ChoiceField(initial='All', choices=typeselectchoices)
# add a radio button to select how in-stroke data should be treated
instrokedatachoices = (
('off', 'Do not export in-stroke data'),
('summary', 'Export summary per stroke'),
('downsampled', 'Export downsampled time series (16 points per stroke)'),
('companion', 'Export as companion .instroke.json file with full curve data per stroke'))
instrokedata = forms.ChoiceField(initial='off', choices=instrokedatachoices, label='In-stroke data export')

View File

@@ -360,9 +360,46 @@ def correct_intensity(workout):
import io
import zipfile
@app.task
def email_user_workouts_zip_chunk(rower, workout_ids, filename, instrokedata,
                                  part, total_parts, debug=False, **kwargs):
    """Export one chunk of workouts to a ZIP in MEDIA_ROOT and email a download link.

    Parameters
    ----------
    rower : Rower
        Owner of the workouts; the notification is sent to ``rower.user.email``.
    workout_ids : list[int]
        Primary keys of the workouts to include in this chunk.
    filename : str
        Name of the ZIP file, relative to ``settings.MEDIA_ROOT``.
    instrokedata : str
        In-stroke export mode forwarded to ``exporttofit`` ('off', 'summary',
        'downsampled' or 'companion').
    part, total_parts : int
        1-based chunk index and total number of chunks (used in the subject).

    Returns 1 on completion. A failure on an individual workout is logged and
    skipped so one bad workout cannot abort the whole chunk.
    """
    zip_file_path = os.path.join(settings.MEDIA_ROOT, filename)
    with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        for workout_id in workout_ids:
            try:
                workout = Workout.objects.get(id=workout_id)
                # fall back to 'generic' for workout types with no FIT sport mapping
                sport = mytypes.fitmapping.get(workout.workouttype, 'generic')
                fit_filename = f"workout_{sport}_{workout.id}_{workout.date.strftime('%Y%m%d')}.fit"
                rowdata = rdata(csvfile=workout.csvfilename)
                # res is assumed to be a dict with an optional 'companion_file'
                # key — TODO confirm exporttofit never returns None here
                res = rowdata.exporttofit(fit_filename, sport=sport, notes=workout.name,
                                          instroke_export=instrokedata)
                zip_file.write(fit_filename, arcname=fit_filename)
                os.remove(fit_filename)
                # exporttofit may also create a companion .instroke.json file
                # (instrokedata == 'companion'); add it to the ZIP and clean up.
                if res.get('companion_file'):
                    companion_filename = res['companion_file']
                    zip_file.write(companion_filename, arcname=os.path.basename(companion_filename))
                    os.remove(companion_filename)
            except Exception as e:
                dologging('export_all_workouts.log',
                          f"Error exporting workout {workout_id}: {e}")
                continue
    # BUG FIX: the query string previously did not carry the generated file
    # name, so the emailed link could not resolve the ZIP. Pass `filename`
    # so download_zip_file_view (request.GET['file']) can locate it.
    download_url = f"{SITE_URL}/rowers/workouts/download/?file={filename}"
    subject = f"Rowsandall Workouts Export (part {part} of {total_parts})"
    send_template_email(
        'Rowsandall <info@rowsandall.com>',
        [rower.user.email],
        subject,
        'workouts_export_email.html',
        {'download_url': download_url, 'filename': filename,
         'part': part, 'total_parts': total_parts},
    )
    return 1
@app.task
def email_all_user_workouts_zip(rower, start_date, end_date,
workouttype, debug=False, **kwargs):
workouttype, instrokedata, debug=False, **kwargs):
# Get all workouts for this user, optionally filtered by date range
workouts = Workout.objects.filter(user=rower).order_by('-date')
@@ -379,10 +416,11 @@ def email_all_user_workouts_zip(rower, start_date, end_date,
dologging('export_all_workouts.log', f"No workouts found for user {rower.user.id} in date range {start_date} to {end_date}")
return 0
# Create ZIP file in memory
zip_buffer = io.BytesIO()
export_date = datetime.datetime.now().strftime('%Y%m%d')
filename = f"{rower.user.username}_workouts_{export_date}_from_{start_date}_to_{end_date}_{uuid4().hex[:8]}.zip"
zip_file_path = os.path.join(settings.MEDIA_ROOT, filename)
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for workout in workouts:
try:
rowdata = rdata(csvfile=workout.csvfilename)
@@ -390,24 +428,22 @@ def email_all_user_workouts_zip(rower, start_date, end_date,
fit_filename = f"workout_{workouttype}_{workout.id}_{workout.date.strftime('%Y%m%d')}.fit"
# exporttofit creates a file, we need to add it to the zip_file
rowdata.exporttofit(fit_filename, sport=workouttype, notes=workout.name)
res = rowdata.exporttofit(fit_filename, sport=workouttype, notes=workout.name,
instroke_export=instrokedata)
zip_file.write(fit_filename, arcname=fit_filename)
os.remove(fit_filename)
# res is a dict. If res[companion_file] is not None,
# it contains the filename of the companion file that was
# created (e.g. for instroke data) which also needs to be added to the zip
if res.get('companion_file'):
companion_filename = res['companion_file']
zip_file.write(companion_filename, arcname=os.path.basename(companion_filename))
os.remove(companion_filename)
except Exception as e: # pragma: no cover
dologging('export_all_workouts.log', f"Error exporting workout {workout.id}: {str(e)}")
continue
# Save ZIP file to disk
export_date = datetime.datetime.now().strftime('%Y%m%d')
filename = f"{rower.user.username}_workouts_{export_date}_from_{start_date}_to_{end_date}_{uuid4().hex[:8]}.zip"
zip_file_path = os.path.join(settings.MEDIA_ROOT, filename)
try:
with open(zip_file_path, 'wb') as f:
f.write(zip_buffer.getvalue())
except Exception as e: # pragma: no cover
dologging('export_all_workouts.log', f"Error saving ZIP file: {str(e)}")
return 0
# Send email with download link
subject = "Rowsandall Workouts Export"

View File

@@ -9,7 +9,7 @@
<div class="row">
<div class="col-md-8 offset-md-2">
<h2>Export All Your Workouts</h2>
<p>Select a date range to export your workouts as a ZIP file containing individual CSV files.</p>
<p>Select a date range to export your workouts as a ZIP file containing individual CSV files. Please be considerate and download only the workout types you need, and do downloads in batches (e.g. one year at a time).</p>
<form method="post" enctype="multipart/form-data">
{% csrf_token %}

Binary file not shown.

View File

@@ -416,7 +416,6 @@ def myqueue(queue, function, *args, **kwargs):
job_id = str(uuid.uuid4())
kwargs['job_id'] = job_id
kwargs['jobkey'] = job_id
kwargs['timeout'] = 3600
dologging('queue.log',function.__name__)

View File

@@ -280,7 +280,6 @@ import io
def export_all_workouts_zip_view(request):
from datetime import datetime
r = getrower(request.user)
if request.method == 'GET':
form = DateRangeWorkoutTypeForm()
elif request.method == 'POST':
@@ -292,19 +291,39 @@ def export_all_workouts_zip_view(request):
startdate = form.cleaned_data['startdate']
enddate = form.cleaned_data['enddate']
workouttype = form.cleaned_data['workouttype']
instrokedata = form.cleaned_data['instrokedata']
myqueue(queuehigh, email_all_user_workouts_zip, r, startdate, enddate, workouttype)
workouts = (Workout.objects.filter(user=r)
.order_by('-date')
.filter(date__gte=startdate, date__lte=enddate))
if workouttype != 'All':
workouts = workouts.filter(workouttype=workouttype)
successmessage = "A download link will be sent to you per email"
messages.info(request, successmessage)
workout_ids = list(workouts.values_list('id', flat=True))
if not workout_ids:
messages.warning(request, "No workouts found for the selected date range.")
return render(request, "export_workouts_daterange.html", {'form': form})
chunks = [workout_ids[i:i+100] for i in range(0, len(workout_ids), 100)]
total_parts = len(chunks)
export_date = datetime.now().strftime('%Y%m%d')
for i, chunk in enumerate(chunks, start=1):
filename = (f"{r.user.username}_workouts_{export_date}"
f"_part{i}of{total_parts}_{uuid4().hex[:8]}.zip")
myqueue(queuelow, email_user_workouts_zip_chunk,
r, chunk, filename, instrokedata, i, total_parts,
job_timeout=600)
messages.info(request, "A download link (or multiple download links) will be "
"sent to you by email. This may take up to one hour.")
# return to export settings view
return render(request, "export_workouts_daterange.html", {'form': form})
def download_zip_file_view(request):
# This view would be called when the user clicks the download link in the email
zip_file_path = request.GET.get('file')
print("Requested ZIP file path:", zip_file_path) # Debugging statement
# add media folder
zip_file_path = os.path.join(settings.MEDIA_ROOT, zip_file_path)

View File

@@ -255,6 +255,7 @@ from rowers.rows import handle_uploaded_file, handle_uploaded_image
from rowers.plannedsessions import *
from rowers.tasks import handle_makeplot, handle_otwsetpower, handle_sendemailtcx, handle_sendemailcsv
from rowers.tasks import (
email_user_workouts_zip_chunk,
email_all_user_workouts_zip,
handle_intervals_updateworkout,
handle_post_workout_api,

View File

@@ -156,21 +156,46 @@ Alternatively, technically confident submitters may open a PR directly.
Even in Stage 1, users need a way to log in to like courses and have those likes synced to CrewNerd. The full intervals.icu OAuth flow is implemented from the start — it is not deferred to Stage 2.
**Confirmed by David Tinker (intervals.icu):** the OAuth bearer token doubles as a login indicator. A successful token exchange means the user is authenticated with intervals.icu. No separate server-side session table is needed — the Worker encrypts the athlete ID and access token together and stores them in an HTTP-only cookie.
**Login flow:**
1. User visits the course browser and clicks "Sign in with intervals.icu".
2. Worker redirects to `https://intervals.icu/oauth/authorize?client_id=...&scope=PROFILE_READ&response_type=code`.
2. Worker redirects to `https://intervals.icu/oauth/authorize?client_id=...&scope=PROFILE_READ,ACTIVITY_READ&response_type=code`.
3. intervals.icu redirects to `GET /oauth/callback?code=...`.
4. Worker exchanges code for tokens, fetches the athlete profile (`GET /api/v1/athlete/self`), and stores the session in D1 `user_sessions`.
5. Worker issues a platform API key (random 32-byte hex, stored in KV as `apikey:{key}` → `athlete_id`) and sets it as a secure cookie.
4. Worker exchanges code for tokens and fetches the athlete profile (`GET /api/v1/athlete/self`) to confirm identity and retrieve the athlete ID.
5. Worker encrypts `{athleteId, accessToken, refreshToken, expiresAt}` using AES-GCM with `TOKEN_ENCRYPTION_KEY`.
6. Worker sets the encrypted blob as an HTTP-only, Secure, SameSite=Lax cookie named `rn_session`.
7. On all subsequent authenticated requests, the Worker decrypts the cookie to recover the athlete ID and access token. No D1 lookup needed.
The CrewNerd API key is this platform-issued key — the same scheme as the current Rowsandall API key. It never expires unless the user explicitly revokes it or re-generates it from their profile page.
**D1 is not required in Stage 1.** The cookie carries all state needed for browser-based authentication. D1 is introduced in Stage 2 for the `is_organizer` flag and challenge-related state.
In Stage 1, the only scope required is `PROFILE_READ` (to identify the athlete). `ACTIVITY_READ` is added in Stage 2 for GPS validation. Both scopes should be requested in Stage 1 to avoid a second OAuth prompt when Stage 2 launches, if intervals.icu supports incremental scope grants — confirm with David Tinker (see open question 7).
**CrewNerd API key:**
**D1 tables required in Stage 1:**
CrewNerd authenticates via `Authorization: ApiKey {key}` header — it cannot use cookies. Rather than storing API keys in KV or D1, the key is derived deterministically from the athlete ID using HMAC-SHA256 with `TOKEN_ENCRYPTION_KEY` as the secret:
The `user_sessions` table (see Stage 2 schema) is created in Stage 1. The `is_organizer` column defaults to 0 and is unused until Stage 2.
```typescript
// Derive a deterministic CrewNerd API key from an intervals.icu athlete ID.
// The key is HMAC-SHA256(athleteId) keyed with `secret` (TOKEN_ENCRYPTION_KEY),
// encoded as unpadded base64url. The same (athleteId, secret) pair always
// yields the same key, so the Worker can verify a presented key by
// re-deriving and comparing — no KV/D1 storage or lookup needed.
async function apiKeyForAthlete(athleteId: string, secret: string): Promise<string> {
  const key = await crypto.subtle.importKey(
    'raw', new TextEncoder().encode(secret),
    { name: 'HMAC', hash: 'SHA-256' }, false, ['sign']
  );
  const sig = await crypto.subtle.sign(
    'HMAC', key, new TextEncoder().encode(athleteId)
  );
  // btoa emits standard base64; translate to the URL-safe alphabet
  // ('+'→'-', '/'→'_') and strip '=' padding.
  return btoa(String.fromCharCode(...new Uint8Array(sig)))
    .replace(/\+/g, '-').replace(/\//g, '_').replace(/=/g, '');
}
```
The same function run twice on the same athlete ID always produces the same key. On an incoming CrewNerd API request, the Worker derives the expected key from the athlete ID embedded in the request and compares — no storage or lookup needed. The API key is shown to the user on their profile page so they can configure CrewNerd.
**Scopes:** both `PROFILE_READ` and `ACTIVITY_READ` are requested at Stage 1 login. `ACTIVITY_READ` is not used until Stage 2 GPS validation, but requesting it upfront avoids a re-authorisation prompt when Stage 2 launches.
**Stage 1 secrets required:**
- `INTERVALS_CLIENT_ID`
- `INTERVALS_CLIENT_SECRET`
- `TOKEN_ENCRYPTION_KEY` (used for both cookie encryption and API key derivation)
### 1.5 KML generation
@@ -185,7 +210,24 @@ Pre-generated KML files are stored in `kml/{id}.kml` in the repository and serve
### 1.6 CrewNerd API surface
These four endpoints must be present and respond identically to the current Rowsandall endpoints. Authentication is via `Authorization: ApiKey {key}` header, matching the existing Rowsandall API key scheme. The API key is looked up in KV (`apikey:{key}` → `athlete_id`) to identify the user.
These endpoints must be present and respond identically to the current Rowsandall endpoints. Authentication is via `Authorization: ApiKey {key}` header — the same scheme CrewNerd uses today with Rowsandall, requiring no code change on Tony's side.
The API key is verified by re-deriving it from the athlete ID using `apiKeyForAthlete()` and comparing — no KV or D1 lookup needed.
**API key issuance for CrewNerd — no browser redirect needed:**
CrewNerd already holds the user's intervals.icu bearer token from its existing intervals.icu integration. Rather than requiring users to manually copy a key from a web page, CrewNerd can exchange its existing intervals.icu token for a rownative API key in a single background HTTP call:
```
POST /api/auth/crewnerd
Authorization: Bearer {intervals_access_token}
← {"api_key": "abc123..."}
```
The Worker verifies the bearer token by calling `GET https://intervals.icu/api/v1/athlete/self` with it, extracts the athlete ID from the response, derives the API key using `apiKeyForAthlete()`, and returns it. CrewNerd stores the key and uses it for all subsequent calls. From the user's perspective: tap "Connect to rownative.icu" in CrewNerd, done — no browser redirect, no manual key entry. This requires agreement with Tony Andrews on the endpoint design (see open question 1).
**Course endpoints:**
```
GET /api/courses/
@@ -238,10 +280,11 @@ Both repos are public on GitHub under a shared organisation (e.g. `rowing-course
**Cloudflare resources:**
- One Worker (free tier: 100k requests/day, 10ms CPU per invocation).
- One KV namespace: `ROWING_COURSES` (stores `apikey:{key}` → athlete_id, and `liked:{athlete_id}` → JSON array of course IDs).
- One D1 database: `rowing-courses-db` (created in Stage 1 for `user_sessions`; extended in Stage 2 for challenges and results).
- One KV namespace: `ROWING_COURSES` (stores `liked:{athlete_id}` → JSON array of course IDs).
- One GitHub App (for opening PRs from the Worker) — alternatively a fine-grained Personal Access Token scoped to the library repo only.
- Worker secret: `INTERVALS_CLIENT_ID`, `INTERVALS_CLIENT_SECRET` (needed from Stage 1 for login).
- Worker secrets: `INTERVALS_CLIENT_ID`, `INTERVALS_CLIENT_SECRET`, `TOKEN_ENCRYPTION_KEY`.
D1 is **not required in Stage 1**. Authentication state is carried in an encrypted HTTP-only cookie; liked-course state lives in KV. D1 is introduced in Stage 2 for challenges, results, standards, and the organiser flag.
**GitHub Actions workflows:**
- `validate.yml` — triggered on PRs modifying `courses/**`; runs `validate_course.py`; posts result as PR comment; auto-merges on pass.
@@ -249,11 +292,10 @@ Both repos are public on GitHub under a shared organisation (e.g. `rowing-course
**Local development:**
```bash
git clone https://github.com/rowing-courses/rowing-courses-worker
cd rowing-courses-worker
git clone https://github.com/rownative/worker
cd worker
npm install
wrangler d1 execute rowing-courses-db --local --file=migrations/001_sessions.sql
wrangler dev # Worker on localhost:8787 with local KV and D1
wrangler dev # Worker on localhost:8787 with local KV
```
No external credentials needed for local development. The Worker fetches course data from the live GitHub raw URLs by default; a `LOCAL_COURSES_PATH` env variable can redirect to a local checkout of the library repo.
@@ -282,9 +324,23 @@ my-rowsandall-courses.zip
No account data, email addresses, or activity data is included in the export.
On the new platform, an authenticated user (logged in via intervals.icu OAuth) uploads this ZIP. The Worker:
1. Parses `manifest.json`.
2. Submits owned courses as provisional PR entries (same pipeline as a normal course submission).
3. Restores the liked-course list by writing `liked:{athlete_id}` to KV.
2. For each ID in `owned`: checks whether the course ID already exists in `index.json`.
- **If the ID exists** — the course geometry was already imported via the bulk export. No PR is opened. The course is noted as "already in library".
- **If the ID does not exist** — submits the KML as a new provisional course PR (same pipeline as a normal course submission).
3. Restores the liked-course list by writing all IDs from `manifest.liked` to `liked:{athlete_id}` in KV, regardless of whether the IDs exist in the library yet. (A liked ID for a course not yet in the library is harmless — it will resolve once the course is added.)
4. Returns a summary to the user: how many owned courses were already in the library, how many new PRs were opened, and confirmation that the liked list was restored.
Example response shown to the user:
```
Migration complete:
12 owned courses already in the library ✓
1 new course submitted for review (provisional)
23 liked courses restored to your account ✓
```
This deduplication is important because the bulk export from Rowsandall runs before users migrate, so the vast majority of owned courses will already be present. Without this check, every migrating user would open duplicate PRs for courses already in the library.
Users are notified of this migration path via the Rowsandall shutdown announcement and a banner on the courses page. The export ZIP can be generated at any time before Rowsandall shuts down.
@@ -307,17 +363,18 @@ Users are notified of this migration path via the Rowsandall shutdown announceme
- [ ] Initial course data committed (migrated from Rowsandall)
**Cloudflare Worker:**
- [ ] `wrangler.toml` with KV and D1 bindings
- [ ] `migrations/001_sessions.sql` (user_sessions table)
- [ ] `wrangler.toml` with KV binding and secrets
- [ ] intervals.icu OAuth login flow (`GET /oauth/authorize`, `GET /oauth/callback`)
- [ ] Platform API key issuance and storage in KV
- [ ] Encrypted HTTP-only cookie (`rn_session`) — AES-GCM encrypt/decrypt of `{athleteId, accessToken, refreshToken, expiresAt}`
- [ ] HMAC-derived CrewNerd API key (`apiKeyForAthlete()`) — shown on user profile page
- [ ] Platform API key verification on incoming CrewNerd requests
- [ ] `GET /api/courses/` — course index with geo filtering
- [ ] `GET /api/courses/{id}/` — single course KML
- [ ] `GET /api/courses/kml/liked/` — liked courses KML bundle
- [ ] `GET /api/courses/kml/` — multi-course KML bundle
- [ ] `POST /rowers/courses/{id}/follow/` and `/unfollow/`
- [ ] `POST /api/courses/submit` — KML upload → GitHub PR
- [ ] `POST /api/courses/import-zip` — ZIP import (owned courses + liked list)
- [ ] `POST /api/courses/import-zip` — ZIP import: check each owned ID against `index.json`, open PR only for IDs not already present; restore liked list in KV unconditionally; return summary to user
- [ ] KML generation logic (port of `courses.py`: `coursetokml`, `getcoursefolder`, `crewnerdify`, `sort_coordinates_ccw`)
**GitHub Pages site:**
@@ -407,15 +464,14 @@ CREATE TABLE course_standards (
**`user_sessions` table:**
```sql
CREATE TABLE user_sessions (
session_token TEXT PRIMARY KEY,
athlete_id TEXT NOT NULL,
access_token_enc TEXT NOT NULL, -- AES-GCM encrypted
refresh_token_enc TEXT NOT NULL,
expires_at TEXT NOT NULL,
athlete_id TEXT PRIMARY KEY, -- intervals.icu athlete id
refresh_token_enc TEXT NOT NULL, -- AES-GCM encrypted, for token refresh
is_organizer INTEGER NOT NULL DEFAULT 0
);
```
Note: the access token and session state are carried in an encrypted HTTP-only cookie (set at login) rather than in D1. D1 stores only the refresh token (needed to obtain new access tokens when the cookie expires) and the `is_organizer` flag (needed for the organiser panel). The cookie approach was confirmed by David Tinker at intervals.icu as the recommended pattern.
### 2.2 Handicap scoring
The scoring logic from `rowers/scoring.py` translates directly. At result submission, the Worker:
@@ -429,9 +485,7 @@ Standard collections are uploaded as CSV (identical format to the existing Rowsa
### 2.3 GPS validation via intervals.icu OAuth
The OAuth infrastructure is already in place from Stage 1 (login). Stage 2 extends the token scope to include `ACTIVITY_READ` and uses the stored access token to fetch GPS data for course time validation.
If intervals.icu supports requesting multiple scopes in the initial grant (confirm with David Tinker — see open question 7), both `PROFILE_READ` and `ACTIVITY_READ` should be requested at Stage 1 login to avoid a re-authorisation prompt in Stage 2.
The OAuth infrastructure is already in place from Stage 1 (login). Stage 2 uses the `ACTIVITY_READ` scope already requested at Stage 1 login, and the stored refresh token in D1 to obtain fresh access tokens when the cookie has expired.
**Result submission flow:**
@@ -608,27 +662,30 @@ wrangler dev
## Open questions for developer kickoff
1. **CrewNerd base URL configurability.** Confirm with Tony Andrews (CrewNerd) whether a configurable Rowsandall base URL already exists in the app, or whether a CrewNerd release is needed before Stage 1 is useful. This affects the Stage 1 deadline and should be the first external conversation to have.
1. **CrewNerd integration design — confirm with Tony Andrews.** Two sub-questions:
a. **Auth endpoint.** The proposed UX requires no browser redirect and no manual key entry. CrewNerd already holds the user's intervals.icu bearer token from its existing intervals.icu integration. A single background call is all that is needed:
```
POST /api/auth/crewnerd
Authorization: Bearer {intervals_access_token}
← {"api_key": "abc123..."}
```
The Worker verifies the token against intervals.icu (`GET /api/v1/athlete/self`), derives the API key using `apiKeyForAthlete()`, and returns it. From the user's perspective: tap "Connect to rownative.icu" in CrewNerd, done. Confirm with Tony that (i) CrewNerd can make this call on the user's behalf and store the returned key, and (ii) users who have already connected CrewNerd to intervals.icu do not need to re-authenticate — the existing token can be reused immediately.
b. **Base URL configurability.** Confirm whether a configurable Rowsandall base URL already exists in CrewNerd, or whether a new release is needed before Stage 1 is usable. This is the Stage 1 deadline driver.
2. **intervals.icu OAuth app registration.** Confirm with David Tinker (`@david` on the intervals.icu forum) whether a community/open-source OAuth app can be registered for this project, or whether each instance operator registers separately. Also confirm the available scopes — specifically whether `PROFILE_READ` and `ACTIVITY_READ` can be requested in the same grant or require separate authorisation flows. The redirect URI will be `https://{worker-domain}/oauth/callback`.
3. **Course ID scheme.** Rowsandall uses integer primary keys. The new library uses string identifiers derived from filenames. The migration script should assign stable IDs matching the original Rowsandall IDs (e.g. `"066"` for course 66) to preserve any bookmarked course URLs and to make the user ZIP migration (see point 8) unambiguous.
3. **Course ID scheme — resolved.** Rowsandall integer IDs are preserved exactly as strings (e.g. course 66 → `courses/66.json`, `"id": "66"`). No zero-padding. This ensures liked-course migration works without a translation table.
4. **Cloudflare account structure.** Decision needed on whether to use an existing personal Cloudflare account (quick start, harder to transfer) or set up a dedicated organisation account (recommended for community maintainability, ~30 minutes overhead). Recommendation: dedicated account under a shared organisation email, with at least two account members from day one.
4. **Cloudflare account structure — resolved.** Dedicated organisation account created under the `rownative` name. Tony Andrews added as second account member.
5. **GitHub organisation name.** Needs to be chosen before any repos are created; renaming later breaks clone URLs for all contributors.
5. **GitHub organisation name — resolved.** `rownative` org created; `rownative/courses` and `rownative/worker` repos to be initialised.
6. **Standard collection library.** Should a set of canonical handicap tables (FISA masters, HOCR categories, KNRB) be included in the initial data migration, or left for organiser community upload? Including them reduces friction for the first challenge organisers to migrate.
7. **intervals.icu OAuth scope strategy.** If `PROFILE_READ` and `ACTIVITY_READ` can be combined in a single OAuth grant, both should be requested at Stage 1 login. This avoids a re-authorisation prompt when Stage 2 launches. If intervals.icu only supports one scope per grant, Stage 2 users will need to re-authorise — acceptable but slightly awkward. Confirm with David Tinker before finalising the Stage 1 OAuth implementation.
7. **intervals.icu OAuth scope strategy — resolved.** Confirmed by David Tinker: a successful token exchange is sufficient as a login indicator. The recommended approach is to encrypt the athlete ID and access token and store them in an HTTP-only cookie. Both `PROFILE_READ` and `ACTIVITY_READ` are requested in the single Stage 1 OAuth grant — no re-authorisation prompt will be needed when Stage 2 launches.
8. **Rowsandall ZIP export feature.** Build a "Download my courses" button in
the existing Rowsandall Django app, producing a ZIP of owned course KML files
plus a `manifest.json` with owned and liked course ID lists (no account data,
no activity data). This is a Rowsandall deliverable, not a new-platform
deliverable, and should be scoped and scheduled separately. The new
platform's import endpoint (parse ZIP, submit courses as provisional PRs,
restore liked list in KV) is a Stage 1 deliverable. The Rowsandall export
should be live well before the shutdown announcement so users have time to
act on it.
8. **Rowsandall ZIP export feature — resolved.** Implemented in `rowers/views/racesviews.py` as `course_export_zip_view()`. Produces a ZIP of owned course KML files plus a `manifest.json` listing owned and liked course IDs. The Rowsandall export endpoint is live; the rownative import endpoint (parse ZIP, submit courses as provisional PRs, restore liked list in KV) remains a Stage 1 deliverable.