From f37382d2b83c58f4f1f2b6a24190954c87ebf636 Mon Sep 17 00:00:00 2001 From: Emily Boudreaux Date: Sun, 8 Mar 2026 16:48:58 -0400 Subject: [PATCH] feat(main): commit --- .DS_Store | Bin 0 -> 10244 bytes .coverage | Bin 53248 -> 53248 bytes Dockerfile | 2 +- api/api.py | 2 + api/routers/channel.py | 157 ++- api/routers/schedule.py | 31 +- api/routers/sources.py | 178 +++ api/routers/user.py | 6 + api/tests/test_channel.py | 70 +- api/tests/test_sources.py | 292 +++++ core/management/commands/cache_upcoming.py | 52 + core/management/commands/state.py | 53 +- ...002_mediaitem_cache_expires_at_and_more.py | 45 + core/models.py | 7 + core/services/cache.py | 140 +++ core/services/scheduler.py | 260 ++-- core/services/youtube.py | 244 ++++ db.sqlite3 | Bin 561152 -> 565248 bytes frontend/src/App.jsx | 47 +- frontend/src/api.js | 69 +- frontend/src/components/ChannelTuner.jsx | 133 +- frontend/src/components/Guide.jsx | 214 +++- frontend/src/components/Settings.jsx | 846 +++++++++++++ frontend/src/index.css | 1085 ++++++++++++++++- frontend/vite.config.js | 5 + pyproject.toml | 1 + pytv/settings.py | 3 + pytv/urls.py | 5 + uv.lock | 11 + 29 files changed, 3735 insertions(+), 223 deletions(-) create mode 100644 .DS_Store create mode 100644 api/routers/sources.py create mode 100644 api/tests/test_sources.py create mode 100644 core/management/commands/cache_upcoming.py create mode 100644 core/migrations/0002_mediaitem_cache_expires_at_and_more.py create mode 100644 core/services/cache.py create mode 100644 core/services/youtube.py create mode 100644 frontend/src/components/Settings.jsx diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..ec6114f377bb0a5cb13146c5681f0b71586b66d1 GIT binary patch literal 10244 zcmeHMU2GIp6u#fIzziMe07VLR!a^bZWP$S6mOr!oQ$V1=wsczx%Wh|+1JjwZGrI*! 
diff --git a/.coverage b/.coverage
index 4ff2e6d252c300eac6498f84af238f1dda0d572e..f9a18462ee38409112d273a21cfc1ac53e48ff45 100644
GIT binary patch
'AiringSchema': + media_path = None + if airing.media_item: + raw_path = airing.media_item.cached_file_path or airing.media_item.file_path + if raw_path: + if raw_path.startswith("http://") or raw_path.startswith("https://"): + media_path = raw_path + else: + from django.conf import settings + import os + try: + rel_path = os.path.relpath(raw_path, settings.MEDIA_ROOT) + if not rel_path.startswith("..") and not os.path.isabs(rel_path): + base = settings.MEDIA_URL.rstrip('/') + media_path = f"{base}/{rel_path}" + else: + media_path = raw_path + except ValueError: + media_path = raw_path + + return AiringSchema( + id=airing.id, + media_item_title=airing.media_item.title if airing.media_item else 'Unknown', + media_item_path=media_path, + starts_at=airing.starts_at, + ends_at=airing.ends_at, + slot_kind=airing.slot_kind, + status=airing.status, + ) + @router.get("/", response=List[ChannelSchema]) def list_channels(request): return Channel.objects.all() @@ -45,3 +107,96 @@ def create_channel(request, payload: ChannelCreateSchema): description=payload.description ) return 201, channel + +@router.patch("/{channel_id}", response=ChannelSchema) +def update_channel(request, channel_id: int, payload: ChannelUpdateSchema): + channel = get_object_or_404(Channel, id=channel_id) + for attr, value in payload.dict(exclude_unset=True).items(): + setattr(channel, attr, value) + channel.save() + return channel + +@router.delete("/{channel_id}", response={204: None}) +def delete_channel(request, channel_id: int): + channel = get_object_or_404(Channel, id=channel_id) + channel.delete() + return 204, None + +@router.get("/{channel_id}/sources", response=List[ChannelSourceRuleSchema]) +def list_channel_sources(request, channel_id: int): + channel = get_object_or_404(Channel, id=channel_id) + rules = ChannelSourceRule.objects.filter(channel=channel, media_source__isnull=False).select_related('media_source') + return [ + ChannelSourceRuleSchema( + id=r.id, + source_id=r.media_source.id, + source_name=r.media_source.name, + rule_mode=r.rule_mode, + weight=float(r.weight), + ) + for r in rules + ] + +@router.post("/{channel_id}/sources", response={201: ChannelSourceRuleSchema}) +def assign_source_to_channel(request, channel_id: int, payload: ChannelSourceAssignSchema): + channel = get_object_or_404(Channel, id=channel_id) + source = get_object_or_404(MediaSource, id=payload.source_id) + rule = ChannelSourceRule.objects.create( + channel=channel, + media_source=source, + rule_mode=payload.rule_mode, + weight=payload.weight, + ) + return 201, ChannelSourceRuleSchema( + id=rule.id, + source_id=source.id, + source_name=source.name, + rule_mode=rule.rule_mode, + weight=float(rule.weight), + ) + +@router.delete("/{channel_id}/sources/{rule_id}", response={204: None}) +def remove_source_from_channel(request, channel_id: int, rule_id: int): + rule = get_object_or_404(ChannelSourceRule, id=rule_id, channel_id=channel_id) + rule.delete() + return 204, None + +@router.get("/{channel_id}/now", response=Optional[AiringSchema]) +def channel_now_playing(request, channel_id: int): + """Return the Airing currently on-air for this channel, or null.""" + channel = get_object_or_404(Channel, id=channel_id) + # Using a 1-second buffer to handle boundary conditions smoothly + now = timezone.now() + airing = ( + Airing.objects + .filter(channel=channel, starts_at__lte=now, ends_at__gt=now) + .select_related('media_item') + .first() + ) + if airing is None: + return None + return AiringSchema.from_airing(airing) + 
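# A minimal sketch of how from_airing (above) resolves media_item_path, assuming
# MEDIA_ROOT="/data/media" and MEDIA_URL="/media/" — both values are hypothetical;
# the real ones come from pytv/settings.py:
#
#     raw_path   = "/data/media/dQw4w9WgXcQ.mp4"              # cached_file_path
#     rel_path   = os.path.relpath(raw_path, "/data/media")   # -> "dQw4w9WgXcQ.mp4"
#     media_path = "/media/".rstrip("/") + f"/{rel_path}"     # -> "/media/dQw4w9WgXcQ.mp4"
#
# http(s) URLs and paths that resolve outside MEDIA_ROOT are returned unchanged.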
+@router.get("/{channel_id}/airings", response=List[AiringSchema]) +def channel_airings(request, channel_id: int, hours: int = 4): + """ + Return Airings for this channel that overlap with the window: + [now - 2 hours, now + {hours} hours] + """ + channel = get_object_or_404(Channel, id=channel_id) + now = timezone.now() + window_start = now - timedelta(hours=2) # Look back 2h for context + window_end = now + timedelta(hours=hours) + + # Logic for overlap: starts_at < window_end AND ends_at > window_start + airings = ( + Airing.objects + .filter( + channel=channel, + starts_at__lt=window_end, + ends_at__gt=window_start + ) + .select_related('media_item') + .order_by('starts_at') + ) + return [AiringSchema.from_airing(a) for a in airings] diff --git a/api/routers/schedule.py b/api/routers/schedule.py index 64de92d..8d1014c 100644 --- a/api/routers/schedule.py +++ b/api/routers/schedule.py @@ -1,6 +1,6 @@ from ninja import Router, Schema from typing import List, Optional -from core.models import ScheduleTemplate, Channel +from core.models import ScheduleTemplate, Channel, ScheduleBlock from django.shortcuts import get_object_or_404 from datetime import date @@ -49,6 +49,19 @@ def create_schedule_template(request, payload: ScheduleTemplateCreateSchema): priority=payload.priority, is_active=payload.is_active ) + + # Create a default 24/7 programming block automatically to avoid + # complex block management in the UI + from datetime import time + ScheduleBlock.objects.create( + schedule_template=template, + name="Default 24/7 Block", + block_type=ScheduleBlock.BlockType.PROGRAMMING, + start_local_time=time(0, 0, 0), + end_local_time=time(23, 59, 59), + day_of_week_mask=127, + ) + return 201, template class GenerateScheduleSchema(Schema): @@ -63,3 +76,19 @@ def generate_schedule(request, channel_id: int, payload: GenerateScheduleSchema) generator = ScheduleGenerator(channel=channel) airings_created = generator.generate_for_date(payload.target_date) return {"status": "success", "airings_created": airings_created} + +@router.delete("/template/{template_id}", response={204: None}) +def delete_schedule_template(request, template_id: int): + template = get_object_or_404(ScheduleTemplate, id=template_id) + template.delete() + return 204, None + +@router.post("/generate-today/{channel_id}") +def generate_schedule_today(request, channel_id: int): + """Convenience endpoint: generates today's schedule for a channel.""" + from datetime import date + from core.services.scheduler import ScheduleGenerator + channel = get_object_or_404(Channel, id=channel_id) + generator = ScheduleGenerator(channel=channel) + airings_created = generator.generate_for_date(date.today()) + return {"status": "success", "airings_created": airings_created} diff --git a/api/routers/sources.py b/api/routers/sources.py new file mode 100644 index 0000000..e29c4bb --- /dev/null +++ b/api/routers/sources.py @@ -0,0 +1,178 @@ +""" +API router for MediaSource management (with YouTube support). 
+ +Endpoints: + GET /api/sources/ – list all sources + POST /api/sources/ – create a new source + POST /api/sources/cache-upcoming – download videos for upcoming airings + GET /api/sources/download-status – snapshot of all YouTube item cache state + GET /api/sources/{id} – retrieve one source + DELETE /api/sources/{id} – delete a source + POST /api/sources/{id}/sync – trigger yt-dlp metadata sync + POST /api/sources/{id}/download – download a specific media item by ID + +IMPORTANT: literal paths (/cache-upcoming, /download-status) MUST be declared +before parameterised paths (/{source_id}) so Django Ninja's URL dispatcher +matches them first. +""" + +from typing import Optional, List +from datetime import datetime + +from django.shortcuts import get_object_or_404 +from ninja import Router +from pydantic import BaseModel + +from core.models import Library, MediaSource, MediaItem +from core.services.youtube import sync_source, YOUTUBE_SOURCE_TYPES +from core.services.cache import run_cache, get_download_status as _get_download_status + +router = Router(tags=["sources"]) + + +# --------------------------------------------------------------------------- +# Schemas +# --------------------------------------------------------------------------- + +class MediaSourceIn(BaseModel): + library_id: int + name: str + source_type: str # one of MediaSource.SourceType string values + uri: str + is_active: bool = True + scan_interval_minutes: Optional[int] = None + +class MediaSourceOut(BaseModel): + id: int + library_id: int + name: str + source_type: str + uri: str + is_active: bool + scan_interval_minutes: Optional[int] + last_scanned_at: Optional[datetime] + created_at: datetime + + class Config: + from_attributes = True + +class SyncResult(BaseModel): + created: int + updated: int + skipped: int + max_videos: Optional[int] = None + +class CacheRunResult(BaseModel): + pruned: int + downloaded: int + already_cached: int + failed: int + items: List[dict] + +class DownloadStatusResult(BaseModel): + total: int + cached: int + items: List[dict] + + +# --------------------------------------------------------------------------- +# Collection endpoints (no path parameters — must come FIRST) +# --------------------------------------------------------------------------- + +@router.get("/", response=list[MediaSourceOut]) +def list_sources(request): + """List all media sources across all libraries.""" + return list(MediaSource.objects.select_related("library").all()) + + +@router.post("/", response={201: MediaSourceOut}) +def create_source(request, payload: MediaSourceIn): + """Register a new media source (including YouTube channel/playlist URLs).""" + library = get_object_or_404(Library, id=payload.library_id) + source = MediaSource.objects.create( + library=library, + name=payload.name, + source_type=payload.source_type, + uri=payload.uri, + is_active=payload.is_active, + scan_interval_minutes=payload.scan_interval_minutes, + ) + return 201, source + + +@router.post("/cache-upcoming", response=CacheRunResult) +def trigger_cache_upcoming(request, hours: int = 24, prune_only: bool = False): + """ + Download YouTube videos for airings scheduled within the next `hours` hours. 
+ Equivalent to running: python manage.py cache_upcoming --hours N + + Query params: + hours – scan window in hours (default: 24) + prune_only – if true, only delete stale cache files; skip downloads + """ + result = run_cache(hours=hours, prune_only=prune_only) + return result + + +@router.get("/download-status", response=DownloadStatusResult) +def download_status(request): + """ + Return a snapshot of all YouTube-backed MediaItems with their local + cache status (downloaded vs not downloaded). + """ + return _get_download_status() + + +# --------------------------------------------------------------------------- +# Single-item endpoints (path parameters — AFTER all literal paths) +# --------------------------------------------------------------------------- + +@router.get("/{source_id}", response=MediaSourceOut) +def get_source(request, source_id: int): + """Retrieve a single media source by ID.""" + return get_object_or_404(MediaSource, id=source_id) + + +@router.delete("/{source_id}", response={204: None}) +def delete_source(request, source_id: int): + """Delete a media source and all its associated media items.""" + source = get_object_or_404(MediaSource, id=source_id) + source.delete() + return 204, None + + +@router.post("/{source_id}/sync", response=SyncResult) +def trigger_sync(request, source_id: int, max_videos: Optional[int] = None): + """ + Trigger a yt-dlp metadata sync for a YouTube source. + Phase 1 only — video METADATA is fetched and upserted as MediaItem rows. + No video files are downloaded here. + + Query params: + max_videos – override the default cap (channel default: 50, playlist: 200) + """ + source = get_object_or_404(MediaSource, id=source_id) + if source.source_type not in YOUTUBE_SOURCE_TYPES: + from ninja.errors import HttpError + raise HttpError(400, "Source is not a YouTube source type.") + result = sync_source(source, max_videos=max_videos) + return {**result, "max_videos": max_videos} + + +@router.post("/{item_id}/download", response=dict) +def download_item(request, item_id: int): + """ + Immediately download a single YouTube MediaItem to local cache. + The item must already exist (synced) as a MediaItem in the database. 
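    Illustrative request/response (the path value is hypothetical; the keys
    mirror the return statement and HttpError branches below):

        POST /api/sources/123/download
        200 -> {"status": "downloaded", "path": "/data/media/dQw4w9WgXcQ.mp4"}
        400 -> item exists but has no youtube_video_id
        500 -> yt-dlp raised while downloading ("Download failed: ...")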
+ """ + from core.services.youtube import download_for_airing + item = get_object_or_404(MediaItem, id=item_id) + if not item.youtube_video_id: + from ninja.errors import HttpError + raise HttpError(400, "MediaItem is not a YouTube video.") + try: + path = download_for_airing(item) + return {"status": "downloaded", "path": str(path)} + except Exception as exc: + from ninja.errors import HttpError + raise HttpError(500, f"Download failed: {exc}") diff --git a/api/routers/user.py b/api/routers/user.py index b2642a5..f8e6b04 100644 --- a/api/routers/user.py +++ b/api/routers/user.py @@ -50,3 +50,9 @@ def update_user(request, user_id: int, payload: UserUpdateSchema): setattr(user, attr, value) user.save() return user + +@router.delete("/{user_id}", response={204: None}) +def delete_user(request, user_id: int): + user = get_object_or_404(AppUser, id=user_id) + user.delete() + return 204, None diff --git a/api/tests/test_channel.py b/api/tests/test_channel.py index 8364475..bbd6678 100644 --- a/api/tests/test_channel.py +++ b/api/tests/test_channel.py @@ -1,6 +1,9 @@ import pytest from django.test import Client -from core.models import AppUser, Library, Channel +from core.models import AppUser, Library, Channel, MediaSource, MediaItem, Airing +from django.utils import timezone +from datetime import timedelta +import uuid @pytest.fixture def client(): @@ -63,5 +66,68 @@ def test_create_channel(client, user, library): assert data["slug"] == "new-api-ch" # Verify it hit the DB - assert Channel.objects.count() == 1 assert Channel.objects.get(id=data["id"]).name == "New API Channel" + +@pytest.fixture +def media_source(db, library): + return MediaSource.objects.create( + library=library, + name="Test Source", + source_type="local_directory", + uri="/mock/test" + ) + +@pytest.fixture +def media_item_youtube(db, media_source): + return MediaItem.objects.create( + media_source=media_source, + title="YT Test Video", + item_kind="video", + runtime_seconds=600, + file_path="https://www.youtube.com/watch?v=dQw4w9WgXcQ", + ) + +@pytest.mark.django_db +def test_channel_now_playing_uncached_media_path(client, channel, media_item_youtube): + now = timezone.now() + Airing.objects.create( + channel=channel, + media_item=media_item_youtube, + starts_at=now - timedelta(minutes=5), + ends_at=now + timedelta(minutes=5), + slot_kind="program", + status="playing", + generation_batch_uuid=uuid.uuid4() + ) + + response = client.get(f"/api/channel/{channel.id}/now") + assert response.status_code == 200 + data = response.json() + assert data["media_item_title"] == "YT Test Video" + # Should use the raw file_path since there is no cached_file_path + assert data["media_item_path"] == "https://www.youtube.com/watch?v=dQw4w9WgXcQ" + +@pytest.mark.django_db +def test_channel_now_playing_cached_media_path(client, channel, media_item_youtube): + from django.conf import settings + import os + media_item_youtube.cached_file_path = os.path.join(settings.MEDIA_ROOT, "dQw4w9WgXcQ.mp4") + media_item_youtube.save() + + now = timezone.now() + Airing.objects.create( + channel=channel, + media_item=media_item_youtube, + starts_at=now - timedelta(minutes=5), + ends_at=now + timedelta(minutes=5), + slot_kind="program", + status="playing", + generation_batch_uuid=uuid.uuid4() + ) + + response = client.get(f"/api/channel/{channel.id}/now") + assert response.status_code == 200 + data = response.json() + assert data["media_item_title"] == "YT Test Video" + # Should resolve the cached_file_path to a web-accessible MEDIA_URL + assert 
data["media_item_path"] == "/dQw4w9WgXcQ.mp4" diff --git a/api/tests/test_sources.py b/api/tests/test_sources.py new file mode 100644 index 0000000..7a6f3ec --- /dev/null +++ b/api/tests/test_sources.py @@ -0,0 +1,292 @@ +""" +Tests for the /api/sources/ router. + +Covers: + - Listing sources + - Creating local and YouTube-type sources + - Syncing a YouTube source (mocked yt-dlp call) + - Deleting a source + - Attempting to sync a non-YouTube source (should 400) +""" + +import pytest +from unittest.mock import patch, MagicMock +from django.test import Client +from core.models import AppUser, Library, MediaSource, MediaItem + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture +def client(): + return Client() + + +@pytest.fixture +def user(db): + return AppUser.objects.create_user( + username="srcuser", email="src@pytv.local", password="password" + ) + + +@pytest.fixture +def library(db, user): + return Library.objects.create( + owner_user=user, + name="Source Test Library", + visibility="public", + ) + + +@pytest.fixture +def local_source(db, library): + return MediaSource.objects.create( + library=library, + name="Local Movies", + source_type=MediaSource.SourceType.LOCAL_DIRECTORY, + uri="/mnt/movies", + ) + + +@pytest.fixture +def youtube_source(db, library): + return MediaSource.objects.create( + library=library, + name="ABC News", + source_type=MediaSource.SourceType.YOUTUBE_CHANNEL, + uri="https://www.youtube.com/@ABCNews", + ) + + +# --------------------------------------------------------------------------- +# Listing +# --------------------------------------------------------------------------- + +@pytest.mark.django_db +def test_list_sources_empty(client): + """Listing when no sources exist returns an empty array.""" + response = client.get("/api/sources/") + assert response.status_code == 200 + assert response.json() == [] + + +@pytest.mark.django_db +def test_list_sources_returns_all(client, local_source, youtube_source): + response = client.get("/api/sources/") + assert response.status_code == 200 + data = response.json() + assert len(data) == 2 + names = {s["name"] for s in data} + assert names == {"Local Movies", "ABC News"} + + +# --------------------------------------------------------------------------- +# Creation +# --------------------------------------------------------------------------- + +@pytest.mark.django_db +def test_create_local_source(client, library): + payload = { + "library_id": library.id, + "name": "My Movies", + "source_type": "local_directory", + "uri": "/mnt/media/movies", + "is_active": True, + } + response = client.post( + "/api/sources/", data=payload, content_type="application/json" + ) + assert response.status_code == 201 + data = response.json() + assert data["name"] == "My Movies" + assert data["source_type"] == "local_directory" + assert MediaSource.objects.filter(id=data["id"]).exists() + + +@pytest.mark.django_db +def test_create_youtube_channel_source(client, library): + """Registering a YouTube channel URL as a media source.""" + payload = { + "library_id": library.id, + "name": "ABC News", + "source_type": "youtube_channel", + "uri": "https://www.youtube.com/@ABCNews", + "is_active": True, + } + response = client.post( + "/api/sources/", data=payload, content_type="application/json" + ) + assert response.status_code == 201 + data = response.json() + assert data["source_type"] == "youtube_channel" + 
assert "@ABCNews" in data["uri"] + + # Verify it's in the DB with correct type + src = MediaSource.objects.get(id=data["id"]) + assert src.source_type == MediaSource.SourceType.YOUTUBE_CHANNEL + + +@pytest.mark.django_db +def test_create_youtube_playlist_source(client, library): + """Registering a YouTube playlist URL as a media source.""" + payload = { + "library_id": library.id, + "name": "Tech Talks", + "source_type": "youtube_playlist", + "uri": "https://www.youtube.com/playlist?list=PLFgquLnL59akA2PflFpeQG9L01VFg90wS", + "is_active": True, + } + response = client.post( + "/api/sources/", data=payload, content_type="application/json" + ) + assert response.status_code == 201 + data = response.json() + assert data["source_type"] == "youtube_playlist" + + +@pytest.mark.django_db +def test_create_source_invalid_library(client): + """Creating a source with a non-existent library should 404.""" + payload = { + "library_id": 99999, + "name": "Orphan", + "source_type": "local_directory", + "uri": "/nope", + } + response = client.post( + "/api/sources/", data=payload, content_type="application/json" + ) + assert response.status_code == 404 + + +# --------------------------------------------------------------------------- +# Retrieval +# --------------------------------------------------------------------------- + +@pytest.mark.django_db +def test_get_source(client, youtube_source): + response = client.get(f"/api/sources/{youtube_source.id}") + assert response.status_code == 200 + data = response.json() + assert data["id"] == youtube_source.id + assert data["source_type"] == "youtube_channel" + + +@pytest.mark.django_db +def test_get_source_not_found(client): + response = client.get("/api/sources/99999") + assert response.status_code == 404 + + +# --------------------------------------------------------------------------- +# Deletion +# --------------------------------------------------------------------------- + +@pytest.mark.django_db +def test_delete_source(client, local_source): + source_id = local_source.id + response = client.delete(f"/api/sources/{source_id}") + assert response.status_code == 204 + assert not MediaSource.objects.filter(id=source_id).exists() + + +@pytest.mark.django_db +def test_delete_source_not_found(client): + response = client.delete("/api/sources/99999") + assert response.status_code == 404 + + +# --------------------------------------------------------------------------- +# Sync (mocked yt-dlp) +# --------------------------------------------------------------------------- + +MOCK_YT_ENTRIES = [ + { + "id": "abc123", + "title": "Breaking News: Test Story", + "duration": 180, + "thumbnail": "https://i.ytimg.com/vi/abc123/hqdefault.jpg", + "description": "A breaking news segment.", + "upload_date": "20240315", + "url": "https://www.youtube.com/watch?v=abc123", + "uploader": "ABC News", + }, + { + "id": "def456", + "title": "Weather Report: Sunny with a chance of clouds", + "duration": 90, + "thumbnail": "https://i.ytimg.com/vi/def456/hqdefault.jpg", + "description": "Evening weather.", + "upload_date": "20240316", + "url": "https://www.youtube.com/watch?v=def456", + "uploader": "ABC News", + }, +] + + +@pytest.mark.django_db +def test_sync_youtube_channel_creates_media_items(client, youtube_source): + """ + Syncing a YouTube channel should call yt-dlp (mocked) and upsert + MediaItem rows for each discovered video. 
+ """ + with patch("core.services.youtube._extract_playlist_info", return_value=MOCK_YT_ENTRIES): + response = client.post(f"/api/sources/{youtube_source.id}/sync") + + assert response.status_code == 200 + data = response.json() + + # Two new videos → created=2 + assert data["created"] == 2 + assert data["updated"] == 0 + assert data["skipped"] == 0 + + # MediaItems should now exist in the DB + items = MediaItem.objects.filter(media_source=youtube_source) + assert items.count() == 2 + + titles = {i.title for i in items} + assert "Breaking News: Test Story" in titles + assert "Weather Report: Sunny with a chance of clouds" in titles + + # Verify youtube_video_id is populated + item = items.get(youtube_video_id="abc123") + assert item.runtime_seconds == 180 + assert item.release_year == 2024 + + +@pytest.mark.django_db +def test_sync_youtube_channel_updates_existing(client, youtube_source): + """Re-syncing the same source updates existing rows rather than duplicating.""" + with patch("core.services.youtube._extract_playlist_info", return_value=MOCK_YT_ENTRIES): + client.post(f"/api/sources/{youtube_source.id}/sync") + + # Second sync — same entries, should update not create + with patch( + "core.services.youtube._extract_playlist_info", + return_value=[{**MOCK_YT_ENTRIES[0], "title": "Updated Title"}], + ): + response = client.post(f"/api/sources/{youtube_source.id}/sync") + + assert response.status_code == 200 + data = response.json() + assert data["created"] == 0 + assert data["updated"] == 1 + + item = MediaItem.objects.get(youtube_video_id="abc123") + assert item.title == "Updated Title" + + +@pytest.mark.django_db +def test_sync_non_youtube_source_returns_400(client, local_source): + """Syncing a non-YouTube source type should return HTTP 400.""" + response = client.post(f"/api/sources/{local_source.id}/sync") + assert response.status_code == 400 + + +@pytest.mark.django_db +def test_sync_source_not_found(client): + response = client.post("/api/sources/99999/sync") + assert response.status_code == 404 diff --git a/core/management/commands/cache_upcoming.py b/core/management/commands/cache_upcoming.py new file mode 100644 index 0000000..1c84c6b --- /dev/null +++ b/core/management/commands/cache_upcoming.py @@ -0,0 +1,52 @@ +""" +management command: cache_upcoming + +Delegates to core.services.cache.run_cache() — the same logic exposed +by the API endpoint, so CLI and web UI behavior are always in sync. + +Usage: + python manage.py cache_upcoming # default: next 24 hours + python manage.py cache_upcoming --hours 48 + python manage.py cache_upcoming --prune-only +""" + +from django.core.management.base import BaseCommand +from core.services.cache import run_cache + + +class Command(BaseCommand): + help = "Download YouTube videos for upcoming airings and prune old cache files." 
+ + def add_arguments(self, parser): + parser.add_argument( + "--hours", + type=int, + default=24, + help="How many hours ahead to scan for upcoming airings (default: 24).", + ) + parser.add_argument( + "--prune-only", + action="store_true", + default=False, + help="Only delete expired cache files; do not download anything new.", + ) + + def handle(self, *args, **options): + hours = options["hours"] + prune_only = options["prune_only"] + + self.stdout.write(f"▶ Running cache worker (window: {hours}h, prune-only: {prune_only})") + result = run_cache(hours=hours, prune_only=prune_only) + + self.stdout.write(self.style.SUCCESS(f" 🗑 Pruned: {result['pruned']}")) + self.stdout.write(self.style.SUCCESS(f" ↓ Downloaded: {result['downloaded']}")) + self.stdout.write(self.style.SUCCESS(f" ✓ Already cached: {result['already_cached']}")) + if result["failed"]: + self.stderr.write(self.style.ERROR(f" ✗ Failed: {result['failed']}")) + + for item in result["items"]: + icon = {"downloaded": "↓", "cached": "✓", "failed": "✗"}.get(item["status"], "?") + line = f" {icon} [{item['status']:10}] {item['title'][:70]}" + if item.get("error"): + line += f" — {item['error']}" + self.stdout.write(line) diff --git a/core/management/commands/state.py b/core/management/commands/state.py index 77343bc..d2ba722 100644 --- a/core/management/commands/state.py +++ b/core/management/commands/state.py @@ -1,16 +1,29 @@ from django.core.management.base import BaseCommand from core.models import AppUser, Library, Channel, MediaItem, Airing, ScheduleTemplate +from core.services.scheduler import ScheduleGenerator from django.utils import timezone -from datetime import timedelta +from datetime import timedelta, date +import textwrap class Command(BaseCommand): help = "Displays a beautifully formatted terminal dashboard of the current backend state." + def add_arguments(self, parser): + parser.add_argument('--channel', type=int, help='Inspect specific channel schedule') + parser.add_argument('--test-generate', action='store_true', help='Trigger generation for today if inspecting a channel') + def get_color(self, text, code): """Helper to wrap string in bash color codes""" return f"\033[{code}m{text}\033[0m" def handle(self, *args, **options): + channel_id = options.get('channel') + test_generate = options.get('test_generate') + + if channel_id: + self.inspect_channel(channel_id, test_generate) + return + # 1. 
Gather Aggregate Metrics total_users = AppUser.objects.count() total_libraries = Library.objects.count() @@ -46,7 +59,7 @@ class Command(BaseCommand): for c in channels: status_color = "1;32" if c.is_active else "1;31" status_text = "ACTIVE" if c.is_active else "INACTIVE" - self.stdout.write(f"\n 📺 [{c.channel_number or '-'}] {c.name} ({self.get_color(status_text, status_color)})") + self.stdout.write(f"\n 📺 [{c.id}] {c.name} (Ch {c.channel_number or '-'}) ({self.get_color(status_text, status_color)})") # Show templates templates = c.scheduletemplate_set.filter(is_active=True).order_by('-priority') @@ -57,4 +70,40 @@ class Command(BaseCommand): blocks_count = t.scheduleblock_set.count() self.stdout.write(f" 📄 Template: {t.name} (Priority {t.priority}) -> {blocks_count} Blocks") + self.stdout.write(f"\nUse {self.get_color('--channel ', '1;37')} to inspect detailed schedule.\n") + + def inspect_channel(self, channel_id, test_generate): + try: + channel = Channel.objects.get(id=channel_id) + except Channel.DoesNotExist: + self.stdout.write(self.get_color(f"Error: Channel {channel_id} not found.", "1;31")) + return + + if test_generate: + self.stdout.write(self.get_color(f"\nTriggering schedule generation for {channel.name}...", "1;33")) + generator = ScheduleGenerator(channel) + count = generator.generate_for_date(date.today()) + self.stdout.write(f"Done. Created {self.get_color(str(count), '1;32')} new airings.") + + now = timezone.now() + end_window = now + timedelta(hours=12) + + airings = Airing.objects.filter( + channel=channel, + ends_at__gt=now, + starts_at__lt=end_window + ).select_related('media_item').order_by('starts_at') + + self.stdout.write(self.get_color(f"\n=== Schedule for {channel.name} (Next 12h) ===", "1;34")) + + if not airings: + self.stdout.write(self.get_color(" (No airings scheduled in this window)", "1;33")) + else: + for a in airings: + time_str = f"{a.starts_at.strftime('%H:%M')} - {a.ends_at.strftime('%H:%M')}" + if a.starts_at <= now <= a.ends_at: + self.stdout.write(f" {self.get_color('▶ ON AIR', '1;32')} {self.get_color(time_str, '1;37')} | {a.media_item.title}") + else: + self.stdout.write(f" {time_str} | {a.media_item.title}") + self.stdout.write("\n") diff --git a/core/migrations/0002_mediaitem_cache_expires_at_and_more.py b/core/migrations/0002_mediaitem_cache_expires_at_and_more.py new file mode 100644 index 0000000..fb10b05 --- /dev/null +++ b/core/migrations/0002_mediaitem_cache_expires_at_and_more.py @@ -0,0 +1,45 @@ +# Generated by Django 6.0.3 on 2026-03-08 15:34 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0001_initial"), + ] + + operations = [ + migrations.AddField( + model_name="mediaitem", + name="cache_expires_at", + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name="mediaitem", + name="cached_file_path", + field=models.TextField(blank=True, null=True), + ), + migrations.AddField( + model_name="mediaitem", + name="youtube_video_id", + field=models.CharField(blank=True, db_index=True, max_length=64, null=True), + ), + migrations.AlterField( + model_name="mediasource", + name="source_type", + field=models.CharField( + choices=[ + ("local_directory", "Local Directory"), + ("network_share", "Network Share"), + ("manual_import", "Manual Import"), + ("playlist", "Playlist"), + ("stream", "Stream"), + ("api_feed", "API Feed"), + ("youtube_channel", "YouTube Channel"), + ("youtube_playlist", "YouTube Playlist"), + ], + max_length=32, + ), 
+ ), + ] diff --git a/core/models.py b/core/models.py index 22d18a2..3e0c2a6 100644 --- a/core/models.py +++ b/core/models.py @@ -85,6 +85,8 @@ class MediaSource(models.Model): PLAYLIST = 'playlist', 'Playlist' STREAM = 'stream', 'Stream' API_FEED = 'api_feed', 'API Feed' + YOUTUBE_CHANNEL = 'youtube_channel', 'YouTube Channel' + YOUTUBE_PLAYLIST = 'youtube_playlist', 'YouTube Playlist' source_type = models.CharField(max_length=32, choices=SourceType.choices) uri = models.TextField() @@ -152,6 +154,11 @@ class MediaItem(models.Model): is_active = models.BooleanField(default=True) date_added_at = models.DateTimeField(auto_now_add=True) metadata_json = models.JSONField(default=dict) + # YouTube-specific: the video ID from yt-dlp + youtube_video_id = models.CharField(max_length=64, blank=True, null=True, db_index=True) + # Local cache path for downloaded YouTube videos (distinct from file_path which holds source URI) + cached_file_path = models.TextField(blank=True, null=True) + cache_expires_at = models.DateTimeField(blank=True, null=True) genres = models.ManyToManyField(Genre, related_name="media_items", blank=True) diff --git a/core/services/cache.py b/core/services/cache.py new file mode 100644 index 0000000..b0e79f0 --- /dev/null +++ b/core/services/cache.py @@ -0,0 +1,140 @@ +""" +Cache service — reusable download/prune logic used by both: + - python manage.py cache_upcoming + - POST /api/sources/cache-upcoming +""" + +import logging +import pathlib +from datetime import timedelta + +from django.utils import timezone + +from core.models import Airing, MediaItem, MediaSource +from core.services.youtube import download_for_airing, YOUTUBE_SOURCE_TYPES + +logger = logging.getLogger(__name__) + + +def run_cache(hours: int = 24, prune_only: bool = False) -> dict: + """ + Scan Airings in the next `hours` hours, download any uncached YouTube + videos, and prune stale local files. + + Returns a summary dict suitable for JSON serialization. 
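    Illustrative return value (counts and item fields are hypothetical; the
    keys match the dict assembled at the end of this function):

        {"pruned": 1, "downloaded": 2, "already_cached": 3, "failed": 0,
         "items": [{"id": 7, "title": "Breaking News", "status": "downloaded",
                    "path": "/data/media/abc123.mp4"}]}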
+ """ + now = timezone.now() + window_end = now + timedelta(hours=hours) + + # ── Prune first ──────────────────────────────────────────────────────── + pruned = _prune(now) + + if prune_only: + return {"pruned": pruned, "downloaded": 0, "already_cached": 0, "failed": 0, "items": []} + + # ── Find upcoming and currently playing YouTube-backed airings ────────── + upcoming = ( + Airing.objects + .filter(ends_at__gt=now, starts_at__lte=window_end) + .select_related("media_item__media_source") + ) + + youtube_items: dict[int, MediaItem] = {} + for airing in upcoming: + item = airing.media_item + if item.media_source and item.media_source.source_type in YOUTUBE_SOURCE_TYPES: + youtube_items[item.pk] = item + + downloaded = already_cached = failed = 0 + items_status = [] + + for item in youtube_items.values(): + # Skip if already cached + if item.cached_file_path and pathlib.Path(item.cached_file_path).exists(): + already_cached += 1 + items_status.append({ + "id": item.pk, + "title": item.title, + "status": "cached", + "path": item.cached_file_path, + }) + continue + + try: + local_path = download_for_airing(item) + downloaded += 1 + items_status.append({ + "id": item.pk, + "title": item.title, + "status": "downloaded", + "path": str(local_path), + }) + except Exception as exc: + failed += 1 + items_status.append({ + "id": item.pk, + "title": item.title, + "status": "failed", + "error": str(exc), + }) + logger.error("download_for_airing(%s) failed: %s", item.pk, exc) + + logger.info( + "run_cache(hours=%d): pruned=%d downloaded=%d cached=%d failed=%d", + hours, pruned, downloaded, already_cached, failed, + ) + return { + "pruned": pruned, + "downloaded": downloaded, + "already_cached": already_cached, + "failed": failed, + "items": items_status, + } + + +def _prune(now) -> int: + """Delete local cache files whose airings have all ended.""" + pruned = 0 + stale = MediaItem.objects.filter(cached_file_path__isnull=False).exclude( + airing__ends_at__gte=now + ) + for item in stale: + p = pathlib.Path(item.cached_file_path) + if p.exists(): + try: + p.unlink() + pruned += 1 + except OSError as exc: + logger.warning("Could not delete %s: %s", p, exc) + item.cached_file_path = None + item.cache_expires_at = None + item.save(update_fields=["cached_file_path", "cache_expires_at"]) + return pruned + + +def get_download_status() -> dict: + """ + Return a snapshot of all YouTube MediaItems and their cache status, + useful for rendering the Downloads UI. 
+ """ + items = ( + MediaItem.objects + .filter(media_source__source_type__in=YOUTUBE_SOURCE_TYPES) + .select_related("media_source") + .order_by("media_source__name", "title") + ) + + result = [] + for item in items: + cached = bool(item.cached_file_path and pathlib.Path(item.cached_file_path).exists()) + result.append({ + "id": item.pk, + "title": item.title, + "source_name": item.media_source.name, + "source_id": item.media_source.id, + "youtube_video_id": item.youtube_video_id, + "runtime_seconds": item.runtime_seconds, + "cached": cached, + "cached_path": item.cached_file_path if cached else None, + }) + return {"items": result, "total": len(result), "cached": sum(1 for r in result if r["cached"])} diff --git a/core/services/scheduler.py b/core/services/scheduler.py index 72b040d..743dc2e 100644 --- a/core/services/scheduler.py +++ b/core/services/scheduler.py @@ -1,108 +1,210 @@ -from datetime import datetime, timedelta, date, time, timezone -from core.models import Channel, ScheduleTemplate, ScheduleBlock, Airing, MediaItem +""" +Schedule generator — respects ChannelSourceRule assignments. + +Source selection priority: + 1. If any rules with rule_mode='prefer' exist, items from those sources + are weighted much more heavily. + 2. Items from rule_mode='allow' sources fill the rest. + 3. Items from rule_mode='avoid' sources are only used as a last resort + (weight × 0.1). + 4. Items from rule_mode='block' sources are NEVER scheduled. + 5. If NO ChannelSourceRule rows exist for this channel, falls back to + the old behaviour (all items in the channel's library). +""" + import random +import uuid +from datetime import datetime, timedelta, date, timezone + +from core.models import ( + Channel, ChannelSourceRule, ScheduleTemplate, + ScheduleBlock, Airing, MediaItem, +) + class ScheduleGenerator: """ - A service that reads the latest ScheduleTemplate and Blocks for a given channel - and generates concrete Airings logic based on available matching MediaItems. + Reads ScheduleTemplate + ScheduleBlocks for a channel and fills the day + with concrete Airing rows, picking MediaItems according to the channel's + ChannelSourceRule assignments. """ - + def __init__(self, channel: Channel): self.channel = channel - + + # ------------------------------------------------------------------ + # Public API + # ------------------------------------------------------------------ + def generate_for_date(self, target_date: date) -> int: """ - Idempotent generation of airings for a specific date on this channel. - Returns the number of new Airings created. + Idempotent generation of airings for `target_date`. + Returns the number of new Airing rows created. """ - # 1. Get the highest priority active template valid on this date - template = ScheduleTemplate.objects.filter( - channel=self.channel, - is_active=True - ).filter( - # Start date is null or <= target_date - valid_from_date__isnull=True - ).order_by('-priority').first() - - # In a real app we'd construct complex Q objects for the valid dates, - # but for PYTV mock we will just grab the highest priority active template. + template = self._get_template() if not template: - template = ScheduleTemplate.objects.filter(channel=self.channel, is_active=True).order_by('-priority').first() - if not template: - return 0 - - # 2. 
Extract day of week mask - # Python weekday: 0=Monday, 6=Sunday - # Our mask: bit 0 = Monday, bit 6 = Sunday + return 0 + target_weekday_bit = 1 << target_date.weekday() - blocks = template.scheduleblock_set.all() airings_created = 0 - + for block in blocks: - # Check if block runs on this day if not (block.day_of_week_mask & target_weekday_bit): continue - - # Naive time combining mapping local time to UTC timeline without specific tz logic for simplicity now + start_dt = datetime.combine(target_date, block.start_local_time, tzinfo=timezone.utc) - end_dt = datetime.combine(target_date, block.end_local_time, tzinfo=timezone.utc) - - # If the block wraps past midnight (e.g. 23:00 to 02:00) + end_dt = datetime.combine(target_date, block.end_local_time, tzinfo=timezone.utc) + + # Midnight-wrap support (e.g. 23:00–02:00) if end_dt <= start_dt: end_dt += timedelta(days=1) - - # Clear existing airings in this window to allow idempotency + + # Clear existing airings in this window (idempotency) Airing.objects.filter( channel=self.channel, starts_at__gte=start_dt, - starts_at__lt=end_dt + starts_at__lt=end_dt, ).delete() - - # 3. Pull matching Media Items - # Simplistic matching: pull items from library matching the block's genre - items_query = MediaItem.objects.filter(media_source__library=self.channel.library) - if block.default_genre: - items_query = items_query.filter(genres=block.default_genre) - - available_items = list(items_query.exclude(item_kind="bumper")) + + available_items = self._get_weighted_items(block) if not available_items: continue - # Shuffle randomly for basic scheduling variety - random.shuffle(available_items) - - # 4. Fill the block - current_cursor = start_dt - item_index = 0 - - while current_cursor < end_dt and item_index < len(available_items): - item = available_items[item_index] - duration = timedelta(seconds=item.runtime_seconds or 3600) - - # Check if this item fits - if current_cursor + duration > end_dt: - # Item doesn't strictly fit, but we'll squeeze it in and break if needed - # Real systems pad this out or trim the slot. - pass - - import uuid - Airing.objects.create( - channel=self.channel, - schedule_template=template, - schedule_block=block, - media_item=item, - starts_at=current_cursor, - ends_at=current_cursor + duration, - slot_kind="program", - status="scheduled", - source_reason="template", - generation_batch_uuid=uuid.uuid4() - ) - - current_cursor += duration - item_index += 1 - airings_created += 1 + airings_created += self._fill_block( + template, block, start_dt, end_dt, available_items + ) return airings_created + + # ------------------------------------------------------------------ + # Helpers + # ------------------------------------------------------------------ + + def _get_template(self): + """Pick the highest-priority active ScheduleTemplate for this channel.""" + qs = ScheduleTemplate.objects.filter( + channel=self.channel, is_active=True + ).order_by('-priority') + return qs.first() + + def _get_weighted_items(self, block: ScheduleBlock) -> list: + """ + Build a weighted pool of MediaItems respecting ChannelSourceRule. + + Returns a flat list with items duplicated according to their effective + weight (rounded to nearest int, min 1) so random.choice() gives the + right probability distribution without needing numpy. 
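        Worked example (weights are hypothetical): a 'prefer' rule with
        weight 2.0 yields an effective weight of 2.0 * 3.0 = 6.0, so each
        item from that source appears round(6.0) = 6 times in the pool;
        an 'avoid' rule with weight 1.0 yields 1.0 * 0.1 = 0.1, which the
        max(1, round(w)) floor bumps back up to a single copy.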
+ """ + rules = list( + ChannelSourceRule.objects.filter(channel=self.channel) + .select_related('media_source') + ) + + if rules: + # ── Rules exist: build filtered + weighted pool ─────────────── + allowed_source_ids = set() # allow + prefer + blocked_source_ids = set() # block + avoid_source_ids = set() # avoid + source_weights: dict[int, float] = {} + + for rule in rules: + sid = rule.media_source_id + mode = rule.rule_mode + w = float(rule.weight or 1.0) + + if mode == 'block': + blocked_source_ids.add(sid) + elif mode == 'avoid': + avoid_source_ids.add(sid) + source_weights[sid] = w * 0.1 # heavily discounted + elif mode == 'prefer': + allowed_source_ids.add(sid) + source_weights[sid] = w * 3.0 # boosted + else: # 'allow' + allowed_source_ids.add(sid) + source_weights[sid] = w + + # Build base queryset from allowed + avoid sources (not blocked) + eligible_source_ids = (allowed_source_ids | avoid_source_ids) - blocked_source_ids + + if not eligible_source_ids: + return [] + + base_qs = MediaItem.objects.filter( + media_source_id__in=eligible_source_ids, + is_active=True, + ).exclude(item_kind='bumper').select_related('media_source') + + else: + # ── No rules: fall back to full library (old behaviour) ──────── + base_qs = MediaItem.objects.filter( + media_source__library=self.channel.library, + is_active=True, + ).exclude(item_kind='bumper') + source_weights = {} + + # Optionally filter by genre if block specifies one + if block.default_genre: + base_qs = base_qs.filter(genres=block.default_genre) + + items = list(base_qs) + if not items: + return [] + + if not source_weights: + # No weight information — plain shuffle + random.shuffle(items) + return items + + # Build weighted list: each item appears ⌈weight⌉ times + weighted: list[MediaItem] = [] + for item in items: + w = source_weights.get(item.media_source_id, 1.0) + copies = max(1, round(w)) + weighted.extend([item] * copies) + + random.shuffle(weighted) + return weighted + + def _fill_block( + self, + template: ScheduleTemplate, + block: ScheduleBlock, + start_dt: datetime, + end_dt: datetime, + items: list, + ) -> int: + """Fill start_dt→end_dt with sequential Airings, cycling through items.""" + cursor = start_dt + idx = 0 + created = 0 + batch = uuid.uuid4() + + while cursor < end_dt: + item = items[idx % len(items)] + idx += 1 + + duration = timedelta(seconds=max(item.runtime_seconds or 1800, 1)) + + # Don't let a single item overshoot the end by more than its own length + if cursor + duration > end_dt + timedelta(hours=1): + break + + Airing.objects.create( + channel=self.channel, + schedule_template=template, + schedule_block=block, + media_item=item, + starts_at=cursor, + ends_at=cursor + duration, + slot_kind="program", + status="scheduled", + source_reason="template", + generation_batch_uuid=batch, + ) + + cursor += duration + created += 1 + + return created diff --git a/core/services/youtube.py b/core/services/youtube.py new file mode 100644 index 0000000..12a5354 --- /dev/null +++ b/core/services/youtube.py @@ -0,0 +1,244 @@ +""" +YouTube source sync service. + +Two-phase design: + Phase 1 — METADATA ONLY (sync_source): + Crawls a YouTube channel or playlist and upserts MediaItem rows with + title, duration, thumbnail etc. No video files are downloaded. + A max_videos cap keeps this fast for large channels. + + Phase 2 — DOWNLOAD ON DEMAND (download_for_airing): + Called only by `python manage.py cache_upcoming` immediately before + a scheduled Airing. Downloads only the specific video needed. 
+""" + +import logging +import os +from pathlib import Path + +import yt_dlp +from django.conf import settings +from django.utils import timezone + +from core.models import MediaItem, MediaSource + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# helpers +# --------------------------------------------------------------------------- + +YOUTUBE_SOURCE_TYPES = { + MediaSource.SourceType.YOUTUBE_CHANNEL, + MediaSource.SourceType.YOUTUBE_PLAYLIST, +} + + +def _cache_dir() -> Path: + """Return (and create) the directory where downloaded videos are stored.""" + root = Path(getattr(settings, "MEDIA_ROOT", "/tmp/pytv_cache")) + root.mkdir(parents=True, exist_ok=True) + return root + + +# --------------------------------------------------------------------------- +# metadata extraction (no download) +# --------------------------------------------------------------------------- + +def _extract_playlist_info(url: str, max_videos: int | None = None) -> list[dict]: + """ + Use yt-dlp to extract metadata for up to `max_videos` videos in a + channel/playlist without downloading any files. + + `extract_flat=True` is crucial — it fetches only a lightweight index + (title, id, duration) rather than resolving full stream URLs, which + makes crawling large channels orders of magnitude faster. + + Returns a list of yt-dlp info dicts (most-recent first for channels). + """ + ydl_opts = { + "quiet": True, + "no_warnings": True, + "extract_flat": True, # metadata only — NO stream/download URLs + "ignoreerrors": True, + } + if max_videos is not None: + # yt-dlp uses 1-based playlist indices; playlistend limits how many + # entries are fetched from the source before returning. + ydl_opts["playlistend"] = max_videos + + with yt_dlp.YoutubeDL(ydl_opts) as ydl: + info = ydl.extract_info(url, download=False) + + if info is None: + return [] + + # Both channels and playlists wrap entries in an "entries" key. + entries = info.get("entries") or [] + # Flatten one extra level for channels (channel -> playlist -> entries) + flat: list[dict] = [] + for entry in entries: + if entry is None: + continue + if "entries" in entry: # nested playlist + flat.extend(e for e in entry["entries"] if e) + else: + flat.append(entry) + return flat + + +# --------------------------------------------------------------------------- +# public API +# --------------------------------------------------------------------------- + +def sync_source(media_source: MediaSource, max_videos: int | None = None) -> dict: + """ + Phase 1: Metadata-only sync. + + Crawls a YouTube channel/playlist and upserts MediaItem rows for each + discovered video. No video files are ever downloaded here. + + Args: + media_source: The MediaSource to sync. + max_videos: Maximum number of videos to import. 
When None the + defaults are applied: + - youtube_channel → 50 (channels can have 10k+ videos) + - youtube_playlist → 200 (playlists are usually curated) + + Returns: + {"created": int, "updated": int, "skipped": int} + """ + if media_source.source_type not in YOUTUBE_SOURCE_TYPES: + raise ValueError(f"MediaSource {media_source.id} is not a YouTube source.") + + # Apply sensible defaults per source type + if max_videos is None: + if media_source.source_type == MediaSource.SourceType.YOUTUBE_CHANNEL: + max_videos = 50 + else: + max_videos = 200 + + entries = _extract_playlist_info(media_source.uri, max_videos=max_videos) + created = updated = skipped = 0 + + for entry in entries: + video_id = entry.get("id") + if not video_id: + skipped += 1 + continue + + title = entry.get("title") or f"YouTube Video {video_id}" + duration = entry.get("duration") or 0 # seconds, may be None for live + thumbnail = entry.get("thumbnail") or "" + description = entry.get("description") or "" + release_year = None + upload_date = entry.get("upload_date") # "YYYYMMDD" + if upload_date and len(upload_date) >= 4: + try: + release_year = int(upload_date[:4]) + except ValueError: + pass + + # Store the YouTube watch URL in file_path so the scheduler can + # reference it. The ACTUAL video file will only be downloaded when + # `cache_upcoming` runs before the airing. + video_url = entry.get("url") or f"https://www.youtube.com/watch?v={video_id}" + + obj, was_created = MediaItem.objects.update_or_create( + media_source=media_source, + youtube_video_id=video_id, + defaults={ + "title": title, + "item_kind": MediaItem.ItemKind.MOVIE, + "runtime_seconds": max(int(duration), 1), + "file_path": video_url, + "thumbnail_path": thumbnail, + "description": description, + "release_year": release_year, + "metadata_json": { + "yt_id": video_id, + "yt_url": video_url, + "uploader": entry.get("uploader", ""), + }, + "is_active": True, + }, + ) + + if was_created: + created += 1 + else: + updated += 1 + + # Update last-scanned timestamp + media_source.last_scanned_at = timezone.now() + media_source.save(update_fields=["last_scanned_at"]) + + logger.info( + "sync_source(%s): created=%d updated=%d skipped=%d (limit=%s)", + media_source.id, + created, + updated, + skipped, + max_videos, + ) + return {"created": created, "updated": updated, "skipped": skipped} + + +def download_for_airing(media_item: MediaItem) -> Path: + """ + Download a YouTube video to the local cache so it can be served + directly without network dependency at airing time. + + Returns the local Path of the downloaded file. + Raises RuntimeError if the download fails. + """ + video_id = media_item.youtube_video_id + if not video_id: + raise ValueError(f"MediaItem {media_item.id} has no youtube_video_id.") + + cache_dir = _cache_dir() + # Use video_id so we can detect already-cached files quickly. + output_template = str(cache_dir / f"{video_id}.%(ext)s") + + # Check if already cached and not expired + if media_item.cached_file_path: + existing = Path(media_item.cached_file_path) + if existing.exists(): + logger.info("cache hit: %s already at %s", video_id, existing) + return existing + + ydl_opts = { + "quiet": True, + "no_warnings": True, + "outtmpl": output_template, + # Only request pre-muxed (progressive) formats — no separate video+audio + # streams that would require ffmpeg to merge. Falls back through: + # 1. Best pre-muxed mp4 up to 1080p + # 2. Any pre-muxed mp4 + # 3. Any pre-muxed webm + # 4. 
Anything pre-muxed (no merger needed) + "format": "best[ext=mp4][height<=1080]/best[ext=mp4]/best[ext=webm]/best", + } + + url = media_item.file_path # URL stored here by sync_source + with yt_dlp.YoutubeDL(ydl_opts) as ydl: + info = ydl.extract_info(url, download=True) + + if info is None: + raise RuntimeError(f"yt-dlp returned no info for {url}") + + downloaded_path = Path(ydl.prepare_filename(info)) + if not downloaded_path.exists(): + # yt-dlp may have merged to .mp4 even if the template said otherwise + mp4_path = downloaded_path.with_suffix(".mp4") + if mp4_path.exists(): + downloaded_path = mp4_path + else: + raise RuntimeError(f"Expected download at {downloaded_path} but file not found.") + + # Persist the cache location on the model + media_item.cached_file_path = str(downloaded_path) + media_item.save(update_fields=["cached_file_path"]) + + logger.info("downloaded %s -> %s", video_id, downloaded_path) + return downloaded_path diff --git a/db.sqlite3 b/db.sqlite3 index 864424f0f6f67013911d4fd31e08e49677189d81..dc12785fc5eba3ce300a89786488d338fbceb6a0 100644 GIT binary patch delta 5016 zcmchbe{36P8OQIZFK5U1*ojLPLe^VWM5AWO{qo&8o3uq}40UY>lVFwDduj3*(%RFMd#sarRE$hTJ@r} zXcv#5`xHshZ;{knBvX|2S=pFX^V0=Q*7Mrh&aunP&_{`p^vg;3HvCa=;9UAo>6h1X z;&;>2?@Q7$iGQeg6h*d|7o5_WoC@KD#-yu{lF3)m<4_CEy+SJNy;ifY;#n@EiCA`~)t-i||AE z9()U)ht;+0p+~`EXr9brGmOm;Hd$;k*Z?*HHfd}Iu}NW*#D>Ra02>aQ1U76Ep;iu;WqPB!k>gE*Pc@3p1Vf(Ib}fExUO5ckWoDh5i&{n&{< zgE7Apf~KN=Pn1H^Wb_MB3Q75xUr_>}fw(^e;^O`=qlsQW0;MI1eR5SopLLO9GP}0d z4_-$Kw)I1^=U}gJAy!nlj-YYj6gkLKk8ok0dV~wp)GM4G2+<>4C}@vxp`bm&h53sM zCx@WD!iA(g!iA(g!i7_~N4PL^J;H@R+X#o2$Q~q|zu-V5;4SzA{0uIj#eEK*fK%`p zsDq1^;9j^5ra?r4y)AfYdMDYcG=!m;Aq!KX{rz1$Pr(fOU8p72lu3kKp^L_OHUTs06+Ve}jK^ z%HJ{g6fT@OmL^GR*tbCy_AfKO4eGFe85CmwGN{D;VJ|?B)lm&>0hVs9ei={_SAZ6D*04$l>Y*s7&yVb#Z^%Mo8!DjB9|sqb!_MO z{y84i`E5#bw=0~w)!NeGnsuZ-yHLNZE?9M^WLvsXHVdYq>6#*!3MENX^984jjHH`J zJ5VD5h-c6w^p#J1E`ypbUm} zr~C1+ME%z&UKdM$6vHqjrom-8BvE}3wci7~NkX`HA@R1&%2h}-IggUUL0^=x3v{D| z4P!eL56@b|k~CY+mt|MhG{Y%dX2~eqZpqB&ZM7_G(O4~gbVbo9x{(k%C^3D)#?$?H zNY_U3f{mcvT0qx^l1DR(ZgA-iNlc%xvviXLaGumgxjrPCZsMd2c2HtT3AaljO0fPy zyz7i2=_*P}szVY@ioZeg(8TS|%DXIO-F8jOPzpxLRW(gDO<8g@+f;R}WXO3V5~{P3 z(R~s{H4@1VN-HT|@E+0H6Lz7r_K*FNa*ZgODpL)PZ;^O&d@L)yOSuJiok=7^p(xaU z23jl#h41tqR9C}h)0LFgTR}e*%{5d@b`{$#yPE161uc)(bfKKjONLo+%&t&Dr49Oz zTu{(dNpO8hde=`2?md#k`;Uy4GOGEBz9ix1zUx1N+K>NYa!n-Oc1iI&p{%G1+eoli zA&J*~C4~DQNOoUc9-l-q@+!B!!Z7CrHoZl_Nf;N_gpZ`R&^GsCI&ZUC;->WYI5{wO z>l}|aTl_v$)Wrjp!}Imlgprh>LwQH%mtA+kXH+RT3Qr8Y}L#2%hgV&+a}&!op>fH2J>{5n2g5TU$JfLxcEt{R&nab<**jEf=1x( z>*F2E1qJ7_bDH?U_>hEKr#{wCAkw6gxDl^OCkpIP7fyH*fe~~l z5qpBBc|N&8R>^%exAR~eUZD9y`OlgdE;v0 zTM35z2u)S zwGXj~$C{1%nDNoRw>RZyxEr$ZuO~L9p5k74;{*78GYOXn_!s;Gy>tErz0q8T-@>bC kqFh2EtwHnBB_=Bp_)v(zhe8C-KYR^I3B)M;MYjgE-K4zc9?#dd&s6Sn91KXPEd&60jr$?`3cVg+#Y}B59a}~Sa zcA1TAEzHw5 { - if (!showGuide) return; - const handleClose = (e) => { - if (['Escape', 'Backspace', 'Enter'].includes(e.key)) { + const handleKey = (e) => { + // 'S' key opens settings (when nothing else is open) + if (e.key === 's' && !showGuide && !showSettings) { + setShowSettings(true); + } + // Escape/Backspace closes whatever is open + if (['Escape', 'Backspace'].includes(e.key)) { setShowGuide(false); + setShowSettings(false); } }; - window.addEventListener('keydown', handleClose); - return () => window.removeEventListener('keydown', handleClose); - }, [showGuide]); + window.addEventListener('keydown', handleKey); + return () => window.removeEventListener('keydown', handleKey); + }, [showGuide, showSettings]); return ( <> - {/* - The 
ChannelTuner always remains mounted in the background
-        so we don't drop video connections while browsing the guide.
-      */}
+      {/* ChannelTuner always stays mounted to preserve buffering */}
       <ChannelTuner onOpenGuide={() => setShowGuide(!showGuide)} />
-
-      {showGuide && <Guide onClose={() => setShowGuide(false)} onSelectChannel={(id) => { console.log("Tuning to", id); setShowGuide(false); }} />}
+
+      {showGuide && (
+        <Guide
+          onClose={() => setShowGuide(false)}
+          onSelectChannel={(id) => { console.log('Tuning to', id); setShowGuide(false); }}
+        />
+      )}
+
+      {showSettings && <Settings onClose={() => setShowSettings(false)} />}
+
+      {/* Gear button — visible only when nothing else is overlaid */}
+      {!showGuide && !showSettings && (
+        
+      )}
     </>
   );
 }
diff --git a/frontend/src/api.js b/frontend/src/api.js
index 93a8d49..fe3e39c 100644
--- a/frontend/src/api.js
+++ b/frontend/src/api.js
@@ -1,24 +1,63 @@
 import axios from 'axios';
 
-// The base URL relies on the Vite proxy in development,
-// and same-origin in production.
 const apiClient = axios.create({
   baseURL: '/api',
-  headers: {
-    'Content-Type': 'application/json',
-  },
+  headers: { 'Content-Type': 'application/json' },
 });
 
-export const fetchChannels = async () => {
-  const response = await apiClient.get('/channel/');
-  return response.data;
+// ── Channels ──────────────────────────────────────────────────────────────
+export const fetchChannels = async () => (await apiClient.get('/channel/')).data;
+export const createChannel = async (payload) => (await apiClient.post('/channel/', payload)).data;
+export const updateChannel = async (id, payload) => (await apiClient.patch(`/channel/${id}`, payload)).data;
+export const deleteChannel = async (id) => { await apiClient.delete(`/channel/${id}`); };
+
+// Channel program data
+export const fetchChannelNow = async (channelId) =>
+  (await apiClient.get(`/channel/${channelId}/now`)).data;
+export const fetchChannelAirings = async (channelId, hours = 4) =>
+  (await apiClient.get(`/channel/${channelId}/airings?hours=${hours}`)).data;
+
+// Channel ↔ Source assignments
+export const fetchChannelSources = async (channelId) =>
+  (await apiClient.get(`/channel/${channelId}/sources`)).data;
+export const assignSourceToChannel = async (channelId, payload) =>
+  (await apiClient.post(`/channel/${channelId}/sources`, payload)).data;
+export const removeSourceFromChannel = async (channelId, ruleId) => {
+  await apiClient.delete(`/channel/${channelId}/sources/${ruleId}`);
 };
 
-// If a channel is selected, we can load its upcoming airings
-export const fetchScheduleGenerations = async (channelId) => {
-  // We can trigger an immediate generation for the day to ensure there's data
-  const response = await apiClient.post(`/schedule/generate/${channelId}`);
-  return response.data;
-};
+// ── Schedule ──────────────────────────────────────────────────────────────
+export const fetchTemplates = async () => (await apiClient.get('/schedule/template/')).data;
+export const createTemplate = async (payload) =>
+  (await apiClient.post('/schedule/template/', payload)).data;
+export const deleteTemplate = async (id) => { await apiClient.delete(`/schedule/template/${id}`); };
+export const generateScheduleToday = async (channelId) =>
+  (await apiClient.post(`/schedule/generate-today/${channelId}`)).data;
 
-// Future logic can query specific lists of Airings here...
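// Illustrative usage (editorial sketch, not part of the patch hunks): a guide
// view could combine the channel helpers above to fetch the current airing and
// the next few hours of programme data together. The helper name and the
// 4-hour window are assumptions for illustration, not code from this PR.
export const fetchChannelSnapshot = async (channelId) => {
  const [now, airings] = await Promise.all([
    fetchChannelNow(channelId),        // current airing for this channel
    fetchChannelAirings(channelId, 4), // upcoming airings over the next 4 hours
  ]);
  return { now, airings };
};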
+// Legacy – used by guide +export const fetchScheduleGenerations = async (channelId) => + (await apiClient.post(`/schedule/generate/${channelId}`)).data; + +// ── Media Sources (YouTube / local) ─────────────────────────────────────── +export const fetchSources = async () => (await apiClient.get('/sources/')).data; +export const createSource = async (payload) => (await apiClient.post('/sources/', payload)).data; +export const syncSource = async (sourceId, maxVideos) => { + const url = maxVideos ? `/sources/${sourceId}/sync?max_videos=${maxVideos}` : `/sources/${sourceId}/sync`; + return (await apiClient.post(url)).data; +}; +export const deleteSource = async (sourceId) => { await apiClient.delete(`/sources/${sourceId}`); }; + +// Download controls +export const fetchDownloadStatus = async () => (await apiClient.get('/sources/download-status')).data; +export const triggerCacheUpcoming = async (hours = 24, pruneOnly = false) => + (await apiClient.post(`/sources/cache-upcoming?hours=${hours}&prune_only=${pruneOnly}`)).data; +export const downloadItem = async (itemId) => (await apiClient.post(`/sources/${itemId}/download`)).data; + +// ── Libraries ───────────────────────────────────────────────────────────── +export const fetchLibraries = async () => (await apiClient.get('/library/')).data; + +// ── Users ───────────────────────────────────────────────────────────────── +export const fetchUsers = async () => (await apiClient.get('/user/')).data; +export const createUser = async (payload) => (await apiClient.post('/user/', payload)).data; +export const updateUser = async (id, payload) => (await apiClient.patch(`/user/${id}`, payload)).data; +export const deleteUser = async (id) => { await apiClient.delete(`/user/${id}`); }; diff --git a/frontend/src/components/ChannelTuner.jsx b/frontend/src/components/ChannelTuner.jsx index 66531c8..eaac1af 100644 --- a/frontend/src/components/ChannelTuner.jsx +++ b/frontend/src/components/ChannelTuner.jsx @@ -1,6 +1,6 @@ -import React, { useState, useEffect, useRef } from 'react'; +import React, { useState, useEffect, useRef, useCallback } from 'react'; import { useRemoteControl } from '../hooks/useRemoteControl'; -import { fetchChannels } from '../api'; +import { fetchChannels, fetchChannelNow } from '../api'; const FALLBACK_VIDEOS = [ 'http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4', @@ -13,6 +13,8 @@ export default function ChannelTuner({ onOpenGuide }) { const [loading, setLoading] = useState(true); const [currentIndex, setCurrentIndex] = useState(0); const [showOSD, setShowOSD] = useState(true); + const [showDebug, setShowDebug] = useState(false); + const [nowPlaying, setNowPlaying] = useState({}); // { channelId: airingData } const osdTimerRef = useRef(null); // The 3 buffer indices @@ -22,11 +24,11 @@ export default function ChannelTuner({ onOpenGuide }) { const prevIndex = getPrevIndex(currentIndex); const nextIndex = getNextIndex(currentIndex); - const triggerOSD = () => { + const triggerOSD = useCallback(() => { setShowOSD(true); if (osdTimerRef.current) clearTimeout(osdTimerRef.current); osdTimerRef.current = setTimeout(() => setShowOSD(false), 5000); - }; + }, []); const wrapChannelUp = () => { setCurrentIndex(getNextIndex); @@ -42,89 +44,166 @@ export default function ChannelTuner({ onOpenGuide }) { onChannelUp: wrapChannelUp, onChannelDown: wrapChannelDown, onSelect: triggerOSD, - onBack: onOpenGuide // Often on TVs 'Menu' or 'Back' opens Guide/App list + onBack: onOpenGuide }); + // Debug Info Toggle + 
useEffect(() => { + const handleKeyDown = (e) => { + // Ignore if user is typing in an input + if (document.activeElement.tagName === 'INPUT' || document.activeElement.tagName === 'TEXTAREA') return; + if (e.key === 'i' || e.key === 'I') { + setShowDebug(prev => !prev); + } + }; + window.addEventListener('keydown', handleKeyDown); + return () => window.removeEventListener('keydown', handleKeyDown); + }, []); + // Fetch channels from Django API useEffect(() => { fetchChannels().then(data => { - // If db gives us channels, pad them with a fallback video stream based on index const mapped = data.map((ch, idx) => ({ ...ch, - file: FALLBACK_VIDEOS[idx % FALLBACK_VIDEOS.length] + fallbackFile: FALLBACK_VIDEOS[idx % FALLBACK_VIDEOS.length] })); if (mapped.length === 0) { - // Fallback if db is completely empty - mapped.push({ id: 99, channel_number: '99', name: 'Default Local feed', file: FALLBACK_VIDEOS[0] }); + mapped.push({ id: 99, channel_number: '99', name: 'Default Feed', fallbackFile: FALLBACK_VIDEOS[0] }); } setChannels(mapped); setLoading(false); }).catch(err => { console.error(err); - setChannels([{ id: 99, channel_number: '99', name: 'Error Offline', file: FALLBACK_VIDEOS[0] }]); + setChannels([{ id: 99, channel_number: '99', name: 'Offline', fallbackFile: FALLBACK_VIDEOS[0] }]); setLoading(false); }); }, []); + // Fetch "Now Playing" metadata for the current channel group whenever currentIndex changes + useEffect(() => { + if (channels.length === 0) return; + + const activeIndices = [currentIndex, prevIndex, nextIndex]; + activeIndices.forEach(idx => { + const chan = channels[idx]; + fetchChannelNow(chan.id).then(data => { + setNowPlaying(prev => ({ ...prev, [chan.id]: data })); + }).catch(() => { + setNowPlaying(prev => ({ ...prev, [chan.id]: null })); + }); + }); + }, [currentIndex, channels, prevIndex, nextIndex]); + // Initial OSD hide useEffect(() => { if (!loading) triggerOSD(); return () => clearTimeout(osdTimerRef.current); - }, [loading]); + }, [loading, triggerOSD]); - if (loading) { + if (loading || channels.length === 0) { return
Connecting to PYTV Backend...
; } + const currentChan = channels[currentIndex]; + const airing = nowPlaying[currentChan.id]; + return (
- {/* - We map over all channels, but selectively apply 'playing' or 'buffering' - classes to only the surrounding 3 elements. The rest are completely unrendered - to save immense DOM and memory resources. - */} {channels.map((chan, index) => { const isCurrent = index === currentIndex; const isPrev = index === prevIndex; const isNext = index === nextIndex; - // Only mount the node if it's one of the 3 active buffers if (!isCurrent && !isPrev && !isNext) return null; let stateClass = 'buffering'; if (isCurrent) stateClass = 'playing'; + // Use the current airing's media item file if available, else fallback + const currentAiring = nowPlaying[chan.id]; + let videoSrc = chan.fallbackFile; + if (currentAiring && currentAiring.media_item_path) { + if (currentAiring.media_item_path.startsWith('http')) { + videoSrc = currentAiring.media_item_path; + } else { + // Django serves cached media at root, Vite proxies /media to root + // Remove leading slashes or /media/ to avoid double slashes like /media//mock + const cleanPath = currentAiring.media_item_path.replace(/^\/?(media)?\/*/, ''); + videoSrc = `/media/${cleanPath}`; + } + } + return (