{
  "version": 1,
  "pages": [
    {
      "id": "JROK0OeueC7plv4mrb7F",
      "title": "Introducing Lyn",
      "pathname": "/lyn",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "The video-based super-agential multimodal ecosystem.",
      "breadcrumbs": [{ "label": "LYN DOCS" }]
    },
    {
      "id": "7d30PyVJVFW029hHig6d",
      "title": "Our Vision for Video AI and Human-Centric Agential Video",
      "pathname": "/lyn/lyn-docs/our-vision-for-video-ai-and-human-centric-agential-video",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Photorealistic, human-centric synchronous video agents that can change human lives for the better.",
      "breadcrumbs": [{ "label": "LYN DOCS" }]
    },
    {
      "id": "m4OnfS5CG826oFR7h7zA",
      "title": "Decentralized Video AI Layer",
      "pathname": "/lyn/lyn-docs/decentralized-video-ai-layer",
      "siteSpaceId": "sitesp_FjHmB",
      "breadcrumbs": [{ "label": "LYN DOCS" }]
    },
    {
      "id": "IOiX4VVjR6T7h9VHYn2Z",
      "title": "Video Agent Generation: Diffusion Based Model",
      "pathname": "/lyn/lyn-docs/video-agent-generation-diffusion-based-model",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "A novel approach for powerful, controllable human-like video generation.",
      "breadcrumbs": [{ "label": "LYN DOCS" }]
    },
    {
      "id": "eETr7JCBDuFkrMRLUUin",
      "title": "Text-to-Speech Synthesis Using Diffusion Bridge Model",
      "pathname": "/lyn/lyn-docs/video-agent-generation-diffusion-based-model/text-to-speech-synthesis-using-diffusion-bridge-model",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "A model that outperforms autoregressive and diffusion models for high quality output that is structured, noiseless, and quick on inference.",
      "breadcrumbs": [{ "label": "LYN DOCS" }, { "label": "Video Agent Generation: Diffusion Based Model" }]
    },
    {
      "id": "5zfDvPGvusjYEP044iTH",
      "title": "Real-time Conversational Generation: A Framework for Voice-driven Facial Animation",
      "pathname": "/lyn/lyn-docs/video-agent-generation-diffusion-based-model/real-time-conversational-generation-a-framework-for-voice-driven-facial-animation",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "A state-of-the-art approach that bridges the gap between high-quality video generation and the latency challenges of real-time interaction.",
      "breadcrumbs": [{ "label": "LYN DOCS" }, { "label": "Video Agent Generation: Diffusion Based Model" }]
    },
    {
      "id": "c8JnYbNsXU9MrAVPSwTB",
      "title": "Decentralized Video Agent Applications and Capabilities",
      "pathname": "/lyn/lyn-docs/decentralized-video-agent-applications-and-capabilities",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "On-chain video agent applications, capabilities, and their place in the new world of AI.",
      "breadcrumbs": [{ "label": "LYN DOCS" }]
    },
    {
      "id": "fG4bhWkht3v4ZzFssxIQ",
      "title": "Autoregressive Modeling with Vector Quantization",
      "pathname": "/lyn/ai-modeling-research/autoregressive-modeling-with-vector-quantization",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Features and approaches of the first-of-its-kind foundational video AI model powering video agents in the Lyn ecosystem.",
      "breadcrumbs": [{ "label": "AI Modeling Research" }]
    },
    {
      "id": "Hvvbv3KBTuvXcie3SRP6",
      "title": "Hierarchical Spatial-Temporal Video Generation Architecture",
      "pathname": "/lyn/ai-modeling-research/autoregressive-modeling-with-vector-quantization/hierarchical-spatial-temporal-video-generation-architecture",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Encoding video into multi-scale latent video tokens and decoding video tokens back into the pixel domain.",
      "breadcrumbs": [{ "label": "AI Modeling Research" }, { "label": "Autoregressive Modeling with Vector Quantization" }]
    },
    {
      "id": "RnpKBEW01C1aJjkqDxXl",
      "title": "Efficient Autoregressive Video Generation via Token Masking",
      "pathname": "/lyn/ai-modeling-research/autoregressive-modeling-with-vector-quantization/efficient-autoregressive-video-generation-via-token-masking",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Efficient modeling of complex spatial-temporal dynamics in video data.",
      "breadcrumbs": [{ "label": "AI Modeling Research" }, { "label": "Autoregressive Modeling with Vector Quantization" }]
    },
    {
      "id": "k9TxDmAsUKYldTgVBVI0",
      "title": "Autoregressive Text-to-Visual Generation via Hybrid Architecture",
      "pathname": "/lyn/ai-modeling-research/autoregressive-modeling-with-vector-quantization/autoregressive-text-to-visual-generation-via-hybrid-architecture",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "A unique hybrid architecture of Mamba and Transformer for visual generation.",
      "breadcrumbs": [{ "label": "AI Modeling Research" }, { "label": "Autoregressive Modeling with Vector Quantization" }]
    },
    {
      "id": "BVgqgAfuQqmpsA9lKO9g",
      "title": "From Video to Movie: Composite Video Editing and RHF for Quality",
      "pathname": "/lyn/ai-modeling-research/from-video-to-movie-composite-video-editing-and-rhf-for-quality",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "A new framework extending from autoregressive video generation for industry-leading video edit precision, and RHF-based generation quality.",
      "breadcrumbs": [{ "label": "AI Modeling Research" }]
    },
    {
      "id": "O1FKr34WNTUOB8HnH0Jz",
      "title": "VideoGen-of-Thought",
      "pathname": "/lyn/ai-modeling-research/from-video-to-movie-composite-video-editing-and-rhf-for-quality/videogen-of-thought",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "A novel approach to the generation of long, consistently structured and homogeneous content.",
      "breadcrumbs": [{ "label": "AI Modeling Research" }, { "label": "From Video to Movie: Composite Video Editing and RHF for Quality" }]
    },
    {
      "id": "gjguGjBXkPbkEiUGV5NP",
      "title": "Supercharging MLLMs and LVLMs",
      "pathname": "/lyn/ai-modeling-research/supercharging-mllms-and-lvlms",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Multi-modal Robustness benchmark (MMR) and Text-relevant Visual Token Selection (TVTS) developed for a better, open video AI.",
      "breadcrumbs": [{ "label": "AI Modeling Research" }]
    },
    {
      "id": "tOJ4rHzciZQ1t354pN0u",
      "title": "Everlyn's Data Pre-processing Pipeline",
      "pathname": "/lyn/technical-designs/everlyns-data-pre-processing-pipeline",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "A proprietary data pre-processing pipeline developed in-house for best-in-class speed and performance.",
      "breadcrumbs": [{ "label": "Technical Designs" }]
    },
    {
      "id": "6lIFDQZuKpYa6x1SZykG",
      "title": "Towards Intelligent Video Captioning and Annotation",
      "pathname": "/lyn/technical-designs/everlyns-data-pre-processing-pipeline/towards-intelligent-video-captioning-and-annotation",
      "siteSpaceId": "sitesp_FjHmB",
      "breadcrumbs": [{ "label": "Technical Designs" }, { "label": "Everlyn's Data Pre-processing Pipeline" }]
    },
    {
      "id": "KRzofh2ny0mgfP3cbRBS",
      "title": "$LYN Overview",
      "pathname": "/lyn/technical-designs/usdlyn-overview",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "LYN token is at the foundation of the Lyn ecosystem, supporting activity and transactions for all video agents on the platform.",
      "breadcrumbs": [{ "label": "Technical Designs" }]
    },
    {
      "id": "kvvNZ365P3wwxE8Ndrc0",
      "title": "Supplementary Information",
      "pathname": "/lyn/technical-designs/supplementary-information",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Additional info referenced throughout the Lyn Gitbook.",
      "breadcrumbs": [{ "label": "Technical Designs" }]
    },
    {
      "id": "EIDHYj4HYol7B8YLaDlA",
      "title": "Definitions",
      "pathname": "/lyn/technical-designs/supplementary-information/definitions",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Definitions of the techniques and technical terminology in the research sections of the Lyn Gitbook.",
      "breadcrumbs": [{ "label": "Technical Designs" }, { "label": "Supplementary Information" }]
    },
    {
      "id": "DmOAMpU7f435U9Qz2Hhh",
      "title": "References",
      "pathname": "/lyn/technical-designs/supplementary-information/references",
      "siteSpaceId": "sitesp_FjHmB",
      "description": "Cited works referenced throughout the Lyn Gitbook.",
      "breadcrumbs": [{ "label": "Technical Designs" }, { "label": "Supplementary Information" }]
    }
  ]
}