Files
html/notebooks/WEVAL_LLaMA_Factory.ipynb
2026-04-12 22:57:03 +02:00

1 line
670 B
JSON (Jupyter notebook, nbformat 4)

{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {"provenance": [], "gpuType": "T4"},
    "kernelspec": {"name": "python3", "display_name": "Python 3"},
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "# WEVAL LLaMA-Factory\n",
        "Web UI fine-tuning 100+ models\n",
        "**Free GPU: T4 on Colab**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# Clone and install LLaMA-Factory into the kernel's environment.\n",
        "# %pip (not !pip) ensures the package is installed for this kernel.\n",
        "!git clone https://github.com/hiyouga/LLaMA-Factory\n",
        "%cd LLaMA-Factory\n",
        "%pip install -e '.[torch,metrics]'\n",
        "print('Installed!')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# Launch Web UI. GRADIO_SHARE=1 publishes a public gradio.live URL,\n",
        "# which is required to reach the UI from a Colab runtime.\n",
        "!GRADIO_SHARE=1 llamafactory-cli webui"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}