<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Training on homelab</title>
    <link>https://homelab.nbkelley.com/tags/training/</link>
    <description>Recent content in Training on homelab</description>
    <generator>Hugo</generator>
    <language>en</language>
    <lastBuildDate>Thu, 23 Apr 2026 00:00:00 +0000</lastBuildDate>
    <atom:link href="https://homelab.nbkelley.com/tags/training/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>Local Model Training &amp; Fine-Tuning Guide</title>
      <link>https://homelab.nbkelley.com/docs/ai/local-model-training/</link>
      <pubDate>Thu, 23 Apr 2026 00:00:00 +0000</pubDate>
      <guid>https://homelab.nbkelley.com/docs/ai/local-model-training/</guid>
      <description>&lt;h1 id=&#34;local-model-training--fine-tuning-guide&#34;&gt;Local Model Training &amp;amp; Fine-Tuning Guide&lt;a class=&#34;anchor&#34; href=&#34;#local-model-training--fine-tuning-guide&#34;&gt;#&lt;/a&gt;&lt;/h1&gt;&#xA;&lt;h2 id=&#34;what-was-established&#34;&gt;What Was Established&lt;a class=&#34;anchor&#34; href=&#34;#what-was-established&#34;&gt;#&lt;/a&gt;&lt;/h2&gt;&#xA;&lt;p&gt;Guide for fine-tuning local LLMs (DeepSeek) using Hugging Face &lt;code&gt;transformers&lt;/code&gt;, with emphasis on VRAM-efficient techniques for single-GPU setups.&lt;/p&gt;&#xA;&lt;h2 id=&#34;key-decisions&#34;&gt;Key Decisions&lt;a class=&#34;anchor&#34; href=&#34;#key-decisions&#34;&gt;#&lt;/a&gt;&lt;/h2&gt;&#xA;&lt;ul&gt;&#xA;&lt;li&gt;&lt;strong&gt;Framework&lt;/strong&gt;: Hugging Face &lt;code&gt;transformers&lt;/code&gt; + &lt;code&gt;Trainer&lt;/code&gt; API for fine-tuning&lt;/li&gt;&#xA;&lt;li&gt;&lt;strong&gt;Model&lt;/strong&gt;: &lt;code&gt;deepseek-ai/deepseek-llm-7b&lt;/code&gt; (example model)&lt;/li&gt;&#xA;&lt;li&gt;&lt;strong&gt;Efficiency&lt;/strong&gt;: LoRA (Low-Rank Adaptation) + 4-bit quantization via &lt;code&gt;bitsandbytes&lt;/code&gt; to fit large models on consumer GPUs&lt;/li&gt;&#xA;&lt;/ul&gt;&#xA;&lt;h2 id=&#34;setup&#34;&gt;Setup&lt;a class=&#34;anchor&#34; href=&#34;#setup&#34;&gt;#&lt;/a&gt;&lt;/h2&gt;&#xA;&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; style=&#34;color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4;&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span style=&#34;display:flex;&#34;&gt;&lt;span&gt;pip install torch transformers datasets accelerate peft bitsandbytes&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;p&gt;Verify GPU: &lt;code&gt;nvidia-smi&lt;/code&gt; — need CUDA 11.8+.&lt;/p&gt;</description>
    </item>
  </channel>
</rss>
