Commit

Auto start for all

ryan-the-crayon committed Apr 29, 2024
1 parent c428b03 commit 124d406
Showing 7 changed files with 260 additions and 127 deletions.
2 changes: 2 additions & 0 deletions src/cliPref.ts
@@ -8,11 +8,13 @@ export async function getCliPref(logger?: SimpleLogger) {
const cliPrefSchema = z.object({
autoLaunchMinimizedWarned: z.boolean(),
lastLoadedModels: z.array(z.string()).optional(),
autoStartServer: z.boolean().optional(),
});
type CliPref = z.infer<typeof cliPrefSchema>;
const defaultCliPref: CliPref = {
autoLaunchMinimizedWarned: false,
lastLoadedModels: [],
autoStartServer: undefined,
};
const cliPref = new SimpleFileData(
path.join(os.homedir(), ".cache/lm-studio/.internal/cli-pref.json"),
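
For context, the new autoStartServer field is persisted alongside the existing preferences in the cli-pref.json file referenced above. A minimal sketch of the resulting preference object after a user accepts the auto-start prompt introduced further down (illustrative only; the lastLoadedModels entry is a made-up placeholder):

// Illustrative CliPref value after the user answers "yes" to the auto-start prompt.
const exampleCliPref: CliPref = {
  autoLaunchMinimizedWarned: false,
  lastLoadedModels: ["publisher/example-model-GGUF"], // placeholder, not a real model path
  autoStartServer: true, // written via pref.setWithImmer in createClient.ts
};
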
133 changes: 121 additions & 12 deletions src/createClient.ts
@@ -1,9 +1,125 @@
import { text, type SimpleLogger } from "@lmstudio/lms-common";
import { LMStudioClient } from "@lmstudio/sdk";
import chalk from "chalk";
import { checkHttpServer, getServerLastStatus } from "./subcommands/server";
import { flag } from "cmd-ts";
import inquirer from "inquirer";
import { platform } from "os";
import { getCliPref } from "./cliPref";
import {
checkHttpServer,
getServerLastStatus,
startServer,
type StartServerOpts,
} from "./subcommands/server";

export async function createClient(logger: SimpleLogger) {
export const createClientArgs = {
yes: flag({
long: "yes",
short: "y",
description: text`
Suppress all confirmations and warnings. Useful for scripting.
`,
}),
noLaunch: flag({
long: "no-launch",
description: text`
Don't launch LM Studio if it's not running. Has no effect if auto-start server is disabled.
`,
}),
};

interface CreateClientArgs {
yes?: boolean;
noLaunch?: boolean;
}

async function maybeTryStartServer(logger: SimpleLogger, startServerOpts: StartServerOpts) {
const { yes } = startServerOpts;
const pref = await getCliPref(logger);
if (pref.get().autoStartServer === undefined && !yes) {
logger.warnWithoutPrefix(text`
${"\n"}${chalk.greenBright.underline("Server Auto Start")}
LM Studio needs to be running in server mode to perform this operation.${"\n"}
`);
const { cont } = await inquirer.prompt([
{
type: "confirm",
name: "cont",
message: "Do you want to always start the server if it's not running? (will not ask again)",
default: true,
},
]);
if (cont) {
logger.info("lms will automatically start the server if it's not running.");
} else {
logger.info("lms WILL NOT automatically start the server if it's not running.");
}
if (platform() === "win32") {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
pref.setWithImmer(draft => {
draft.autoStartServer = cont;
});
if (!cont) {
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
}
logger.info("Starting the server...");
return await startServer(logger, startServerOpts);
}
if (pref.get().autoStartServer === true) {
logger.info("LM Studio is not running in server mode. Starting the server...");
return await startServer(logger, startServerOpts);
} else if (pref.get().autoStartServer === false) {
logger.error("LM Studio needs to be running in the server mode to perform this operation.");
if (platform() === "win32") {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
} else {
// autoStartServer is still undefined here, which means the prompt above was
// skipped because --yes was passed.
logger.info(text`
LM Studio is not running in server mode. Starting the server because
${chalk.yellowBright("--yes")} is set
`);
return await startServer(logger, startServerOpts);
}
}

export interface CreateClientOpts {}

export async function createClient(
logger: SimpleLogger,
{ noLaunch, yes }: CreateClientArgs,
_opts: CreateClientOpts = {},
) {
let port: number;
try {
const lastStatus = await getServerLastStatus(logger);
@@ -13,16 +129,9 @@ export async function createClient(logger: SimpleLogger) {
port = 1234;
}
if (!(await checkHttpServer(logger, port))) {
logger.error(
text`
LM Studio needs to be running in server mode to perform this operation.
To start the server, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`,
);
process.exit(1);
if (!(await maybeTryStartServer(logger, { port, noLaunch, yes }))) {
process.exit(1);
}
}
const baseUrl = `ws://127.0.0.1:${port}`;
logger.debug(`Connecting to server with baseUrl ${baseUrl}`);
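
Any subcommand can opt into the new behavior by spreading createClientArgs into its args and forwarding the parsed args to createClient, as the subcommand changes below do. A minimal sketch of a hypothetical subcommand (the name "status" and its description are illustrative and not part of this commit):

import { command } from "cmd-ts";
import { createClient, createClientArgs } from "../createClient";
import { createLogger, logLevelArgs } from "../logLevel";

// Hypothetical subcommand showing how --yes / --no-launch flow into createClient.
export const status = command({
  name: "status",
  description: "Hypothetical example subcommand",
  args: {
    ...logLevelArgs,
    ...createClientArgs, // adds --yes / -y and --no-launch
  },
  handler: async args => {
    const logger = createLogger(args);
    // createClient now prompts for, or applies, server auto-start before connecting.
    const client = await createClient(logger, args);
    // ... use the connected LMStudioClient as before ...
  },
});

In practice this means that, for example, lms ls --yes will start the server without prompting if it is not already running.
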
8 changes: 5 additions & 3 deletions src/subcommands/list.ts
@@ -4,7 +4,7 @@ import chalk from "chalk";
import { command, flag } from "cmd-ts";
import columnify from "columnify";
import { architectureInfoLookup } from "../architectureStylizations";
import { createClient } from "../createClient";
import { createClient, createClientArgs } from "../createClient";
import { formatSizeBytes1000, formatSizeBytesWithColor1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel";

@@ -189,6 +189,7 @@ export const ls = command({
description: "List all downloaded models",
args: {
...logLevelArgs,
...createClientArgs,
llm: flag({
long: "llm",
description: "Show only LLM models",
@@ -208,7 +209,7 @@
},
handler: async args => {
const logger = createLogger(args);
const client = await createClient(logger);
const client = await createClient(logger, args);

const { llm, embedding, json, detailed } = args;

@@ -299,14 +300,15 @@ export const ps = command({
description: "List all loaded models",
args: {
...logLevelArgs,
...createClientArgs,
json: flag({
long: "json",
description: "Outputs in JSON format to stdout",
}),
},
handler: async args => {
const logger = createLogger(args);
const client = await createClient(logger);
const client = await createClient(logger, args);

const { json } = args;

28 changes: 11 additions & 17 deletions src/subcommands/load.ts
@@ -7,7 +7,7 @@ import fuzzy from "fuzzy";
import inquirer from "inquirer";
import inquirerPrompt from "inquirer-autocomplete-prompt";
import { getCliPref } from "../cliPref";
import { createClient } from "../createClient";
import { createClient, createClientArgs } from "../createClient";
import { formatElapsedTime } from "../formatElapsedTime";
import { formatSizeBytes1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel";
@@ -42,6 +42,7 @@ export const load = command({
description: "Load a model",
args: {
...logLevelArgs,
...createClientArgs,
path: positional({
type: optional(string),
description: "The path of the model to load. If not provided, ",
@@ -62,8 +63,9 @@
long: "yes",
short: "y",
description: text`
Answer yes to all prompts. If there are multiple models matching the path, the first one
will be loaded. Fails if the path provided does not match any model.
Suppress all confirmations and warnings. Useful for scripting. If there are multiple
models matching the path, the first one will be loaded. Fails if the path provided does not
match any model.
`,
}),
exact: flag({
@@ -87,7 +89,7 @@ export const load = command({
const { gpu, yes, exact, identifier } = args;
let { path } = args;
const logger = createLogger(args);
const client = await createClient(logger);
const client = await createClient(logger, args);
const cliPref = await getCliPref(logger);

const lastLoadedModels = cliPref.get().lastLoadedModels ?? [];
@@ -104,19 +106,6 @@
return aIndex < bIndex ? -1 : aIndex > bIndex ? 1 : 0;
});

if (exact && yes) {
logger.errorWithoutPrefix(
makeTitledPrettyError(
"Invalid usage",
text`
The ${chalk.yellowBright("--exact")} and ${chalk.yellowBright("--yes")} flags cannot be
used together.
`,
).message,
);
process.exit(1);
}

if (exact) {
const model = models.find(model => model.path === path);
if (path === undefined) {
@@ -188,6 +177,11 @@
);
process.exit(1);
}
if (initialFilteredModels.length > 1) {
logger.warnText`
${initialFilteredModels.length} models match the provided path. Loading the first one.
`;
}
model = models[initialFilteredModels[0].index];
} else {
console.info();