mirror of
https://github.com/mudler/LocalAI.git
synced 2026-05-02 16:20:14 -05:00
6d5bde860b
* WIP * wip * wip * Make it compile * Update json.hpp * this shouldn't be private for now * Add logs * Reset auto detected template Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Re-enable grammars * This seems to be broken - https://github.com/ggml-org/llama.cpp/commit/360a9c98e13d35f322b4c5b1309aab0cc90ed82b#diff-a18a8e64e12a01167d8e98fc[…]cccf0d4eed09d76d879L2998-L3207 Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Placeholder * Simplify image loading * use completion type * disable streaming Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * correctly return timings Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Remove some debug logging * Adapt tests Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Keep header * embedding: do not use oai type Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Sync from server.cpp * Use utils and json directly from llama.cpp Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Sync with upstream Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix: copy json.hpp from the correct location Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix: add httplib * sync llama.cpp Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Embeddiongs: set OAICOMPAT_TYPE_EMBEDDING Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * feat: sync with server.cpp by including it Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * make it darwin-compatible Signed-off-by: Ettore Di Giacinto <mudler@localai.io> --------- Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
90 lines
3.2 KiB
CMake
90 lines
3.2 KiB
CMake
|
|
## XXX: In some versions of CMake clip wasn't being built before llama.
|
|
## This is a hack for now, but it should be fixed in the future.
|
|
# set(TARGET myclip)
|
|
# add_library(${TARGET} clip.cpp clip.h clip-impl.h llava.cpp llava.h)
|
|
# install(TARGETS ${TARGET} LIBRARY)
|
|
# target_include_directories(myclip PUBLIC .)
|
|
# target_include_directories(myclip PUBLIC ../..)
|
|
# target_include_directories(myclip PUBLIC ../../common)
|
|
# target_link_libraries(${TARGET} PRIVATE common ggml llama ${CMAKE_THREAD_LIBS_INIT})
|
|
# target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
|
# if (NOT MSVC)
|
|
# target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h
|
|
# endif()
|
|
# END CLIP hack
|
|
|
|
|
|
# NOTE(review): cmake_minimum_required must be the first command in the file —
# it establishes policy defaults; previously it was buried after set() calls.
cmake_minimum_required(VERSION 3.15)

# Single definition of the output target name (it was set twice before).
set(TARGET grpc-server)

# The llama.cpp server sources included by grpc-server.cpp require C++17.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Logical names of the protobuf / gRPC reflection libraries, expanded into
# imported-target names (protobuf::libprotobuf, gRPC::grpc++_reflection) below.
set(_PROTOBUF_LIBPROTOBUF libprotobuf)
set(_REFLECTION grpc++_reflection)
# Use the dedicated APPLE variable instead of string-matching CMAKE_SYSTEM_NAME.
if (APPLE)
    # Set correct Homebrew install folder for Apple Silicon and Intel Macs.
    if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64")
        set(HOMEBREW_DEFAULT_PREFIX "/opt/homebrew")
    else()
        set(HOMEBREW_DEFAULT_PREFIX "/usr/local")
    endif()

    # NOTE(review): directory-scoped on purpose — every target configured below
    # needs the Homebrew-provided gRPC/protobuf/abseil headers and libraries.
    link_directories("${HOMEBREW_DEFAULT_PREFIX}/lib")
    include_directories("${HOMEBREW_DEFAULT_PREFIX}/include")
endif()
# Locate the gRPC/protobuf toolchain. CONFIG mode is required so the imported
# targets (protobuf::libprotobuf, gRPC::grpc++, gRPC::grpc++_reflection, ...)
# are available for target_link_libraries below.
find_package(absl CONFIG REQUIRED)
find_package(Protobuf CONFIG REQUIRED)
find_package(gRPC CONFIG REQUIRED)

# Code-generation tools used by the add_custom_command below.
find_program(_PROTOBUF_PROTOC protoc)
set(_GRPC_GRPCPP grpc++)
find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin)

# Fail early with a clear message instead of a cryptic build-time protoc error.
if(NOT _PROTOBUF_PROTOC)
    message(FATAL_ERROR "protoc not found - install protobuf or add it to PATH")
endif()
if(NOT _GRPC_CPP_PLUGIN_EXECUTABLE)
    message(FATAL_ERROR "grpc_cpp_plugin not found - install gRPC or add it to PATH")
endif()

# Generated .pb.h/.grpc.pb.h land in the binary dir; protobuf headers are
# needed by the generated code.
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${Protobuf_INCLUDE_DIRS})

message(STATUS "Using protobuf version ${Protobuf_VERSION} | Protobuf_INCLUDE_DIRS: ${Protobuf_INCLUDE_DIRS} | CMAKE_CURRENT_BINARY_DIR: ${CMAKE_CURRENT_BINARY_DIR}")
# Proto file: resolved to an absolute path relative to this directory.
get_filename_component(hw_proto "../../../../../../backend/backend.proto" ABSOLUTE)
get_filename_component(hw_proto_path "${hw_proto}" PATH)

# Generated sources (all emitted into the binary dir, never the source tree).
set(hw_proto_srcs "${CMAKE_CURRENT_BINARY_DIR}/backend.pb.cc")
set(hw_proto_hdrs "${CMAKE_CURRENT_BINARY_DIR}/backend.pb.h")
set(hw_grpc_srcs "${CMAKE_CURRENT_BINARY_DIR}/backend.grpc.pb.cc")
set(hw_grpc_hdrs "${CMAKE_CURRENT_BINARY_DIR}/backend.grpc.pb.h")

# Run protoc once to emit both the plain protobuf and the gRPC C++ sources.
# VERBATIM guarantees platform-independent argument escaping.
add_custom_command(
      OUTPUT "${hw_proto_srcs}" "${hw_proto_hdrs}" "${hw_grpc_srcs}" "${hw_grpc_hdrs}"
      COMMAND ${_PROTOBUF_PROTOC}
      ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}"
        --cpp_out "${CMAKE_CURRENT_BINARY_DIR}"
        -I "${hw_proto_path}"
        --plugin=protoc-gen-grpc="${_GRPC_CPP_PLUGIN_EXECUTABLE}"
        "${hw_proto}"
      DEPENDS "${hw_proto}"
      COMMENT "Generating C++ protobuf/gRPC sources from backend.proto"
      VERBATIM)
# hw_grpc_proto: helper library holding the generated protobuf/gRPC sources so
# the server (and any future tool) can reuse them.
add_library(hw_grpc_proto
  ${hw_grpc_srcs}
  ${hw_grpc_hdrs}
  ${hw_proto_srcs}
  ${hw_proto_hdrs})

# The generated code includes protobuf/gRPC headers; link them PUBLIC so every
# consumer of hw_grpc_proto inherits the include paths and link dependencies
# instead of having to spell them out on its own link line.
target_link_libraries(hw_grpc_proto
  PUBLIC
    protobuf::${_PROTOBUF_LIBPROTOBUF}
    gRPC::${_GRPC_GRPCPP})

# The gRPC server binary; headers are listed so IDE generators show them.
add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp httplib.h)
# ../llava provides clip/llava headers; CMAKE_SOURCE_DIR gives access to the
# llama.cpp tree this backend is built inside of.
target_include_directories(${TARGET} PRIVATE ../llava)
target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})

target_link_libraries(${TARGET} PRIVATE common llama mtmd ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
  absl::flags_parse
  gRPC::${_REFLECTION}
  gRPC::${_GRPC_GRPCPP}
  protobuf::${_PROTOBUF_LIBPROTOBUF})

# NOTE(review): was cxx_std_11, which understated the real requirement — the
# file sets CMAKE_CXX_STANDARD 17 above and the included llama.cpp server
# sources need C++17. Declare the true minimum.
target_compile_features(${TARGET} PRIVATE cxx_std_17)

# BUILD_INFO is an optional llama.cpp target that stamps build metadata; only
# order against it when the enclosing build defines it.
if(TARGET BUILD_INFO)
    add_dependencies(${TARGET} BUILD_INFO)
endif()