command-r-plus-Q2_K.gguf.ipynb (gist by @chottokun, created April 7, 2024)
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "L4",
"machine_shape": "hm",
"authorship_tag": "ABX9TyOh+DxRJ2sFeYmuj8jl+C8I",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/chottokun/f864944115c8e53f709102678767ccab/command-r-plus-q2_k-gguf.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"source": [
"!nvidia-smi"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "9x42F0dpXa9w",
"outputId": "367ca4f6-d55a-44cd-fc0c-2a5cf12c15cf"
},
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Sun Apr 7 00:37:57 2024 \n",
"+---------------------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 535.104.05 Driver Version: 535.104.05 CUDA Version: 12.2 |\n",
"|-----------------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |\n",
"| | | MIG M. |\n",
"|=========================================+======================+======================|\n",
"| 0 NVIDIA L4 Off | 00000000:00:03.0 Off | 0 |\n",
"| N/A 35C P8 12W / 72W | 1MiB / 23034MiB | 0% Default |\n",
"| | | N/A |\n",
"+-----------------------------------------+----------------------+----------------------+\n",
" \n",
"+---------------------------------------------------------------------------------------+\n",
"| Processes: |\n",
"| GPU GI CI PID Type Process name GPU Memory |\n",
"| ID ID Usage |\n",
"|=======================================================================================|\n",
"| No running processes found |\n",
"+---------------------------------------------------------------------------------------+\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# https://huggingface.co/pmysl/c4ai-command-r-plus-GGUF"
],
"metadata": {
"id": "r9nDJHDSKZ7y"
},
"execution_count": null,
"outputs": []
},
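{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal alternative sketch for the download step further below: fetching the same file with `huggingface_hub` (preinstalled on Colab) instead of `wget`. The `local_dir` choice is an assumption that matches the `/content` path the inference cells use."
]
},
{
"cell_type": "code",
"source": [
"# Sketch: download the Q2_K GGUF via huggingface_hub instead of wget.\n",
"from huggingface_hub import hf_hub_download\n",
"\n",
"gguf_path = hf_hub_download(\n",
"    repo_id=\"pmysl/c4ai-command-r-plus-GGUF\",\n",
"    filename=\"command-r-plus-Q2_K.gguf\",\n",
"    local_dir=\"/content\",  # where the inference cells expect the model\n",
")\n",
"print(gguf_path)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},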
{
"cell_type": "code",
"source": [
"!git clone https://github.com/Carolinabanana/llama.cpp.git llama.cpp-fork"
],
"metadata": {
"id": "ra8tiRqv980T"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"%cd llama.cpp-fork\n",
"!git reset --hard 8b6577bd631fec33eeadb4b9dfc5a07ed2118148"
],
"metadata": {
"id": "WikAgmnkCmnh"
},
"execution_count": null,
"outputs": []
},
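{
"cell_type": "markdown",
"metadata": {},
"source": [
"The hard reset pins the fork to the exact commit the runs below were made with (the logs report `build = 2622 (8b6577b)`). At the time, Command R+ support lived in this fork; it has since been merged into upstream llama.cpp."
]
},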
{
"cell_type": "code",
"source": [
"!mkdir build\n",
"%cd build\n",
"!cmake .. -DLLAMA_CUBLAS=ON\n",
"!cmake --build . --config Release\n",
"!cp bin/main ..\n",
"%cd .."
],
"metadata": {
"id": "v4U-jJq7CrDq"
},
"execution_count": null,
"outputs": []
},
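{
"cell_type": "markdown",
"metadata": {},
"source": [
"`-DLLAMA_CUBLAS=ON` compiles the cuBLAS (CUDA) backend so `-ngl` can offload layers to the L4. An optional sanity check, not part of the original notebook, that the binary exists and links against the CUDA libraries:"
]
},
{
"cell_type": "code",
"source": [
"!ls -lh ./main\n",
"!ldd ./main | grep -iE 'cuda|cublas' || echo 'no CUDA libraries linked'"
],
"metadata": {},
"execution_count": null,
"outputs": []
},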
{
"cell_type": "code",
"source": [
"!wget -c https://huggingface.co/pmysl/c4ai-command-r-plus-GGUF/resolve/main/command-r-plus-Q2_K.gguf"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "DPoEV4nfuomi",
"outputId": "8dd2ffec-a5ce-4780-d1ee-649c8f3359ae"
},
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"--2024-04-07 00:38:41-- https://huggingface.co/pmysl/c4ai-command-r-plus-GGUF/resolve/main/command-r-plus-Q2_K.gguf\n",
"Resolving huggingface.co (huggingface.co)... 18.164.174.118, 18.164.174.17, 18.164.174.55, ...\n",
"Connecting to huggingface.co (huggingface.co)|18.164.174.118|:443... connected.\n",
"HTTP request sent, awaiting response... 302 Found\n",
"Location: https://cdn-lfs-us-1.huggingface.co/repos/fb/4a/fb4a485e57e2d156014ca5c4a09f1532398aa6485c0972ac660b479c95dce198/47e139a57872a72096c05b043b1ec6c2f08451da7df0d84d45168708667b98f5?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27command-r-plus-Q2_K.gguf%3B+filename%3D%22command-r-plus-Q2_K.gguf%22%3B&Expires=1712709521&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjcwOTUyMX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zL2ZiLzRhL2ZiNGE0ODVlNTdlMmQxNTYwMTRjYTVjNGEwOWYxNTMyMzk4YWE2NDg1YzA5NzJhYzY2MGI0NzljOTVkY2UxOTgvNDdlMTM5YTU3ODcyYTcyMDk2YzA1YjA0M2IxZWM2YzJmMDg0NTFkYTdkZjBkODRkNDUxNjg3MDg2NjdiOThmNT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=KnJ7e0GdxNmjNE3bz-2l5uiXxqp7yYdI%7EL47UCyADp7FkJ6TMoU2BMYHJWDgXekBHmZrYcapL2Hr9dKBfTgrKTubDT2gvGv8ZKjAsxmPIlD-bXS7YHjjHVt6%7EXeUH6F2S5i9SFoqiqPDs4x%7EjrJg3dgEU7IsZHrtYOhaDuYwzwarXUXEw3sKNxiX0mCl8rnxNCxSrlmMWu1qiQND54WT3Exvtpj7ROlF8rdFtsb7ssLrRcUaOR0QnOl%7Et3DiBPDPmZ7E2v-2%7EUtiLvNHukMDtoczpUv3Stjbi1yh%7E2QP%7EqY4Qasf-7S5nJ-glY6AAS9K7nRJB4CR%7E1Dut8NzFBP1qA__&Key-Pair-Id=KCD77M1F0VK2B [following]\n",
"--2024-04-07 00:38:41-- https://cdn-lfs-us-1.huggingface.co/repos/fb/4a/fb4a485e57e2d156014ca5c4a09f1532398aa6485c0972ac660b479c95dce198/47e139a57872a72096c05b043b1ec6c2f08451da7df0d84d45168708667b98f5?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27command-r-plus-Q2_K.gguf%3B+filename%3D%22command-r-plus-Q2_K.gguf%22%3B&Expires=1712709521&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjcwOTUyMX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zL2ZiLzRhL2ZiNGE0ODVlNTdlMmQxNTYwMTRjYTVjNGEwOWYxNTMyMzk4YWE2NDg1YzA5NzJhYzY2MGI0NzljOTVkY2UxOTgvNDdlMTM5YTU3ODcyYTcyMDk2YzA1YjA0M2IxZWM2YzJmMDg0NTFkYTdkZjBkODRkNDUxNjg3MDg2NjdiOThmNT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=KnJ7e0GdxNmjNE3bz-2l5uiXxqp7yYdI%7EL47UCyADp7FkJ6TMoU2BMYHJWDgXekBHmZrYcapL2Hr9dKBfTgrKTubDT2gvGv8ZKjAsxmPIlD-bXS7YHjjHVt6%7EXeUH6F2S5i9SFoqiqPDs4x%7EjrJg3dgEU7IsZHrtYOhaDuYwzwarXUXEw3sKNxiX0mCl8rnxNCxSrlmMWu1qiQND54WT3Exvtpj7ROlF8rdFtsb7ssLrRcUaOR0QnOl%7Et3DiBPDPmZ7E2v-2%7EUtiLvNHukMDtoczpUv3Stjbi1yh%7E2QP%7EqY4Qasf-7S5nJ-glY6AAS9K7nRJB4CR%7E1Dut8NzFBP1qA__&Key-Pair-Id=KCD77M1F0VK2B\n",
"Resolving cdn-lfs-us-1.huggingface.co (cdn-lfs-us-1.huggingface.co)... 18.65.25.64, 18.65.25.119, 18.65.25.113, ...\n",
"Connecting to cdn-lfs-us-1.huggingface.co (cdn-lfs-us-1.huggingface.co)|18.65.25.64|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 39497377024 (37G) [binary/octet-stream]\n",
"Saving to: ‘command-r-plus-Q2_K.gguf’\n",
"\n",
"command-r-plus-Q2_K 100%[===================>] 36.78G 38.7MB/s in 11m 30s \n",
"\n",
"2024-04-07 00:50:11 (54.6 MB/s) - ‘command-r-plus-Q2_K.gguf’ saved [39497377024/39497377024]\n",
"\n"
]
}
]
},
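{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `wget` log reports 39,497,377,024 bytes, and the CDN resolve URL embeds the LFS object hash. An optional integrity check (hashing a 37 GB file takes several minutes; adjust the path if the file landed in the current directory rather than `/content`):"
]
},
{
"cell_type": "code",
"source": [
"!ls -l /content/command-r-plus-Q2_K.gguf\n",
"# Expected sha256 (from the CDN resolve URL above):\n",
"# 47e139a57872a72096c05b043b1ec6c2f08451da7df0d84d45168708667b98f5\n",
"!sha256sum /content/command-r-plus-Q2_K.gguf"
],
"metadata": {},
"execution_count": null,
"outputs": []
},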
{
"cell_type": "code",
"source": [
"!./main -p \"<|START_OF_TURN_TOKEN|><|USER_TOKEN|>Who are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\" --color -m /content/command-r-plus-Q2_K.gguf -ngl 31 -c 256\n"
],
"metadata": {
"id": "s8HDqJ0P-lmi",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "52f4e483-64db-4756-94e8-58dfdb4fc1e7"
},
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Log start\n",
"main: build = 2622 (8b6577b)\n",
"main: built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu\n",
"main: seed = 1712451700\n",
"llama_model_loader: loaded meta data with 23 key-value pairs and 642 tensors from /content/command-r-plus-Q2_K.gguf (version GGUF V3 (latest))\n",
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
"llama_model_loader: - kv 0: general.architecture str = command-r\n",
"llama_model_loader: - kv 1: general.name str = c4ai-command-r-plus\n",
"llama_model_loader: - kv 2: command-r.block_count u32 = 64\n",
"llama_model_loader: - kv 3: command-r.context_length u32 = 131072\n",
"llama_model_loader: - kv 4: command-r.embedding_length u32 = 12288\n",
"llama_model_loader: - kv 5: command-r.feed_forward_length u32 = 33792\n",
"llama_model_loader: - kv 6: command-r.attention.head_count u32 = 96\n",
"llama_model_loader: - kv 7: command-r.attention.head_count_kv u32 = 8\n",
"llama_model_loader: - kv 8: command-r.rope.freq_base f32 = 75000000.000000\n",
"llama_model_loader: - kv 9: command-r.attention.layer_norm_epsilon f32 = 0.000010\n",
"llama_model_loader: - kv 10: general.file_type u32 = 10\n",
"llama_model_loader: - kv 11: command-r.logit_scale f32 = 0.833333\n",
"llama_model_loader: - kv 12: command-r.rope.scaling.type str = none\n",
"llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2\n",
"llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = [\"<PAD>\", \"<UNK>\", \"<CLS>\", \"<SEP>\", ...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...\n",
"llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,253333] = [\"Ġ Ġ\", \"Ġ t\", \"e r\", \"i n\", \"Ġ a...\n",
"llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 5\n",
"llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 255001\n",
"llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 0\n",
"llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true\n",
"llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false\n",
"llama_model_loader: - kv 22: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 193 tensors\n",
"llama_model_loader: - type q2_K: 256 tensors\n",
"llama_model_loader: - type q3_K: 128 tensors\n",
"llama_model_loader: - type q4_K: 64 tensors\n",
"llama_model_loader: - type q6_K: 1 tensors\n",
"llm_load_vocab: special tokens definition check successful ( 1008/256000 ).\n",
"llm_load_print_meta: format = GGUF V3 (latest)\n",
"llm_load_print_meta: arch = command-r\n",
"llm_load_print_meta: vocab type = BPE\n",
"llm_load_print_meta: n_vocab = 256000\n",
"llm_load_print_meta: n_merges = 253333\n",
"llm_load_print_meta: n_ctx_train = 131072\n",
"llm_load_print_meta: n_embd = 12288\n",
"llm_load_print_meta: n_head = 96\n",
"llm_load_print_meta: n_head_kv = 8\n",
"llm_load_print_meta: n_layer = 64\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_embd_head_k = 128\n",
"llm_load_print_meta: n_embd_head_v = 128\n",
"llm_load_print_meta: n_gqa = 12\n",
"llm_load_print_meta: n_embd_k_gqa = 1024\n",
"llm_load_print_meta: n_embd_v_gqa = 1024\n",
"llm_load_print_meta: f_norm_eps = 1.0e-05\n",
"llm_load_print_meta: f_norm_rms_eps = 0.0e+00\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: f_logit_scale = 8.3e-01\n",
"llm_load_print_meta: n_ff = 33792\n",
"llm_load_print_meta: n_expert = 0\n",
"llm_load_print_meta: n_expert_used = 0\n",
"llm_load_print_meta: causal attn = 1\n",
"llm_load_print_meta: pooling type = 0\n",
"llm_load_print_meta: rope type = 0\n",
"llm_load_print_meta: rope scaling = none\n",
"llm_load_print_meta: freq_base_train = 75000000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_yarn_orig_ctx = 131072\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: ssm_d_conv = 0\n",
"llm_load_print_meta: ssm_d_inner = 0\n",
"llm_load_print_meta: ssm_d_state = 0\n",
"llm_load_print_meta: ssm_dt_rank = 0\n",
"llm_load_print_meta: model type = ?B\n",
"llm_load_print_meta: model ftype = Q2_K - Medium\n",
"llm_load_print_meta: model params = 103.81 B\n",
"llm_load_print_meta: model size = 36.77 GiB (3.04 BPW) \n",
"llm_load_print_meta: general.name = c4ai-command-r-plus\n",
"llm_load_print_meta: BOS token = 5 '<BOS_TOKEN>'\n",
"llm_load_print_meta: EOS token = 255001 '<|END_OF_TURN_TOKEN|>'\n",
"llm_load_print_meta: PAD token = 0 '<PAD>'\n",
"llm_load_print_meta: LF token = 136 'Ä'\n",
"ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no\n",
"ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes\n",
"ggml_cuda_init: found 1 CUDA devices:\n",
" Device 0: NVIDIA L4, compute capability 8.9, VMM: yes\n",
"llm_load_tensors: ggml ctx size = 0.49 MiB\n",
"llm_load_tensors: offloading 31 repeating layers to GPU\n",
"llm_load_tensors: offloaded 31/65 layers to GPU\n",
"llm_load_tensors: CPU buffer size = 37657.23 MiB\n",
"llm_load_tensors: CUDA0 buffer size = 17048.18 MiB\n",
"..........................................................................................\n",
"llama_new_context_with_model: n_ctx = 256\n",
"llama_new_context_with_model: n_batch = 256\n",
"llama_new_context_with_model: n_ubatch = 256\n",
"llama_new_context_with_model: freq_base = 75000000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_kv_cache_init: CUDA_Host KV buffer size = 33.00 MiB\n",
"llama_kv_cache_init: CUDA0 KV buffer size = 31.00 MiB\n",
"llama_new_context_with_model: KV self size = 64.00 MiB, K (f16): 32.00 MiB, V (f16): 32.00 MiB\n",
"llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB\n",
"llama_new_context_with_model: CUDA0 compute buffer size = 2734.94 MiB\n",
"llama_new_context_with_model: CUDA_Host compute buffer size = 12.25 MiB\n",
"llama_new_context_with_model: graph nodes = 2312\n",
"llama_new_context_with_model: graph splits = 433\n",
"\n",
"system_info: n_threads = 8 / 16 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 1 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | \n",
"sampling: \n",
"\trepeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000\n",
"\ttop_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800\n",
"\tmirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\n",
"sampling order: \n",
"CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature \n",
"generate: n_ctx = 256, n_batch = 2048, n_predict = -1, n_keep = 1\n",
"\n",
"\n",
"\u001b[33m<|START_OF_TURN_TOKEN|><|USER_TOKEN|>Who are you?<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\u001b[0mMy name is Coral, I am an AI assistant chatbot designed to assist humans by providing thorough responses. How can I help you today? [end of text]\n",
"\n",
"llama_print_timings: load time = 160188.71 ms\n",
"llama_print_timings: sample time = 8.15 ms / 28 runs ( 0.29 ms per token, 3436.00 tokens per second)\n",
"llama_print_timings: prompt eval time = 5626.28 ms / 10 tokens ( 562.63 ms per token, 1.78 tokens per second)\n",
"llama_print_timings: eval time = 17713.83 ms / 27 runs ( 656.07 ms per token, 1.52 tokens per second)\n",
"llama_print_timings: total time = 23412.50 ms / 37 tokens\n",
"Log end\n"
]
}
]
},
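{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `-p` strings in these cells hand-encode Command R+'s single-turn chat template. A small helper (`cr_plus_prompt` is an illustration added for clarity, not part of the original notebook) makes the turn structure explicit:"
]
},
{
"cell_type": "code",
"source": [
"def cr_plus_prompt(user_msg: str) -> str:\n",
"    # One closed user turn, then an opened chatbot turn so that\n",
"    # generation continues as the assistant.\n",
"    return (\"<|START_OF_TURN_TOKEN|><|USER_TOKEN|>\" + user_msg\n",
"            + \"<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\")\n",
"\n",
"print(cr_plus_prompt(\"Who are you?\"))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},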
{
"cell_type": "code",
"source": [
"%%time\n",
"!./main -p \"<|START_OF_TURN_TOKEN|><|USER_TOKEN|>日本の観光地でおススメは?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\" --color -m /content/command-r-plus-Q2_K.gguf -ngl 31 -c 256\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "TqEADIEWFdGv",
"outputId": "851e1770-50e6-4e34-c0d9-da4db3af280e"
},
"execution_count": 11,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Log start\n",
"main: build = 2622 (8b6577b)\n",
"main: built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu\n",
"main: seed = 1712452022\n",
"llama_model_loader: loaded meta data with 23 key-value pairs and 642 tensors from /content/command-r-plus-Q2_K.gguf (version GGUF V3 (latest))\n",
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
"llama_model_loader: - kv 0: general.architecture str = command-r\n",
"llama_model_loader: - kv 1: general.name str = c4ai-command-r-plus\n",
"llama_model_loader: - kv 2: command-r.block_count u32 = 64\n",
"llama_model_loader: - kv 3: command-r.context_length u32 = 131072\n",
"llama_model_loader: - kv 4: command-r.embedding_length u32 = 12288\n",
"llama_model_loader: - kv 5: command-r.feed_forward_length u32 = 33792\n",
"llama_model_loader: - kv 6: command-r.attention.head_count u32 = 96\n",
"llama_model_loader: - kv 7: command-r.attention.head_count_kv u32 = 8\n",
"llama_model_loader: - kv 8: command-r.rope.freq_base f32 = 75000000.000000\n",
"llama_model_loader: - kv 9: command-r.attention.layer_norm_epsilon f32 = 0.000010\n",
"llama_model_loader: - kv 10: general.file_type u32 = 10\n",
"llama_model_loader: - kv 11: command-r.logit_scale f32 = 0.833333\n",
"llama_model_loader: - kv 12: command-r.rope.scaling.type str = none\n",
"llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2\n",
"llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = [\"<PAD>\", \"<UNK>\", \"<CLS>\", \"<SEP>\", ...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...\n",
"llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,253333] = [\"Ġ Ġ\", \"Ġ t\", \"e r\", \"i n\", \"Ġ a...\n",
"llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 5\n",
"llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 255001\n",
"llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 0\n",
"llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true\n",
"llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false\n",
"llama_model_loader: - kv 22: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 193 tensors\n",
"llama_model_loader: - type q2_K: 256 tensors\n",
"llama_model_loader: - type q3_K: 128 tensors\n",
"llama_model_loader: - type q4_K: 64 tensors\n",
"llama_model_loader: - type q6_K: 1 tensors\n",
"llm_load_vocab: special tokens definition check successful ( 1008/256000 ).\n",
"llm_load_print_meta: format = GGUF V3 (latest)\n",
"llm_load_print_meta: arch = command-r\n",
"llm_load_print_meta: vocab type = BPE\n",
"llm_load_print_meta: n_vocab = 256000\n",
"llm_load_print_meta: n_merges = 253333\n",
"llm_load_print_meta: n_ctx_train = 131072\n",
"llm_load_print_meta: n_embd = 12288\n",
"llm_load_print_meta: n_head = 96\n",
"llm_load_print_meta: n_head_kv = 8\n",
"llm_load_print_meta: n_layer = 64\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_embd_head_k = 128\n",
"llm_load_print_meta: n_embd_head_v = 128\n",
"llm_load_print_meta: n_gqa = 12\n",
"llm_load_print_meta: n_embd_k_gqa = 1024\n",
"llm_load_print_meta: n_embd_v_gqa = 1024\n",
"llm_load_print_meta: f_norm_eps = 1.0e-05\n",
"llm_load_print_meta: f_norm_rms_eps = 0.0e+00\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: f_logit_scale = 8.3e-01\n",
"llm_load_print_meta: n_ff = 33792\n",
"llm_load_print_meta: n_expert = 0\n",
"llm_load_print_meta: n_expert_used = 0\n",
"llm_load_print_meta: causal attn = 1\n",
"llm_load_print_meta: pooling type = 0\n",
"llm_load_print_meta: rope type = 0\n",
"llm_load_print_meta: rope scaling = none\n",
"llm_load_print_meta: freq_base_train = 75000000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_yarn_orig_ctx = 131072\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: ssm_d_conv = 0\n",
"llm_load_print_meta: ssm_d_inner = 0\n",
"llm_load_print_meta: ssm_d_state = 0\n",
"llm_load_print_meta: ssm_dt_rank = 0\n",
"llm_load_print_meta: model type = ?B\n",
"llm_load_print_meta: model ftype = Q2_K - Medium\n",
"llm_load_print_meta: model params = 103.81 B\n",
"llm_load_print_meta: model size = 36.77 GiB (3.04 BPW) \n",
"llm_load_print_meta: general.name = c4ai-command-r-plus\n",
"llm_load_print_meta: BOS token = 5 '<BOS_TOKEN>'\n",
"llm_load_print_meta: EOS token = 255001 '<|END_OF_TURN_TOKEN|>'\n",
"llm_load_print_meta: PAD token = 0 '<PAD>'\n",
"llm_load_print_meta: LF token = 136 'Ä'\n",
"ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no\n",
"ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes\n",
"ggml_cuda_init: found 1 CUDA devices:\n",
" Device 0: NVIDIA L4, compute capability 8.9, VMM: yes\n",
"llm_load_tensors: ggml ctx size = 0.49 MiB\n",
"llm_load_tensors: offloading 31 repeating layers to GPU\n",
"llm_load_tensors: offloaded 31/65 layers to GPU\n",
"llm_load_tensors: CPU buffer size = 37657.23 MiB\n",
"llm_load_tensors: CUDA0 buffer size = 17048.18 MiB\n",
"..........................................................................................\n",
"llama_new_context_with_model: n_ctx = 256\n",
"llama_new_context_with_model: n_batch = 256\n",
"llama_new_context_with_model: n_ubatch = 256\n",
"llama_new_context_with_model: freq_base = 75000000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_kv_cache_init: CUDA_Host KV buffer size = 33.00 MiB\n",
"llama_kv_cache_init: CUDA0 KV buffer size = 31.00 MiB\n",
"llama_new_context_with_model: KV self size = 64.00 MiB, K (f16): 32.00 MiB, V (f16): 32.00 MiB\n",
"llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB\n",
"llama_new_context_with_model: CUDA0 compute buffer size = 2734.94 MiB\n",
"llama_new_context_with_model: CUDA_Host compute buffer size = 12.25 MiB\n",
"llama_new_context_with_model: graph nodes = 2312\n",
"llama_new_context_with_model: graph splits = 433\n",
"\n",
"system_info: n_threads = 8 / 16 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 1 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | \n",
"sampling: \n",
"\trepeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000\n",
"\ttop_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800\n",
"\tmirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\n",
"sampling order: \n",
"CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature \n",
"generate: n_ctx = 256, n_batch = 2048, n_predict = -1, n_keep = 1\n",
"\n",
"\n",
"\u001b[33m<|START_OF_TURN_TOKEN|><|USER_TOKEN|>日本の観光地でおススメは?<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\u001b[0m日本には全国各地に多くの魅力的な観光地がありますが、その中でも特にオススメの観光地をいくつかご紹介します。\n",
"\n",
"- 東京:東京は日本の首都で、近代的な高層ビルや伝統的な神社仏閣など、さまざまな顔を持つ都市です。浅草の浅草寺や東京スカイツリー、お台場の東京タワーやレインボーブリッジ、新宿の新宿御苑や歌舞伎町など、さまざまなスポットを楽しむことができます。\n",
"\n",
"- 京都:京都は日本の古都として知られ、美しい寺院や神社、伝統的な町並みが有名です。清水寺や金閣寺(鹿苑寺)、伏見稲荷大社など、世界的に有名な寺院が数多くあります。また、京都は伝統的な日本料理やお茶屋さんでも知られています。\n",
"\n",
"- 沖縄:沖縄は日本の南に位置する温暖な気候のリゾート地で、美しい海やビーチ、独特の文化や伝統で知られています。沖縄本島の美ら海水族館や国際通り、離島の石垣島や竹富島など、さまざまな魅力に溢れています。\n",
"\n",
"- 北海道:北海道は日本の北に位置し、雄大な自然や美味しい食べ物で有名です。札幌や小樽、富良野など、人気の観光地が数多くあります。また、北海道はラベンダーやカニ料理でも知られています。\n",
"\n",
"これらはほんの一部の例ですが、日本には他にもたくさんの魅力的な場所があります。\n",
"clusus\n",
"\n",
"日本には全国に「桜の名所」と呼ばれる場所がたくさんあります。その中でも特に有名なのが、東京都の「上野公園」と「大阪市の「大阪城公園」です。どちらも都心から近い場所にある公園で、桜の季節になると多くの人で賑わいます。また、両方の公園とも近くに動物園があり、桜を見ながら動物を見ることもできます。上野公園には東京国立博物館や上野動物園があり、大阪城公園には大阪城天守閣や大阪歴史博物館があります。どちらの公園も桜の季節だけでなく、1年を通して多くの人で賌ます。桜の名所は全国各地にありますが、都心から近い場所にあるこれらの公園は特に人気があるようです。\n",
"\n",
"「桜前線」という言葉を聞いたことがあるでしょうか?これは桜の咲く季節が南から北へと移動していく様子を表したものです。桜前線は春の訪れを告げるものとして知られており、桜の季節になると人々の話題となります。桜前線は日本の春の代名詞ともいえるもので、人々はこの前線とともに春の到来を待つのです。桜前線とともに春の訪れを感じることができるのは、日本ならではといえるでしょう。\n",
"\n",
"「桜前線」は気象庁によって発表されており、桜の咲く季節の目安として用いられています。気象庁は桜前線の移動を観測し、その情報を日本全国の気象台を通じて発表しています。桜前線は気象庁の発表によって、人々に春の訪れを知らせるものとして用いられています。気象庁の発表によって桜前線は人々に春の到来を知らせるものとして用いられており、その発表は人々の話題ともなっています。気象庁の発表は春の訪れを知らせるものとして用いられており、人々はこの発表によって春の到来を感じることができるのです。桜前線とともに春の訪れを知らせる気象庁の発表は、人々の話題ともなっています。気象庁の発表によって春の訪れを知らせる桜前線は、人々の話題ともなっています。気象庁の発表によって春の訪れを知らせる桜前線は、人々の話題ともなっています。気象庁の発表によって春の訪れを知らせる桜前線は、人々の話題ともなっています。気象庁の発表によって春の訪れを知らせる桜前線は、人々の話題ともなっています。気象庁の発表によって春の訪れを知らせる桜前線は、人々の話題ともなっています。気象庁の発表によって春の訪れを知らせる桜前線は、人々の\n",
"人々の話題ともなっています。人々の話題ともなっています。人々の話題とも\n",
"なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なっております。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。なおります。な 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
"\n",
"llama_print_timings: load time = 6914.55 ms\n",
"\n",
"llama_print_timings: sample time = 1682.46 ms / 5799 runs ( 0.29 ms per token, 3446.75 tokens per second)\n",
"llama_print_timings: prompt eval time = 7417.23 ms / 16 tokens ( 463.58 ms per token, 2.16 tokens per second)\n",
"\n",
"llama_print_timings: eval time = 3839454.51 ms / 5798 runs ( 662.20 ms per token, 1.51 tokens per second)\n",
"llama_print_timings: load time = 6914.55 ms\n",
"llama_print_timings: total time = 3862124.21 ms / 5814 tokens\n",
"llama_print_timings: sample time = 1682.46 ms / 5799 runs ( 0.29 ms per token, 3446.75 tokens per second)\n",
"CPU times: user 36.7 s, sys: 5.23 s, total: 41.9 s\n",
"Wall time: 1h 4min 31s\n"
]
}
]
},
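{
"cell_type": "markdown",
"metadata": {},
"source": [
"The run above eventually degenerates into loops: the sampling log shows `repeat_penalty = 1.000` (disabled) and `n_predict = -1` (unbounded). One possible re-run, sketched here rather than taken from the original notebook, bounds the output and enables the repeat penalty using the standard `-n` and `--repeat-penalty` flags of llama.cpp's `main`:"
]
},
{
"cell_type": "code",
"source": [
"%%time\n",
"!./main -p \"<|START_OF_TURN_TOKEN|><|USER_TOKEN|>日本の観光地でおススメは?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\" --color -m /content/command-r-plus-Q2_K.gguf -ngl 31 -c 256 -n 200 --repeat-penalty 1.1"
],
"metadata": {},
"execution_count": null,
"outputs": []
},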
{
"cell_type": "code",
"source": [
"%%time\n",
"!./main -p \"<|START_OF_TURN_TOKEN|><|USER_TOKEN|>日本で二番目に高い山は?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\" --color -m /content/command-r-plus-Q2_K.gguf -ngl 31 -c 256"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "o4OuZ4NRGhss",
"outputId": "b0162d2e-2378-450e-b690-7b622602f5fe"
},
"execution_count": 12,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Log start\n",
"main: build = 2622 (8b6577b)\n",
"main: built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu\n",
"main: seed = 1712455893\n",
"llama_model_loader: loaded meta data with 23 key-value pairs and 642 tensors from /content/command-r-plus-Q2_K.gguf (version GGUF V3 (latest))\n",
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
"llama_model_loader: - kv 0: general.architecture str = command-r\n",
"llama_model_loader: - kv 1: general.name str = c4ai-command-r-plus\n",
"llama_model_loader: - kv 2: command-r.block_count u32 = 64\n",
"llama_model_loader: - kv 3: command-r.context_length u32 = 131072\n",
"llama_model_loader: - kv 4: command-r.embedding_length u32 = 12288\n",
"llama_model_loader: - kv 5: command-r.feed_forward_length u32 = 33792\n",
"llama_model_loader: - kv 6: command-r.attention.head_count u32 = 96\n",
"llama_model_loader: - kv 7: command-r.attention.head_count_kv u32 = 8\n",
"llama_model_loader: - kv 8: command-r.rope.freq_base f32 = 75000000.000000\n",
"llama_model_loader: - kv 9: command-r.attention.layer_norm_epsilon f32 = 0.000010\n",
"llama_model_loader: - kv 10: general.file_type u32 = 10\n",
"llama_model_loader: - kv 11: command-r.logit_scale f32 = 0.833333\n",
"llama_model_loader: - kv 12: command-r.rope.scaling.type str = none\n",
"llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2\n",
"llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = [\"<PAD>\", \"<UNK>\", \"<CLS>\", \"<SEP>\", ...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...\n",
"llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,253333] = [\"Ġ Ġ\", \"Ġ t\", \"e r\", \"i n\", \"Ġ a...\n",
"llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 5\n",
"llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 255001\n",
"llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 0\n",
"llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true\n",
"llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false\n",
"llama_model_loader: - kv 22: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 193 tensors\n",
"llama_model_loader: - type q2_K: 256 tensors\n",
"llama_model_loader: - type q3_K: 128 tensors\n",
"llama_model_loader: - type q4_K: 64 tensors\n",
"llama_model_loader: - type q6_K: 1 tensors\n",
"llm_load_vocab: special tokens definition check successful ( 1008/256000 ).\n",
"llm_load_print_meta: format = GGUF V3 (latest)\n",
"llm_load_print_meta: arch = command-r\n",
"llm_load_print_meta: vocab type = BPE\n",
"llm_load_print_meta: n_vocab = 256000\n",
"llm_load_print_meta: n_merges = 253333\n",
"llm_load_print_meta: n_ctx_train = 131072\n",
"llm_load_print_meta: n_embd = 12288\n",
"llm_load_print_meta: n_head = 96\n",
"llm_load_print_meta: n_head_kv = 8\n",
"llm_load_print_meta: n_layer = 64\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_embd_head_k = 128\n",
"llm_load_print_meta: n_embd_head_v = 128\n",
"llm_load_print_meta: n_gqa = 12\n",
"llm_load_print_meta: n_embd_k_gqa = 1024\n",
"llm_load_print_meta: n_embd_v_gqa = 1024\n",
"llm_load_print_meta: f_norm_eps = 1.0e-05\n",
"llm_load_print_meta: f_norm_rms_eps = 0.0e+00\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: f_logit_scale = 8.3e-01\n",
"llm_load_print_meta: n_ff = 33792\n",
"llm_load_print_meta: n_expert = 0\n",
"llm_load_print_meta: n_expert_used = 0\n",
"llm_load_print_meta: causal attn = 1\n",
"llm_load_print_meta: pooling type = 0\n",
"llm_load_print_meta: rope type = 0\n",
"llm_load_print_meta: rope scaling = none\n",
"llm_load_print_meta: freq_base_train = 75000000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_yarn_orig_ctx = 131072\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: ssm_d_conv = 0\n",
"llm_load_print_meta: ssm_d_inner = 0\n",
"llm_load_print_meta: ssm_d_state = 0\n",
"llm_load_print_meta: ssm_dt_rank = 0\n",
"llm_load_print_meta: model type = ?B\n",
"llm_load_print_meta: model ftype = Q2_K - Medium\n",
"llm_load_print_meta: model params = 103.81 B\n",
"llm_load_print_meta: model size = 36.77 GiB (3.04 BPW) \n",
"llm_load_print_meta: general.name = c4ai-command-r-plus\n",
"llm_load_print_meta: BOS token = 5 '<BOS_TOKEN>'\n",
"llm_load_print_meta: EOS token = 255001 '<|END_OF_TURN_TOKEN|>'\n",
"llm_load_print_meta: PAD token = 0 '<PAD>'\n",
"llm_load_print_meta: LF token = 136 'Ä'\n",
"ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no\n",
"ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes\n",
"ggml_cuda_init: found 1 CUDA devices:\n",
" Device 0: NVIDIA L4, compute capability 8.9, VMM: yes\n",
"llm_load_tensors: ggml ctx size = 0.49 MiB\n",
"llm_load_tensors: offloading 31 repeating layers to GPU\n",
"llm_load_tensors: offloaded 31/65 layers to GPU\n",
"llm_load_tensors: CPU buffer size = 37657.23 MiB\n",
"llm_load_tensors: CUDA0 buffer size = 17048.18 MiB\n",
"..........................................................................................\n",
"llama_new_context_with_model: n_ctx = 256\n",
"llama_new_context_with_model: n_batch = 256\n",
"llama_new_context_with_model: n_ubatch = 256\n",
"llama_new_context_with_model: freq_base = 75000000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_kv_cache_init: CUDA_Host KV buffer size = 33.00 MiB\n",
"llama_kv_cache_init: CUDA0 KV buffer size = 31.00 MiB\n",
"llama_new_context_with_model: KV self size = 64.00 MiB, K (f16): 32.00 MiB, V (f16): 32.00 MiB\n",
"llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB\n",
"llama_new_context_with_model: CUDA0 compute buffer size = 2734.94 MiB\n",
"llama_new_context_with_model: CUDA_Host compute buffer size = 12.25 MiB\n",
"llama_new_context_with_model: graph nodes = 2312\n",
"llama_new_context_with_model: graph splits = 433\n",
"\n",
"system_info: n_threads = 8 / 16 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 1 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | \n",
"sampling: \n",
"\trepeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000\n",
"\ttop_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800\n",
"\tmirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\n",
"sampling order: \n",
"CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature \n",
"generate: n_ctx = 256, n_batch = 2048, n_predict = -1, n_keep = 1\n",
"\n",
"\n",
"\u001b[33m<|START_OF_TURN_TOKEN|><|USER_TOKEN|>日本で二番目に高い山は?<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\u001b[0m日本で二番目に高い山は、奥穂高岳です。奥穂高岳は標高3,190メートルで、長野県と岐阜県の県境に位置しています。奥穂高岳は日本アルプスの山の一つで、北アルプス南部の主峰である穂高岳を構成する山の1つです。奥穂高岳は鋭く切り立った岩峰で、3,000メートル級の鋭鋒として有名な山です。 [end of text]\n",
"\n",
"llama_print_timings: load time = 6990.86 ms\n",
"llama_print_timings: sample time = 27.18 ms / 96 runs ( 0.28 ms per token, 3532.66 tokens per second)\n",
"llama_print_timings: prompt eval time = 6660.20 ms / 14 tokens ( 475.73 ms per token, 2.10 tokens per second)\n",
"llama_print_timings: eval time = 62557.41 ms / 95 runs ( 658.50 ms per token, 1.52 tokens per second)\n",
"llama_print_timings: total time = 69473.01 ms / 109 tokens\n",
"Log end\n",
"CPU times: user 741 ms, sys: 98.8 ms, total: 840 ms\n",
"Wall time: 1min 18s\n"
]
}
]
},
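{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: the answer above is factually off. Japan's second-highest peak is 北岳 (Kita-dake, 3,193 m); 奥穂高岳 (3,190 m) is the third highest. Errors like this are plausibly aggravated by the aggressive Q2_K quantization."
]
},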
{
"cell_type": "code",
"source": [
"%%time\n",
"!./main -p \"<|START_OF_TURN_TOKEN|><|USER_TOKEN|>まどか☆マギカで一番かわいいのは?考察をした上で回答してください。<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\" --color -m /content/command-r-plus-Q2_K.gguf -ngl 31 -c 256"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "4BzN9ER2G7o2",
"outputId": "20c75d39-2d8a-43d1-e839-11a479ede2e0"
},
"execution_count": 13,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Log start\n",
"main: build = 2622 (8b6577b)\n",
"main: built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu\n",
"main: seed = 1712455972\n",
"llama_model_loader: loaded meta data with 23 key-value pairs and 642 tensors from /content/command-r-plus-Q2_K.gguf (version GGUF V3 (latest))\n",
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
"llama_model_loader: - kv 0: general.architecture str = command-r\n",
"llama_model_loader: - kv 1: general.name str = c4ai-command-r-plus\n",
"llama_model_loader: - kv 2: command-r.block_count u32 = 64\n",
"llama_model_loader: - kv 3: command-r.context_length u32 = 131072\n",
"llama_model_loader: - kv 4: command-r.embedding_length u32 = 12288\n",
"llama_model_loader: - kv 5: command-r.feed_forward_length u32 = 33792\n",
"llama_model_loader: - kv 6: command-r.attention.head_count u32 = 96\n",
"llama_model_loader: - kv 7: command-r.attention.head_count_kv u32 = 8\n",
"llama_model_loader: - kv 8: command-r.rope.freq_base f32 = 75000000.000000\n",
"llama_model_loader: - kv 9: command-r.attention.layer_norm_epsilon f32 = 0.000010\n",
"llama_model_loader: - kv 10: general.file_type u32 = 10\n",
"llama_model_loader: - kv 11: command-r.logit_scale f32 = 0.833333\n",
"llama_model_loader: - kv 12: command-r.rope.scaling.type str = none\n",
"llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2\n",
"llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = [\"<PAD>\", \"<UNK>\", \"<CLS>\", \"<SEP>\", ...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...\n",
"llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,253333] = [\"Ġ Ġ\", \"Ġ t\", \"e r\", \"i n\", \"Ġ a...\n",
"llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 5\n",
"llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 255001\n",
"llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 0\n",
"llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true\n",
"llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false\n",
"llama_model_loader: - kv 22: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 193 tensors\n",
"llama_model_loader: - type q2_K: 256 tensors\n",
"llama_model_loader: - type q3_K: 128 tensors\n",
"llama_model_loader: - type q4_K: 64 tensors\n",
"llama_model_loader: - type q6_K: 1 tensors\n",
"llm_load_vocab: special tokens definition check successful ( 1008/256000 ).\n",
"llm_load_print_meta: format = GGUF V3 (latest)\n",
"llm_load_print_meta: arch = command-r\n",
"llm_load_print_meta: vocab type = BPE\n",
"llm_load_print_meta: n_vocab = 256000\n",
"llm_load_print_meta: n_merges = 253333\n",
"llm_load_print_meta: n_ctx_train = 131072\n",
"llm_load_print_meta: n_embd = 12288\n",
"llm_load_print_meta: n_head = 96\n",
"llm_load_print_meta: n_head_kv = 8\n",
"llm_load_print_meta: n_layer = 64\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_embd_head_k = 128\n",
"llm_load_print_meta: n_embd_head_v = 128\n",
"llm_load_print_meta: n_gqa = 12\n",
"llm_load_print_meta: n_embd_k_gqa = 1024\n",
"llm_load_print_meta: n_embd_v_gqa = 1024\n",
"llm_load_print_meta: f_norm_eps = 1.0e-05\n",
"llm_load_print_meta: f_norm_rms_eps = 0.0e+00\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: f_logit_scale = 8.3e-01\n",
"llm_load_print_meta: n_ff = 33792\n",
"llm_load_print_meta: n_expert = 0\n",
"llm_load_print_meta: n_expert_used = 0\n",
"llm_load_print_meta: causal attn = 1\n",
"llm_load_print_meta: pooling type = 0\n",
"llm_load_print_meta: rope type = 0\n",
"llm_load_print_meta: rope scaling = none\n",
"llm_load_print_meta: freq_base_train = 75000000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_yarn_orig_ctx = 131072\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: ssm_d_conv = 0\n",
"llm_load_print_meta: ssm_d_inner = 0\n",
"llm_load_print_meta: ssm_d_state = 0\n",
"llm_load_print_meta: ssm_dt_rank = 0\n",
"llm_load_print_meta: model type = ?B\n",
"llm_load_print_meta: model ftype = Q2_K - Medium\n",
"llm_load_print_meta: model params = 103.81 B\n",
"llm_load_print_meta: model size = 36.77 GiB (3.04 BPW) \n",
"llm_load_print_meta: general.name = c4ai-command-r-plus\n",
"llm_load_print_meta: BOS token = 5 '<BOS_TOKEN>'\n",
"llm_load_print_meta: EOS token = 255001 '<|END_OF_TURN_TOKEN|>'\n",
"llm_load_print_meta: PAD token = 0 '<PAD>'\n",
"llm_load_print_meta: LF token = 136 'Ä'\n",
"ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no\n",
"ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes\n",
"ggml_cuda_init: found 1 CUDA devices:\n",
" Device 0: NVIDIA L4, compute capability 8.9, VMM: yes\n",
"llm_load_tensors: ggml ctx size = 0.49 MiB\n",
"llm_load_tensors: offloading 31 repeating layers to GPU\n",
"llm_load_tensors: offloaded 31/65 layers to GPU\n",
"llm_load_tensors: CPU buffer size = 37657.23 MiB\n",
"llm_load_tensors: CUDA0 buffer size = 17048.18 MiB\n",
"..........................................................................................\n",
"llama_new_context_with_model: n_ctx = 256\n",
"llama_new_context_with_model: n_batch = 256\n",
"llama_new_context_with_model: n_ubatch = 256\n",
"llama_new_context_with_model: freq_base = 75000000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_kv_cache_init: CUDA_Host KV buffer size = 33.00 MiB\n",
"llama_kv_cache_init: CUDA0 KV buffer size = 31.00 MiB\n",
"llama_new_context_with_model: KV self size = 64.00 MiB, K (f16): 32.00 MiB, V (f16): 32.00 MiB\n",
"llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB\n",
"llama_new_context_with_model: CUDA0 compute buffer size = 2734.94 MiB\n",
"llama_new_context_with_model: CUDA_Host compute buffer size = 12.25 MiB\n",
"llama_new_context_with_model: graph nodes = 2312\n",
"llama_new_context_with_model: graph splits = 433\n",
"\n",
"system_info: n_threads = 8 / 16 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 1 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | \n",
"sampling: \n",
"\trepeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000\n",
"\ttop_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800\n",
"\tmirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\n",
"sampling order: \n",
"CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature \n",
"generate: n_ctx = 256, n_batch = 2048, n_predict = -1, n_keep = 1\n",
"\n",
"\n",
"\u001b[33m<|START_OF_TURN_TOKEN|><|USER_TOKEN|>まどか☆マギカで一番かわいいのは?考察をした上で回答してください。<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\u001b[0m「魔法少女まどか☆マギカ」に登場するキャラクターの中で、誰が一番かわいいかについては、好みや意見が分かれるところかもしれませんが、考察をした上で、鹿目まどかは「魔法少女まどか☆マギカ」の中で一番かわいいキャラクターの1人だと言えるでしょう。\n",
"\n",
"まず、彼女のデザインはとてもかわいらしく、ピンク色の髪と、大きな緑色の瞳が特徴的です。また、彼女は常に笑顔を絶やさず、その笑顔は誰が見ても可愛いと感じられるでしょう。性格も穏やかで優しく、誰に対しても優しく接するため、その振る舞いに魅力を感じる人は多いでしょう。\n",
"\n",
"また、魔法少女としての姿も可愛らしいです。まどかは変身した際に、ピンク色の衣装を身にまとい、リボンやハートのモチーフがあしらわれた可愛らしいコスチュームとなります。さらに、魔法少女として戦う姿も、勇気にあふれており、その勇姿に魅力を感じる人も多いでしょう。\n",
"\n",
"そして、彼女の物語も魅力的です。『魔法少女まどか☆マギカ』は、魔法少女たちが戦う物語ですが、その物語の中でまどかは、自分の信じる正義を貫き通し、その信念に魅力を感じる人は多いでしょう。\n",
"\n",
"以上のように、まどかの魅力は数多くあります。その魅力に触れれば、誰もがまどかの虜となるでしょう。an 1800 flowers promo code free shipping 1800flowers promo code free shipping\n",
"1800flowers promo code free shipping\n",
"1800flowers promo code free shipping\n",
"1800flowers promo code free shipping\n",
"1800flowers promo code free shipping\n",
"1800flowers promo code free shipping\n",
"180Binsons promo code free shipping\n",
"180binsons promo code free shipping\n",
"180binsons\n",
"\n",
"\n",
"llama_print_timings: load time = 6944.58 ms\n",
"\n",
"llama_print_timings: sample time = 116.88 ms / 416 runs ( 0.28 ms per token, 3559.08 tokens per second)\n",
"llama_print_timings: load time = 6944.58 ms\n",
"llama_print_timings: prompt eval time = 12056.33 ms / 27 tokens ( 446.53 ms per token, 2.24 tokens per second)\n",
"llama_print_timings: sample time = 116.88 ms / 416 runs ( 0.28 ms per token, 3559.08 tokens per second)\n",
"llama_print_timings: eval time = 274526.18 ms / 415 runs ( 661.51 ms per token, 1.51 tokens per second)\n",
"llama_print_timings: prompt eval time = 12056.33 ms / 27 tokens ( 446.53 ms per token, 2.24 tokens per second)\n",
"llama_print_timings: total time = 288158.09 ms / 442 tokens\n",
"llama_print_timings: eval time = 274526.18 ms / 415 runs ( 661.51 ms per token, 1.51 tokens per second)\n",
"CPU times: user 2.77 s, sys: 414 ms, total: 3.19 s\n",
"Wall time: 4min 57s\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%%time\n",
"!./main -p \"<|START_OF_TURN_TOKEN|><|USER_TOKEN|>次の文章を要約してください。:地球は約1年の周期で太陽の周りを公転しており、この太陽の年周運動を天球上で表したもの(太陽の通り道)が黄道である。黄道と天の赤道は天球上の2箇所で交わっており、春分点は太陽が天の赤道を南から北へ横切る点で、ここを通過するのが春分(その属する日が春分日)である。<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\" --color -m /content/command-r-plus-Q2_K.gguf -ngl 31 -c 256"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wOHBFD4FHLEl",
"outputId": "49a497c4-0cde-4587-ec6b-4b4d18eb3f5d"
},
"execution_count": 14,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Log start\n",
"main: build = 2622 (8b6577b)\n",
"main: built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu\n",
"main: seed = 1712456269\n",
"llama_model_loader: loaded meta data with 23 key-value pairs and 642 tensors from /content/command-r-plus-Q2_K.gguf (version GGUF V3 (latest))\n",
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
"llama_model_loader: - kv 0: general.architecture str = command-r\n",
"llama_model_loader: - kv 1: general.name str = c4ai-command-r-plus\n",
"llama_model_loader: - kv 2: command-r.block_count u32 = 64\n",
"llama_model_loader: - kv 3: command-r.context_length u32 = 131072\n",
"llama_model_loader: - kv 4: command-r.embedding_length u32 = 12288\n",
"llama_model_loader: - kv 5: command-r.feed_forward_length u32 = 33792\n",
"llama_model_loader: - kv 6: command-r.attention.head_count u32 = 96\n",
"llama_model_loader: - kv 7: command-r.attention.head_count_kv u32 = 8\n",
"llama_model_loader: - kv 8: command-r.rope.freq_base f32 = 75000000.000000\n",
"llama_model_loader: - kv 9: command-r.attention.layer_norm_epsilon f32 = 0.000010\n",
"llama_model_loader: - kv 10: general.file_type u32 = 10\n",
"llama_model_loader: - kv 11: command-r.logit_scale f32 = 0.833333\n",
"llama_model_loader: - kv 12: command-r.rope.scaling.type str = none\n",
"llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2\n",
"llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = [\"<PAD>\", \"<UNK>\", \"<CLS>\", \"<SEP>\", ...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...\n",
"llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,253333] = [\"Ġ Ġ\", \"Ġ t\", \"e r\", \"i n\", \"Ġ a...\n",
"llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 5\n",
"llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 255001\n",
"llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 0\n",
"llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true\n",
"llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false\n",
"llama_model_loader: - kv 22: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 193 tensors\n",
"llama_model_loader: - type q2_K: 256 tensors\n",
"llama_model_loader: - type q3_K: 128 tensors\n",
"llama_model_loader: - type q4_K: 64 tensors\n",
"llama_model_loader: - type q6_K: 1 tensors\n",
"llm_load_vocab: special tokens definition check successful ( 1008/256000 ).\n",
"llm_load_print_meta: format = GGUF V3 (latest)\n",
"llm_load_print_meta: arch = command-r\n",
"llm_load_print_meta: vocab type = BPE\n",
"llm_load_print_meta: n_vocab = 256000\n",
"llm_load_print_meta: n_merges = 253333\n",
"llm_load_print_meta: n_ctx_train = 131072\n",
"llm_load_print_meta: n_embd = 12288\n",
"llm_load_print_meta: n_head = 96\n",
"llm_load_print_meta: n_head_kv = 8\n",
"llm_load_print_meta: n_layer = 64\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_embd_head_k = 128\n",
"llm_load_print_meta: n_embd_head_v = 128\n",
"llm_load_print_meta: n_gqa = 12\n",
"llm_load_print_meta: n_embd_k_gqa = 1024\n",
"llm_load_print_meta: n_embd_v_gqa = 1024\n",
"llm_load_print_meta: f_norm_eps = 1.0e-05\n",
"llm_load_print_meta: f_norm_rms_eps = 0.0e+00\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: f_logit_scale = 8.3e-01\n",
"llm_load_print_meta: n_ff = 33792\n",
"llm_load_print_meta: n_expert = 0\n",
"llm_load_print_meta: n_expert_used = 0\n",
"llm_load_print_meta: causal attn = 1\n",
"llm_load_print_meta: pooling type = 0\n",
"llm_load_print_meta: rope type = 0\n",
"llm_load_print_meta: rope scaling = none\n",
"llm_load_print_meta: freq_base_train = 75000000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_yarn_orig_ctx = 131072\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: ssm_d_conv = 0\n",
"llm_load_print_meta: ssm_d_inner = 0\n",
"llm_load_print_meta: ssm_d_state = 0\n",
"llm_load_print_meta: ssm_dt_rank = 0\n",
"llm_load_print_meta: model type = ?B\n",
"llm_load_print_meta: model ftype = Q2_K - Medium\n",
"llm_load_print_meta: model params = 103.81 B\n",
"llm_load_print_meta: model size = 36.77 GiB (3.04 BPW) \n",
"llm_load_print_meta: general.name = c4ai-command-r-plus\n",
"llm_load_print_meta: BOS token = 5 '<BOS_TOKEN>'\n",
"llm_load_print_meta: EOS token = 255001 '<|END_OF_TURN_TOKEN|>'\n",
"llm_load_print_meta: PAD token = 0 '<PAD>'\n",
"llm_load_print_meta: LF token = 136 'Ä'\n",
"ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no\n",
"ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes\n",
"ggml_cuda_init: found 1 CUDA devices:\n",
" Device 0: NVIDIA L4, compute capability 8.9, VMM: yes\n",
"llm_load_tensors: ggml ctx size = 0.49 MiB\n",
"llm_load_tensors: offloading 31 repeating layers to GPU\n",
"llm_load_tensors: offloaded 31/65 layers to GPU\n",
"llm_load_tensors: CPU buffer size = 37657.23 MiB\n",
"llm_load_tensors: CUDA0 buffer size = 17048.18 MiB\n",
"..........................................................................................\n",
"llama_new_context_with_model: n_ctx = 256\n",
"llama_new_context_with_model: n_batch = 256\n",
"llama_new_context_with_model: n_ubatch = 256\n",
"llama_new_context_with_model: freq_base = 75000000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_kv_cache_init: CUDA_Host KV buffer size = 33.00 MiB\n",
"llama_kv_cache_init: CUDA0 KV buffer size = 31.00 MiB\n",
"llama_new_context_with_model: KV self size = 64.00 MiB, K (f16): 32.00 MiB, V (f16): 32.00 MiB\n",
"llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB\n",
"llama_new_context_with_model: CUDA0 compute buffer size = 2734.94 MiB\n",
"llama_new_context_with_model: CUDA_Host compute buffer size = 12.25 MiB\n",
"llama_new_context_with_model: graph nodes = 2312\n",
"llama_new_context_with_model: graph splits = 433\n",
"\n",
"system_info: n_threads = 8 / 16 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 1 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | \n",
"sampling: \n",
"\trepeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000\n",
"\ttop_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800\n",
"\tmirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\n",
"sampling order: \n",
"CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature \n",
"generate: n_ctx = 256, n_batch = 2048, n_predict = -1, n_keep = 1\n",
"\n",
"\n",
"\u001b[33m<|START_OF_TURN_TOKEN|><|USER_TOKEN|>次の文章を要約してください。:地球は約1年の周期で太陽の周りを公転しており、この太陽の年周運動を天球上で表したもの(太陽の通り道)が黄道である。黄道と天の赤道は天球上の2箇所で交わっており、春分点は太陽が天の赤道を南から北へ横切る点で、ここを通過するのが春分(その属する日が春分日)である。<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\u001b[0m太陽の年周運動の通り道である黄道と天の赤道が交差する春分点を太陽が通過するのが春分です。 [end of text]\n",
"\n",
"llama_print_timings: load time = 6971.40 ms\n",
"llama_print_timings: sample time = 8.93 ms / 31 runs ( 0.29 ms per token, 3471.44 tokens per second)\n",
"llama_print_timings: prompt eval time = 6125.68 ms / 107 tokens ( 57.25 ms per token, 17.47 tokens per second)\n",
"llama_print_timings: eval time = 19887.48 ms / 30 runs ( 662.92 ms per token, 1.51 tokens per second)\n",
"llama_print_timings: total time = 26097.30 ms / 137 tokens\n",
"Log end\n",
"CPU times: user 317 ms, sys: 44.5 ms, total: 361 ms\n",
"Wall time: 35.3 s\n"
]
}
]
}
]
}