luhkronn committed on
Commit ffb99ea · verified · 1 Parent(s): c67852b

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -1,35 +1,36 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ figures/Bench.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,45 @@
1
+ # Model files that are too large for git
2
+ *.safetensors
3
+ *.bin
4
+ *.pt
5
+ *.pth
6
+
7
+ # Python cache
8
+ __pycache__/
9
+ *.py[cod]
10
+ *$py.class
11
+ *.so
12
+ .Python
13
+ build/
14
+ develop-eggs/
15
+ dist/
16
+ downloads/
17
+ eggs/
18
+ .eggs/
19
+ lib/
20
+ lib64/
21
+ parts/
22
+ sdist/
23
+ var/
24
+ wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+
29
+ # Virtual environments
30
+ venv/
31
+ env/
32
+ ENV/
33
+
34
+ # IDE files
35
+ .vscode/
36
+ .idea/
37
+ *.swp
38
+ *.swo
39
+
40
+ # OS files
41
+ .DS_Store
42
+ Thumbs.db
43
+
44
+ # Logs
45
+ *.log
README.md ADDED
@@ -0,0 +1,64 @@
1
+ # MiniMax-M2 Model Repository
2
+
3
+ This is the official MiniMax-M2 model repository containing a 230B parameter MoE model with 10B active parameters, optimized for coding and agentic workflows.
4
+
5
+ ## Model Information
6
+
7
+ - **Model Type**: Mixture of Experts (MoE)
8
+ - **Total Parameters**: 230B
9
+ - **Active Parameters**: 10B
10
+ - **Architecture**: Transformer-based MoE
11
+ - **License**: Modified MIT
12
+ - **Pipeline Tag**: text-generation
13
+
14
+ ## Usage
15
+
16
+ This model can be used with various inference frameworks:
17
+
18
+ ### Transformers
19
+ ```python
20
+ from transformers import AutoModelForCausalLM, AutoTokenizer
21
+
22
+ model = AutoModelForCausalLM.from_pretrained("your-username/MiniMax-M2", trust_remote_code=True)
23
+ tokenizer = AutoTokenizer.from_pretrained("your-username/MiniMax-M2", trust_remote_code=True)
24
+ ```
25
+
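+ A minimal generation sketch continuing the snippet above (argument names such as `max_new_tokens` are standard `transformers` generation parameters; adjust dtype and device placement to your hardware):
+
+ ```python
+ # Build a chat-formatted prompt and generate a short completion.
+ messages = [{"role": "user", "content": "Write a haiku about the ocean."}]
+ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
+ outputs = model.generate(inputs, max_new_tokens=256)
+ print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
+ ```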
26
+ ### vLLM
27
+ ```python
28
+ from vllm import LLM, SamplingParams
29
+
30
+ llm = LLM(model="your-username/MiniMax-M2")
31
+ ```
32
+
33
+ ### SGLang
34
+ ```python
35
+ from sglang import function, system, user, assistant, gen
36
+
37
+ @function
38
+ def multi_turn_question(s, question):
39
+ s += system("You are a helpful assistant.")
40
+ s += user(question)
41
+ s += assistant(gen("answer", max_tokens=256))
42
+ return s["answer"]
43
+ ```
44
+
45
+ ## Model Details
46
+
47
+ - **Context Length**: 128K tokens
48
+ - **Thinking Format**: Uses `<think>...</think>` tags for reasoning
49
+ - **Recommended Parameters** (see the sketch below):
50
+ - Temperature: 1.0
51
+ - Top-p: 0.95
52
+ - Top-k: 40
53
+
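+ A minimal vLLM sketch showing how these recommended values can be passed via `SamplingParams` (the prompt string is illustrative, and `trust_remote_code=True` is assumed to be needed for the custom model code shipped in this repo):
+
+ ```python
+ from vllm import LLM, SamplingParams
+
+ llm = LLM(model="your-username/MiniMax-M2", trust_remote_code=True)
+
+ # Recommended sampling defaults from this card.
+ sampling_params = SamplingParams(temperature=1.0, top_p=0.95, top_k=40, max_tokens=1024)
+
+ outputs = llm.generate(["Write a function that checks whether a number is prime."], sampling_params)
+ print(outputs[0].outputs[0].text)
+ ```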
54
+ ## Deployment Guides
55
+
56
+ See the `docs/` directory for detailed deployment guides:
57
+ - [Transformers Guide](docs/transformers_deploy_guide.md)
58
+ - [vLLM Guide](docs/vllm_deploy_guide.md)
59
+ - [SGLang Guide](docs/sglang_deploy_guide.md)
60
+ - [MLX Guide](docs/mlx_deploy_guide.md)
61
+
62
+ ## License
63
+
64
+ This model is released under the Modified MIT License. See the [license file](https://github.com/MiniMax-AI/MiniMax-M2/blob/main/LICENSE) for details.
chat_template.jinja ADDED
@@ -0,0 +1,165 @@
1
+ {# ---------- special token variables ---------- #}
2
+ {%- set toolcall_begin_token = '<minimax:tool_call>' -%}
3
+ {%- set toolcall_end_token = '</minimax:tool_call>' -%}
4
+ {#- Tool Rendering Functions ============================================== -#}
5
+ {%- macro render_tool_namespace(namespace_name, tool_list) -%}
6
+ {%- for tool in tool_list -%}
7
+ <tool>{{ tool.function | tojson(ensure_ascii=False) }}</tool>
8
+ {% endfor -%}
9
+ {%- endmacro -%}
10
+ {%- macro visible_text(content) -%}
11
+ {%- if content is string -%}
12
+ {{ content }}
13
+ {%- elif content is iterable and content is not mapping -%}
14
+ {%- for item in content -%}
15
+ {%- if item is mapping and item.type == 'text' -%}
16
+ {{- item.text }}
17
+ {%- elif item is string -%}
18
+ {{- item }}
19
+ {%- endif -%}
20
+ {%- endfor -%}
21
+ {%- elif content is none -%}
22
+ {{- '' }}
23
+ {%- else -%}
24
+ {{- content }}
25
+ {%- endif -%}
26
+ {%- endmacro -%}
27
+ {#- System Message Construction ============================================ -#}
28
+ {%- macro build_system_message(system_message) -%}
29
+ {%- if system_message and system_message.content -%}
30
+ {{- visible_text(system_message.content) }}
31
+ {%- else -%}
32
+ {%- if model_identity is not defined -%}
33
+ {%- set model_identity = "You are a helpful assistant." -%}
34
+ {%- endif -%}
35
+ {{- model_identity }}
36
+ {%- endif -%}
37
+
38
+ {#- Handle current_date -#}
39
+ {%- if system_message and system_message.current_date -%}
40
+ {{- '\n' ~ 'Current date: ' + system_message.current_date }}
41
+ {%- endif -%}
42
+ {#- Handle current_location -#}
43
+ {%- if system_message and system_message.current_location -%}
44
+ {{- '\n' ~ 'Current location: ' + system_message.current_location }}
45
+ {%- endif -%}
46
+ {%- endmacro -%}
47
+ {#- Main Template Logic ================================================= -#}
48
+ {#- Extract system message (only first message if it's system) -#}
49
+ {%- set system_message = none -%}
50
+ {%- set conversation_messages = messages -%}
51
+ {%- if messages and messages[0].role == "system" -%}
52
+ {%- set system_message = messages[0] -%}
53
+ {%- set conversation_messages = messages[1:] -%}
54
+ {%- endif -%}
55
+ {#- Get the last user message turn, for interleaved thinking -#}
56
+ {%- set ns = namespace(last_user_index=-1) %}
57
+ {% for m in conversation_messages %}
58
+ {%- if m.role == 'user' %}
59
+ {% set ns.last_user_index = loop.index0 -%}
60
+ {%- endif %}
61
+ {%- endfor %}
62
+ {#- Render system message -#}
63
+ {{- ']~!b[' ~ ']~b]system' ~ '\n' }}
64
+ {{- build_system_message(system_message) }}
65
+ {#- Render tools if available -#}
66
+ {%- if tools -%}
67
+ {{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
68
+ {{- '\n' ~ '<tools>' ~ '\n' }}
69
+ {{- render_tool_namespace("functions", tools) }}
70
+ {{- '</tools>' ~ '\n\n' }}
71
+ {{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
72
+ {{- '\n' ~ toolcall_begin_token }}
73
+ <invoke name="tool-name-1">
74
+ <parameter name="param-key-1">param-value-1</parameter>
75
+ <parameter name="param-key-2">param-value-2</parameter>
76
+ ...
77
+ </invoke>
78
+ {{- '\n' ~ toolcall_end_token }}
79
+ {%- endif -%}
80
+ {{- '[e~[\n' }}
81
+
82
+ {#- Render messages -#}
83
+ {%- set last_tool_call = namespace(name=none) -%}
84
+ {%- for message in conversation_messages -%}
85
+ {%- if message.role == 'assistant' -%}
86
+ {#- Only render reasoning_content if no user message follows -#}
87
+ {{- ']~b]ai' ~ '\n' }}
88
+
89
+ {%- set reasoning_content = '' %}
90
+ {%- set content = visible_text(message.content) %}
91
+ {%- if message.reasoning_content is string %}
92
+ {%- set reasoning_content = message.reasoning_content %}
93
+ {%- else %}
94
+ {%- if '</think>' in content %}
95
+ {%- set reasoning_content = content.split('</think>')[0].strip('\n').split('<think>')[-1].strip('\n') %}
96
+ {%- set content = content.split('</think>')[-1].strip('\n') %}
97
+ {%- endif %}
98
+ {%- endif %}
99
+ {%- if reasoning_content and loop.index0 > ns.last_user_index -%}
100
+ {{- '<think>' ~ '\n' ~ reasoning_content ~ '\n' ~ '</think>' ~ '\n\n' }}
101
+ {%- endif -%}
102
+ {%- if content -%}
103
+ {{- content }}
104
+ {%- endif -%}
105
+ {%- if message.tool_calls -%}
106
+ {{- '\n' ~ toolcall_begin_token ~ '\n' }}
107
+
108
+ {%- for tool_call in message.tool_calls -%}
109
+ {%- if tool_call.function %}
110
+ {%- set tool_call = tool_call.function %}
111
+ {%- endif %}
112
+ {{- '<invoke name="' + tool_call.name + '">' }}
113
+ {% set _args = tool_call.arguments %}
114
+ {%- for k, v in _args.items() %}
115
+ {{- '<parameter name="' + k + '">' }}
116
+ {{- v | tojson(ensure_ascii=False) if v is not string else v }}
117
+ {{- '</parameter>' }}
118
+ {% endfor %}
119
+ {{- '</invoke>' ~ '\n' }}
120
+ {%- endfor -%}
121
+
122
+ {{- toolcall_end_token}}
123
+ {%- if message.tool_calls[-1].function -%}
124
+ {%- set last_tool_call.name = message.tool_calls[-1].function.name -%}
125
+ {%- else -%}
126
+ {%- set last_tool_call.name = message.tool_calls[-1].name -%}
127
+ {%- endif -%}
128
+ {%- else -%}
129
+ {%- set last_tool_call.name = none -%}
130
+ {%- endif -%}
131
+ {{- '[e~[' ~ '\n' }}
132
+
133
+ {%- elif message.role == 'tool' -%}
134
+ {%- if last_tool_call.name is none -%}
135
+ {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
136
+ {%- endif -%}
137
+ {%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
138
+ {{- ']~b]tool' }}
139
+ {%- endif -%}
140
+ {%- if message.content is string -%}
141
+ {{- '\n<response>' }}
142
+ {{- message.content }}
143
+ {{- '</response>' }}
144
+ {%- else -%}
145
+ {%- for tr in message.content -%}
146
+ {{- '\n<response>' }}
147
+ {{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
148
+ {{- '\n</response>' }}
149
+ {%- endfor -%}
150
+ {%- endif -%}
151
+ {%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
152
+ {{- '[e~[\n' -}}
153
+ {%- endif -%}
154
+
155
+ {%- elif message.role == 'user' -%}
156
+ {{- ']~b]user' ~ '\n' }}
157
+ {{- visible_text(message.content) }}
158
+ {{- '[e~[' ~ '\n' }}
159
+ {%- endif -%}
160
+ {%- endfor -%}
161
+
162
+ {#- Generation prompt -#}
163
+ {%- if add_generation_prompt -%}
164
+ {{- ']~b]ai' ~ '\n' ~ '<think>' ~ '\n' }}
165
+ {%- endif -%}
config.json ADDED
@@ -0,0 +1,126 @@
1
+ {
2
+ "architectures": [
3
+ "MiniMaxM2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "attn_type_list": [
7
+ 1,
8
+ 1,
9
+ 1,
10
+ 1,
11
+ 1,
12
+ 1,
13
+ 1,
14
+ 1,
15
+ 1,
16
+ 1,
17
+ 1,
18
+ 1,
19
+ 1,
20
+ 1,
21
+ 1,
22
+ 1,
23
+ 1,
24
+ 1,
25
+ 1,
26
+ 1,
27
+ 1,
28
+ 1,
29
+ 1,
30
+ 1,
31
+ 1,
32
+ 1,
33
+ 1,
34
+ 1,
35
+ 1,
36
+ 1,
37
+ 1,
38
+ 1,
39
+ 1,
40
+ 1,
41
+ 1,
42
+ 1,
43
+ 1,
44
+ 1,
45
+ 1,
46
+ 1,
47
+ 1,
48
+ 1,
49
+ 1,
50
+ 1,
51
+ 1,
52
+ 1,
53
+ 1,
54
+ 1,
55
+ 1,
56
+ 1,
57
+ 1,
58
+ 1,
59
+ 1,
60
+ 1,
61
+ 1,
62
+ 1,
63
+ 1,
64
+ 1,
65
+ 1,
66
+ 1,
67
+ 1,
68
+ 1
69
+ ],
70
+ "auto_map": {
71
+ "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
72
+ "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
73
+ },
74
+ "bos_token_id": null,
75
+ "eos_token_id": null,
76
+ "head_dim": 128,
77
+ "hidden_act": "silu",
78
+ "hidden_size": 3072,
79
+ "initializer_range": 0.02,
80
+ "intermediate_size": 1536,
81
+ "layernorm_full_attention_beta": 1.0,
82
+ "layernorm_linear_attention_beta": 1.0,
83
+ "layernorm_mlp_beta": 1.0,
84
+ "max_position_embeddings": 196608,
85
+ "mlp_intermediate_size": 8192,
86
+ "model_type": "minimax_m2",
87
+ "mtp_transformer_layers": 1,
88
+ "num_attention_heads": 48,
89
+ "num_experts_per_tok": 8,
90
+ "num_hidden_layers": 62,
91
+ "num_key_value_heads": 8,
92
+ "num_local_experts": 256,
93
+ "num_mtp_modules": 3,
94
+ "output_router_logits": false,
95
+ "qk_norm_type": "per_layer",
96
+ "quantization_config": {
97
+ "activation_scheme": "dynamic",
98
+ "fmt": "float8_e4m3fn",
99
+ "quant_method": "fp8",
100
+ "weight_block_size": [
101
+ 128,
102
+ 128
103
+ ],
104
+ "modules_to_not_convert": [
105
+ "gate",
106
+ "e_score_correction_bias",
107
+ "lm_head"
108
+ ]
109
+ },
110
+ "rms_norm_eps": 1e-06,
111
+ "rope_theta": 5000000,
112
+ "rotary_dim": 64,
113
+ "router_aux_loss_coef": 0.001,
114
+ "router_jitter_noise": 0.0,
115
+ "scoring_func": "sigmoid",
116
+ "shared_intermediate_size": 0,
117
+ "shared_moe_mode": "sigmoid",
118
+ "sliding_window": null,
119
+ "tie_word_embeddings": false,
120
+ "transformers_version": "4.57.1",
121
+ "use_cache": true,
122
+ "use_mtp": true,
123
+ "use_qk_norm": true,
124
+ "use_routing_bias": true,
125
+ "vocab_size": 200064
126
+ }
configuration_minimax_m2.py ADDED
@@ -0,0 +1,200 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_minimax_m2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+
23
+ from transformers.configuration_utils import PretrainedConfig
24
+
25
+
26
+ class MiniMaxM2Config(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`MiniMaxM2Model`]. It is used to instantiate an
29
+ MiniMaxM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
30
+ with the defaults will yield a similar configuration to that of the MiniMaxM2-7B-v0.1 or MiniMaxM2-7B-Instruct-v0.1.
31
+
32
+ [minimax_m2ai/MiniMaxM2-8x7B](https://huggingface.co/minimax_m2ai/MiniMaxM2-8x7B)
33
+ [minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1](https://huggingface.co/minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1)
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 32000):
41
+ Vocabulary size of the MiniMaxM2 model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`MiniMaxM2Model`]
43
+ hidden_size (`int`, *optional*, defaults to 4096):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 14336):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 32):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 32):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ num_key_value_heads (`int`, *optional*, defaults to 8):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by meanpooling all the original heads within that group. For more details, check out [this
57
+ paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
58
+ head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
59
+ The attention head dimension.
60
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
61
+ The non-linear activation function (function or string) in the decoder.
62
+ max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
63
+ The maximum sequence length that this model might ever be used with. MiniMaxM2's sliding window attention
64
+ allows sequence of up to 4096*32 tokens.
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
68
+ The epsilon used by the rms normalization layers.
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
71
+ relevant if `config.is_decoder=True`.
72
+ pad_token_id (`int`, *optional*):
73
+ The id of the padding token.
74
+ bos_token_id (`int`, *optional*, defaults to 1):
75
+ The id of the "beginning-of-sequence" token.
76
+ eos_token_id (`int`, *optional*, defaults to 2):
77
+ The id of the "end-of-sequence" token.
78
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
79
+ Whether the model's input and output word embeddings should be tied.
80
+ rope_theta (`float`, *optional*, defaults to 1000000.0):
81
+ The base period of the RoPE embeddings.
82
+ sliding_window (`int`, *optional*):
83
+ Sliding window attention window size. If not specified, will default to `4096`.
84
+ attention_dropout (`float`, *optional*, defaults to 0.0):
85
+ The dropout ratio for the attention probabilities.
86
+ num_experts_per_tok (`int`, *optional*, defaults to 2):
87
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
88
+ parameter
89
+ num_local_experts (`int`, *optional*, defaults to 8):
90
+ Number of experts per Sparse MLP layer.
91
+ output_router_logits (`bool`, *optional*, defaults to `False`):
92
+ Whether or not the router logits should be returned by the model. Enabling this will also
93
+ allow the model to output the auxiliary loss. See [here]() for more details
94
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
95
+ The aux loss factor for the total loss.
96
+ router_jitter_noise (`float`, *optional*, defaults to 0.0):
97
+ Amount of noise to add to the router.
98
+
99
+ ```python
100
+ >>> from transformers import MiniMaxM2Model, MiniMaxM2Config
101
+
102
+ >>> # Initializing a MiniMaxM2 7B style configuration
103
+ >>> configuration = MiniMaxM2Config()
104
+
105
+ >>> # Initializing a model from the MiniMaxM2 7B style configuration
106
+ >>> model = MiniMaxM2Model(configuration)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> configuration = model.config
110
+ ```"""
111
+
112
+ model_type = "minimax_m2"
113
+ keys_to_ignore_at_inference = ["past_key_values"]
114
+ base_model_tp_plan = {
115
+ "layers.*.self_attn.q_proj": "colwise",
116
+ "layers.*.self_attn.k_proj": "colwise",
117
+ "layers.*.self_attn.v_proj": "colwise",
118
+ "layers.*.self_attn.o_proj": "rowwise",
119
+ "layers.*.block_sparse_moe.gate": "colwise_rep", # we need to replicate here to correctly route experts
120
+ "layers.*.block_sparse_moe.experts.*.w1": "colwise",
121
+ "layers.*.block_sparse_moe.experts.*.w2": "rowwise",
122
+ "layers.*.block_sparse_moe.experts.*.w3": "colwise",
123
+ }
124
+ base_model_pp_plan = {
125
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
126
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
127
+ "norm": (["hidden_states"], ["hidden_states"]),
128
+ }
129
+
130
+ def __init__(
131
+ self,
132
+ vocab_size=32000,
133
+ hidden_size=4096,
134
+ intermediate_size=14336,
135
+ num_hidden_layers=32,
136
+ num_attention_heads=32,
137
+ num_key_value_heads=8,
138
+ head_dim=None,
139
+ hidden_act="silu",
140
+ max_position_embeddings=4096 * 32,
141
+ initializer_range=0.02,
142
+ rms_norm_eps=1e-5,
143
+ use_cache=True,
144
+ pad_token_id=None,
145
+ bos_token_id=1,
146
+ eos_token_id=2,
147
+ tie_word_embeddings=False,
148
+ rope_theta=1e6,
149
+ sliding_window=None,
150
+ attention_dropout=0.0,
151
+ num_experts_per_tok=2,
152
+ num_local_experts=8,
153
+ output_router_logits=False,
154
+ router_aux_loss_coef=0.001,
155
+ router_jitter_noise=0.0,
156
+ **kwargs,
157
+ ):
158
+ self.vocab_size = vocab_size
159
+ self.max_position_embeddings = max_position_embeddings
160
+ self.hidden_size = hidden_size
161
+ self.intermediate_size = intermediate_size
162
+ self.num_hidden_layers = num_hidden_layers
163
+ self.num_attention_heads = num_attention_heads
164
+ self.sliding_window = sliding_window
165
+
166
+ # for backward compatibility
167
+ if num_key_value_heads is None:
168
+ num_key_value_heads = num_attention_heads
169
+
170
+ self.num_key_value_heads = num_key_value_heads
171
+ self.hidden_act = hidden_act
172
+ self.initializer_range = initializer_range
173
+ self.rms_norm_eps = rms_norm_eps
174
+ self.use_cache = use_cache
175
+ self.rope_theta = rope_theta
176
+ self.attention_dropout = attention_dropout
177
+ self.head_dim = head_dim
178
+
179
+ self.num_experts_per_tok = num_experts_per_tok
180
+ self.num_local_experts = num_local_experts
181
+ self.output_router_logits = output_router_logits
182
+ self.router_aux_loss_coef = router_aux_loss_coef
183
+ self.router_jitter_noise = router_jitter_noise
184
+
185
+ self.use_qk_norm = kwargs.pop("use_qk_norm", False)
186
+ self.rotary_dim = kwargs.pop("rotary_dim", self.head_dim)
187
+ self.partial_rotary_factor = kwargs.pop("partial_rotary_factor", 1)
188
+ if self.head_dim is not None:
189
+ self.partial_rotary_factor = self.rotary_dim / self.head_dim
190
+
191
+ super().__init__(
192
+ pad_token_id=pad_token_id,
193
+ bos_token_id=bos_token_id,
194
+ eos_token_id=eos_token_id,
195
+ tie_word_embeddings=tie_word_embeddings,
196
+ **kwargs,
197
+ )
198
+
199
+
200
+ __all__ = ["MiniMaxM2Config"]
docs/mlx_deploy_guide.md ADDED
@@ -0,0 +1,70 @@
1
+ ## MLX deployment guide
2
+
3
+ Run, serve, and fine-tune [**MiniMax-M2**](https://huggingface.co/MiniMaxAI/MiniMax-M2) locally on your Mac using the **MLX** framework. This guide gets you up and running quickly.
4
+
5
+ > **Requirements**
6
+ > - Apple Silicon Mac (M3 Ultra or later)
7
+ > - **At least 256GB of unified memory (RAM)**
8
+
9
+
10
+ **Installation**
11
+
12
+ Install the `mlx-lm` package via pip:
13
+
14
+ ```bash
15
+ pip install -U mlx-lm
16
+ ```
17
+
18
+ **CLI**
19
+
20
+ Generate text directly from the terminal:
21
+
22
+ ```bash
23
+ mlx_lm.generate \
24
+ --model mlx-community/MiniMax-M2-4bit \
25
+ --prompt "How tall is Mount Everest?"
26
+ ```
27
+
28
+ > Add `--max-tokens 256` to control response length, or `--temp 0.7` for creativity.
29
+
30
+ **Python Script Example**
31
+
32
+ Use `mlx-lm` in your own Python scripts:
33
+
34
+ ```python
35
+ from mlx_lm import load, generate
36
+
37
+ # Load the quantized model
38
+ model, tokenizer = load("mlx-community/MiniMax-M2-4bit")
39
+
40
+ prompt = "Hello, how are you?"
41
+
42
+ # Apply chat template if available (recommended for chat models)
43
+ if tokenizer.chat_template is not None:
44
+ messages = [{"role": "user", "content": prompt}]
45
+ prompt = tokenizer.apply_chat_template(
46
+ messages,
47
+ tokenize=False,
48
+ add_generation_prompt=True
49
+ )
50
+
51
+ # Generate response
52
+ response = generate(
53
+ model,
54
+ tokenizer,
55
+ prompt=prompt,
56
+ max_tokens=256,
57
+ temp=0.7,
58
+ verbose=True
59
+ )
60
+
61
+ print(response)
62
+ ```
63
+
64
+ **Tips**
65
+ - **Model variants**: Check this [MLX community collection on Hugging Face](https://huggingface.co/collections/mlx-community/minimax-m2) for `MiniMax-M2-4bit`, `6bit`, `8bit`, or `bfloat16` versions.
66
+ - **Fine-tuning**: Use `mlx_lm.lora` for parameter-efficient fine-tuning (LoRA).
67
+
68
+ **Resources**
69
+ - GitHub: [https://github.com/ml-explore/mlx-lm](https://github.com/ml-explore/mlx-lm)
70
+ - Models: [https://huggingface.co/mlx-community](https://huggingface.co/mlx-community)
docs/sglang_deploy_guide.md ADDED
@@ -0,0 +1,118 @@
1
+ # MiniMax M2 Model SGLang Deployment Guide
2
+
3
+ [English Version](./sglang_deploy_guide.md) | [Chinese Version](./sglang_deploy_guide_cn.md)
4
+
5
+ We recommend using [SGLang](https://github.com/sgl-project/sglang) to deploy the [MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2) model. SGLang is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing capabilities, and deeply optimized underlying performance. We recommend reviewing SGLang's official documentation to check hardware compatibility before deployment.
6
+
7
+ ## Applicable Models
8
+
9
+ This document applies to the following models. You only need to change the model name during deployment.
10
+
11
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
12
+
13
+ The deployment process is illustrated below using MiniMax-M2 as an example.
14
+
15
+ ## System Requirements
16
+
17
+ - OS: Linux
18
+
19
+ - Python: 3.9 - 3.12
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - Memory requirements: 220 GB for weights, 240 GB per 1M context tokens
26
+
27
+ The following are recommended configurations; actual requirements should be adjusted based on your use case:
28
+
29
+ - 4x 96GB GPUs: Supported context length of up to 400K tokens.
30
+
31
+ - 8x 144GB GPUs: Supported context length of up to 3M tokens.
32
+
33
+ ## Deployment with Python
34
+
35
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
36
+
37
+ We recommend installing SGLang in a fresh Python environment:
38
+
39
+ ```bash
40
+ git clone -b v0.5.4.post1 https://github.com/sgl-project/sglang.git
41
+ cd sglang
42
+
43
+ # Install the python packages
44
+ pip install --upgrade pip
45
+ pip install -e "python"
46
+ ```
47
+
48
+ Run the following command to start the SGLang server. SGLang will automatically download and cache the MiniMax-M2 model from Hugging Face.
49
+
50
+ 4-GPU deployment command:
51
+
52
+ ```bash
53
+ python -m sglang.launch_server \
54
+ --model-path MiniMaxAI/MiniMax-M2 \
55
+ --tp-size 4 \
56
+ --tool-call-parser minimax-m2 \
57
+ --reasoning-parser minimax-append-think \
58
+ --host 0.0.0.0 \
59
+ --trust-remote-code \
60
+ --port 8000 \
61
+ --mem-fraction-static 0.85
62
+ ```
63
+
64
+ 8-GPU deployment command:
65
+
66
+ ```bash
67
+ python -m sglang.launch_server \
68
+ --model-path MiniMaxAI/MiniMax-M2 \
69
+ --tp-size 8 \
70
+ --ep-size 8 \
71
+ --tool-call-parser minimax-m2 \
72
+ --trust-remote-code \
73
+ --host 0.0.0.0 \
74
+ --reasoning-parser minimax-append-think \
75
+ --port 8000 \
76
+ --mem-fraction-static 0.85
77
+ ```
78
+
79
+ ## Testing Deployment
80
+
81
+ After startup, you can test the SGLang OpenAI-compatible API with the following command:
82
+
83
+ ```bash
84
+ curl http://localhost:8000/v1/chat/completions \
85
+ -H "Content-Type: application/json" \
86
+ -d '{
87
+ "model": "MiniMaxAI/MiniMax-M2",
88
+ "messages": [
89
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
90
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
91
+ ]
92
+ }'
93
+ ```
94
+
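+ Equivalently, the same endpoint can be called from Python with the OpenAI SDK (a minimal sketch; it assumes the server started above is listening on localhost:8000):
+
+ ```python
+ from openai import OpenAI
+
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+
+ response = client.chat.completions.create(
+     model="MiniMaxAI/MiniMax-M2",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Who won the world series in 2020?"},
+     ],
+ )
+ print(response.choices[0].message.content)
+ ```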
95
+ ## Common Issues
96
+
97
+ ### Hugging Face Network Issues
98
+
99
+ If you encounter network issues, you can set up a proxy before pulling the model.
100
+
101
+ ```bash
102
+ export HF_ENDPOINT=https://hf-mirror.com
103
+ ```
104
+
105
+ ### MiniMax-M2 model is not currently supported
106
+
107
+ Please upgrade SGLang to the latest stable version (>= v0.5.4.post3).
108
+
109
+ ## Getting Support
110
+
111
+ If you encounter any issues while deploying the MiniMax model:
112
+
113
+ - Contact our technical support team through official channels such as email at [[email protected]](mailto:[email protected])
114
+
115
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
116
+
117
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
118
+
docs/sglang_deploy_guide_cn.md ADDED
@@ -0,0 +1,118 @@
1
+ # MiniMax M2 Model SGLang Deployment Guide
2
+
3
+ [English Version](./sglang_deploy_guide.md) | [Chinese Version](./sglang_deploy_guide_cn.md)
4
+
5
+ We recommend deploying the [MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2) model with [SGLang](https://github.com/sgl-project/sglang). SGLang is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing, and deeply optimized low-level performance. We recommend reviewing SGLang's official documentation to check hardware compatibility before deployment.
6
+
7
+ ## Applicable Models
8
+
9
+ This document applies to the following models; you only need to change the model name during deployment.
10
+
11
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
12
+
13
+ The deployment process is illustrated below using MiniMax-M2 as an example.
14
+
15
+ ## System Requirements
16
+
17
+ - OS: Linux
18
+
19
+ - Python: 3.9 - 3.12
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - Memory requirements: 220 GB for weights, 240 GB per 1M context tokens
26
+
27
+ The following are recommended configurations; adjust to your actual use case:
28
+
29
+ - 4x 96GB GPUs: supports a total context of up to 400K tokens.
30
+
31
+ - 8x 144GB GPUs: supports a total context of up to 3M tokens.
32
+
33
+ ## Deployment with Python
34
+
35
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
36
+
37
+ We recommend installing SGLang in a fresh Python environment:
38
+ ```bash
39
+ git clone -b v0.5.4.post1 https://github.com/sgl-project/sglang.git
40
+ cd sglang
41
+
42
+ # Install the python packages
43
+ pip install --upgrade pip
44
+ pip install -e "python"
45
+ ```
46
+
47
+ Run the following command to start the SGLang server. SGLang will automatically download and cache the MiniMax-M2 model from Hugging Face.
48
+
49
+ 4-GPU deployment command:
50
+
51
+ ```bash
52
+ python -m sglang.launch_server \
53
+ --model-path MiniMaxAI/MiniMax-M2 \
54
+ --tp-size 4 \
55
+ --tool-call-parser minimax-m2 \
56
+ --reasoning-parser minimax-append-think \
57
+ --host 0.0.0.0 \
58
+ --trust-remote-code \
59
+ --port 8000 \
60
+ --mem-fraction-static 0.85
61
+ ```
62
+
63
+ 8-GPU deployment command:
64
+
65
+ ```bash
66
+ python -m sglang.launch_server \
67
+ --model-path MiniMaxAI/MiniMax-M2 \
68
+ --tp-size 8 \
69
+ --ep-size 8 \
70
+ --tool-call-parser minimax-m2 \
71
+ --trust-remote-code \
72
+ --host 0.0.0.0 \
73
+ --reasoning-parser minimax-append-think \
74
+ --port 8000 \
75
+ --mem-fraction-static 0.85
76
+ ```
77
+
78
+ ## Testing Deployment
79
+
80
+ After startup, you can test the SGLang OpenAI-compatible API with the following command:
81
+
82
+ ```bash
83
+ curl http://localhost:8000/v1/chat/completions \
84
+ -H "Content-Type: application/json" \
85
+ -d '{
86
+ "model": "MiniMaxAI/MiniMax-M2",
87
+ "messages": [
88
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
89
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
90
+ ]
91
+ }'
92
+ ```
93
+
94
+ ## Common Issues
95
+
96
+ ### Hugging Face Network Issues
97
+
98
+ If you encounter network issues, you can set up a proxy before pulling the model.
99
+
100
+ ```bash
101
+ export HF_ENDPOINT=https://hf-mirror.com
102
+ ```
103
+
104
+ ### MiniMax-M2 model is not currently supported
105
+
106
+ Please upgrade SGLang to the latest stable version (>= v0.5.4.post1).
107
+
108
+ ## Getting Support
109
+
110
+ If you encounter any issues while deploying the MiniMax model:
111
+
112
+ - Contact our technical support team through official channels such as email at [[email protected]](mailto:[email protected])
113
+
114
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
115
+
116
+ - Share feedback through our [official WeCom (WeChat Work) group](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg)
117
+
118
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
docs/tool_calling_guide.md ADDED
@@ -0,0 +1,485 @@
1
+ # MiniMax-M2 Tool Calling Guide
2
+
3
+ [English Version](./tool_calling_guide.md) | [Chinese Version](./tool_calling_guide_cn.md)
4
+
5
+ ## Introduction
6
+
7
+ The MiniMax-M2 model supports tool calling capabilities, enabling the model to identify when external tools need to be called and output tool call parameters in a structured format. This document provides detailed instructions on how to use the tool calling features of MiniMax-M2.
8
+
9
+ ## Basic Example
10
+
11
+ The following Python script implements a weather query tool call example based on the OpenAI SDK:
12
+
13
+ ```python
14
+ from openai import OpenAI
15
+ import json
16
+
17
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
18
+
19
+ def get_weather(location: str, unit: str):
20
+ return f"Getting the weather for {location} in {unit}..."
21
+
22
+ tool_functions = {"get_weather": get_weather}
23
+
24
+ tools = [{
25
+ "type": "function",
26
+ "function": {
27
+ "name": "get_weather",
28
+ "description": "Get the current weather in a given location",
29
+ "parameters": {
30
+ "type": "object",
31
+ "properties": {
32
+ "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"},
33
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
34
+ },
35
+ "required": ["location", "unit"]
36
+ }
37
+ }
38
+ }]
39
+
40
+ response = client.chat.completions.create(
41
+ model=client.models.list().data[0].id,
42
+ messages=[{"role": "user", "content": "What's the weather like in San Francisco? use celsius."}],
43
+ tools=tools,
44
+ tool_choice="auto"
45
+ )
46
+
47
+ print(response)
48
+
49
+ tool_call = response.choices[0].message.tool_calls[0].function
50
+ print(f"Function called: {tool_call.name}")
51
+ print(f"Arguments: {tool_call.arguments}")
52
+ print(f"Result: {get_weather(**json.loads(tool_call.arguments))}")
53
+ ```
54
+
55
+ **Output Example:**
56
+ ```
57
+ Function called: get_weather
58
+ Arguments: {"location": "San Francisco, CA", "unit": "celsius"}
59
+ Result: Getting the weather for San Francisco, CA in celsius...
60
+ ```
61
+
62
+ ## Manually Parsing Model Output
63
+
64
+ **We strongly recommend using vLLM or SGLang for parsing tool calls.** If you cannot use the built-in parser of inference engines (e.g., vLLM and SGLang) that support MiniMax-M2, or need to use other inference frameworks (such as transformers, TGI, etc.), you can manually parse the model's raw output using the following method. This approach requires you to parse the XML tag format of the model output yourself.
65
+
66
+ ### Example Using Transformers
67
+
68
+ Here is a complete example using the transformers library:
69
+
70
+ ```python
71
+ from transformers import AutoTokenizer
72
+
73
+ def get_default_tools():
74
+ return [
75
+ {
76
+ "name": "get_current_weather",
77
+ "description": "Get the latest weather for a location",
78
+ "parameters": {
79
+ "type": "object",
80
+ "properties": {
81
+ "location": {
82
+ "type": "string",
83
+ "description": "A certain city, such as Beijing, Shanghai"
84
+ }
85
+ },
86
+ "required": ["location"]
87
+ }
89
+ }
90
+ ]
91
+
92
+ # Load model and tokenizer
93
+ model_id = "MiniMaxAI/MiniMax-M2"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
94
+ prompt = "What's the weather like in Shanghai today?"
95
+ messages = [
96
+ {"role": "system", "content": "You are a helpful assistant."},
97
+ {"role": "user", "content": prompt},
98
+ ]
99
+
100
+ # Enable function calling tools
101
+ tools = get_default_tools()
102
+
103
+ # Apply chat template and include tool definitions
104
+ text = tokenizer.apply_chat_template(
105
+ messages,
106
+ tokenize=False,
107
+ add_generation_prompt=True,
108
+ tools=tools
109
+ )
110
+
111
+ # Send request (using any inference service)
112
+ import requests
113
+ payload = {
114
+ "model": "MiniMaxAI/MiniMax-M2",
115
+ "prompt": text,
116
+ "max_tokens": 4096
117
+ }
118
+ response = requests.post(
119
+ "http://localhost:8000/v1/completions",
120
+ headers={"Content-Type": "application/json"},
121
+ json=payload,
122
+ stream=False,
123
+ )
124
+
125
+ # Model output needs manual parsing
126
+ raw_output = response.json()["choices"][0]["text"]
127
+ print("Raw output:", raw_output)
128
+
129
+ # Use the parsing function below to process the output
130
+ tool_calls = parse_tool_calls(raw_output, tools)
131
+ ```
132
+
133
+ ## 🛠️ Tool Call Definition
134
+
135
+ ### Tool Structure
136
+
137
+ Tool calls need to define the `tools` field in the request body. Each tool consists of the following parts:
138
+
139
+ ```json
140
+ {
141
+ "tools": [
142
+ {
143
+ "name": "search_web",
144
+ "description": "Search function.",
145
+ "parameters": {
146
+ "properties": {
147
+ "query_list": {
148
+ "description": "Keywords for search, list should contain 1 element.",
149
+ "items": { "type": "string" },
150
+ "type": "array"
151
+ },
152
+ "query_tag": {
153
+ "description": "Category of query",
154
+ "items": { "type": "string" },
155
+ "type": "array"
156
+ }
157
+ },
158
+ "required": [ "query_list", "query_tag" ],
159
+ "type": "object"
160
+ }
161
+ }
162
+ ]
163
+ }
164
+ ```
165
+
166
+ **Field Descriptions:**
167
+ - `name`: Function name
168
+ - `description`: Function description
169
+ - `parameters`: Function parameter definition
170
+ - `properties`: Parameter property definition, where key is the parameter name and value contains detailed parameter description
171
+ - `required`: List of required parameters
172
+ - `type`: Parameter type (usually "object")
173
+
174
+ ### Internal Processing Format
175
+
176
+ When processing within the MiniMax-M2 model, tool definitions are converted to a special format and concatenated to the input text. Here is a complete example:
177
+
178
+ ```
179
+ ]~!b[]~b]system
180
+ You are a helpful assistant.
181
+
182
+ # Tools
183
+ You may call one or more tools to assist with the user query.
184
+ Here are the tools available in JSONSchema format:
185
+
186
+ <tools>
187
+ <tool>{"name": "search_web", "description": "Search function.", "parameters": {"type": "object", "properties": {"query_list": {"type": "array", "items": {"type": "string"}, "description": "Keywords for search, list should contain 1 element."}, "query_tag": {"type": "array", "items": {"type": "string"}, "description": "Category of query"}}, "required": ["query_list", "query_tag"]}}</tool>
188
+ </tools>
189
+
190
+ When making tool calls, use XML format to invoke tools and pass parameters:
191
+
192
+ <minimax:tool_call>
193
+ <invoke name="tool-name-1">
194
+ <parameter name="param-key-1">param-value-1</parameter>
195
+ <parameter name="param-key-2">param-value-2</parameter>
196
+ ...
197
+ </invoke>
198
+ [e~[
199
+ ]~b]user
200
+ When were the latest announcements from OpenAI and Gemini?[e~[
201
+ ]~b]ai
202
+ <think>
203
+ ```
204
+
205
+ **Format Description:**
206
+
207
+ - `]~!b[]~b]system`: System message start marker
208
+ - `[e~[`: Message end marker
209
+ - `]~b]user`: User message start marker
210
+ - `]~b]ai`: Assistant message start marker
211
+ - `]~b]tool`: Tool result message start marker
212
+ - `<tools>...</tools>`: Tool definition area, each tool is wrapped with `<tool>` tag, content is JSON Schema
213
+ - `<minimax:tool_call>...</minimax:tool_call>`: Tool call area
214
+ - `<think>...</think>`: Thinking process marker during generation
215
+
216
+ ### Model Output Format
217
+
218
+ MiniMax-M2 uses structured XML tag format:
219
+
220
+ ```xml
221
+ <minimax:tool_call>
222
+ <invoke name="search_web">
223
+ <parameter name="query_tag">["technology", "events"]</parameter>
224
+ <parameter name="query_list">["\"OpenAI\" \"latest\" \"release\""]</parameter>
225
+ </invoke>
226
+ <invoke name="search_web">
227
+ <parameter name="query_tag">["technology", "events"]</parameter>
228
+ <parameter name="query_list">["\"Gemini\" \"latest\" \"release\""]</parameter>
229
+ </invoke>
230
+ </minimax:tool_call>
231
+ ```
232
+
233
+ Each tool call uses the `<invoke name="function_name">` tag, and parameters use the `<parameter name="parameter_name">` tag wrapper.
234
+
235
+ ## Manually Parsing Tool Call Results
236
+
237
+ ### Parsing Tool Calls
238
+
239
+ MiniMax-M2 uses structured XML tags, which require a different parsing approach. The core function is as follows:
240
+
241
+ ```python
242
+ import re
243
+ import json
244
+ from typing import Any, Optional, List, Dict
245
+
246
+
247
+ def extract_name(name_str: str) -> str:
248
+ """Extract name from quoted string"""
249
+ name_str = name_str.strip()
250
+ if name_str.startswith('"') and name_str.endswith('"'):
251
+ return name_str[1:-1]
252
+ elif name_str.startswith("'") and name_str.endswith("'"):
253
+ return name_str[1:-1]
254
+ return name_str
255
+
256
+
257
+ def convert_param_value(value: str, param_type: str) -> Any:
258
+ """Convert parameter value based on parameter type"""
259
+ if value.lower() == "null":
260
+ return None
261
+
262
+ param_type = param_type.lower()
263
+
264
+ if param_type in ["string", "str", "text"]:
265
+ return value
266
+ elif param_type in ["integer", "int"]:
267
+ try:
268
+ return int(value)
269
+ except (ValueError, TypeError):
270
+ return value
271
+ elif param_type in ["number", "float"]:
272
+ try:
273
+ val = float(value)
274
+ return val if val != int(val) else int(val)
275
+ except (ValueError, TypeError):
276
+ return value
277
+ elif param_type in ["boolean", "bool"]:
278
+ return value.lower() in ["true", "1"]
279
+ elif param_type in ["object", "array"]:
280
+ try:
281
+ return json.loads(value)
282
+ except json.JSONDecodeError:
283
+ return value
284
+ else:
285
+ # Try JSON parsing, return string if failed
286
+ try:
287
+ return json.loads(value)
288
+ except json.JSONDecodeError:
289
+ return value
290
+
291
+
292
+ def parse_tool_calls(model_output: str, tools: Optional[List[Dict]] = None) -> List[Dict]:
293
+ """
294
+ Extract all tool calls from model output
295
+
296
+ Args:
297
+ model_output: Complete output text from the model
298
+ tools: Tool definition list for getting parameter type information, format can be:
299
+ - [{"name": "...", "parameters": {...}}]
300
+ - [{"type": "function", "function": {"name": "...", "parameters": {...}}}]
301
+
302
+ Returns:
303
+ Parsed tool call list, each element contains name and arguments fields
304
+
305
+ Example:
306
+ >>> tools = [{
307
+ ... "name": "get_weather",
308
+ ... "parameters": {
309
+ ... "type": "object",
310
+ ... "properties": {
311
+ ... "location": {"type": "string"},
312
+ ... "unit": {"type": "string"}
313
+ ... }
314
+ ... }
315
+ ... }]
316
+ >>> output = '''<minimax:tool_call>
317
+ ... <invoke name="get_weather">
318
+ ... <parameter name="location">San Francisco</parameter>
319
+ ... <parameter name="unit">celsius</parameter>
320
+ ... </invoke>
321
+ ... </minimax:tool_call>'''
322
+ >>> result = parse_tool_calls(output, tools)
323
+ >>> print(result)
324
+ [{'name': 'get_weather', 'arguments': {'location': 'San Francisco', 'unit': 'celsius'}}]
325
+ """
326
+ # Quick check if tool call marker is present
327
+ if "<minimax:tool_call>" not in model_output:
328
+ return []
329
+
330
+ tool_calls = []
331
+
332
+ try:
333
+ # Match all <minimax:tool_call> blocks
334
+ tool_call_regex = re.compile(r"<minimax:tool_call>(.*?)</minimax:tool_call>", re.DOTALL)
335
+ invoke_regex = re.compile(r"<invoke name=(.*?)</invoke>", re.DOTALL)
336
+ parameter_regex = re.compile(r"<parameter name=(.*?)</parameter>", re.DOTALL)
337
+
338
+ # Iterate through all tool_call blocks
339
+ for tool_call_match in tool_call_regex.findall(model_output):
340
+ # Iterate through all invokes in this block
341
+ for invoke_match in invoke_regex.findall(tool_call_match):
342
+ # Extract function name
343
+ name_match = re.search(r'^([^>]+)', invoke_match)
344
+ if not name_match:
345
+ continue
346
+
347
+ function_name = extract_name(name_match.group(1))
348
+
349
+ # Get parameter configuration
350
+ param_config = {}
351
+ if tools:
352
+ for tool in tools:
353
+ tool_name = tool.get("name") or tool.get("function", {}).get("name")
354
+ if tool_name == function_name:
355
+ params = tool.get("parameters") or tool.get("function", {}).get("parameters")
356
+ if isinstance(params, dict) and "properties" in params:
357
+ param_config = params["properties"]
358
+ break
359
+
360
+ # Extract parameters
361
+ param_dict = {}
362
+ for match in parameter_regex.findall(invoke_match):
363
+ param_match = re.search(r'^([^>]+)>(.*)', match, re.DOTALL)
364
+ if param_match:
365
+ param_name = extract_name(param_match.group(1))
366
+ param_value = param_match.group(2).strip()
367
+
368
+ # Remove leading and trailing newlines
369
+ if param_value.startswith('\n'):
370
+ param_value = param_value[1:]
371
+ if param_value.endswith('\n'):
372
+ param_value = param_value[:-1]
373
+
374
+ # Get parameter type and convert
375
+ param_type = "string"
376
+ if param_name in param_config:
377
+ if isinstance(param_config[param_name], dict) and "type" in param_config[param_name]:
378
+ param_type = param_config[param_name]["type"]
379
+
380
+ param_dict[param_name] = convert_param_value(param_value, param_type)
381
+
382
+ tool_calls.append({
383
+ "name": function_name,
384
+ "arguments": param_dict
385
+ })
386
+
387
+ except Exception as e:
388
+ print(f"Failed to parse tool calls: {e}")
389
+ return []
390
+
391
+ return tool_calls
392
+ ```
393
+
394
+ **Usage Example:**
395
+
396
+ ```python
397
+ # Define tools
398
+ tools = [
399
+ {
400
+ "name": "get_weather",
401
+ "parameters": {
402
+ "type": "object",
403
+ "properties": {
404
+ "location": {"type": "string"},
405
+ "unit": {"type": "string"}
406
+ },
407
+ "required": ["location", "unit"]
408
+ }
409
+ }
410
+ ]
411
+
412
+ # Model output
413
+ model_output = """Let me help you query the weather.
414
+ <minimax:tool_call>
415
+ <invoke name="get_weather">
416
+ <parameter name="location">San Francisco</parameter>
417
+ <parameter name="unit">celsius</parameter>
418
+ </invoke>
419
+ </minimax:tool_call>"""
420
+
421
+ # Parse tool calls
422
+ tool_calls = parse_tool_calls(model_output, tools)
423
+
424
+ # Output results
425
+ for call in tool_calls:
426
+ print(f"Function called: {call['name']}")
427
+ print(f"Arguments: {call['arguments']}")
428
+ # Output: Function called: get_weather
429
+ # Arguments: {'location': 'San Francisco', 'unit': 'celsius'}
430
+ ```
431
+
432
+ ### Executing Tool Calls
433
+
434
+ After parsing is complete, you can execute the corresponding tool and construct the return result:
435
+
436
+ ```python
437
+ def execute_function_call(function_name: str, arguments: dict):
438
+ """Execute function call and return result"""
439
+ if function_name == "get_weather":
440
+ location = arguments.get("location", "Unknown location")
441
+ unit = arguments.get("unit", "celsius")
442
+ # Build function execution result
443
+ return {
444
+ "role": "tool",
445
+ "content": [
446
+ {
447
+ "name": function_name,
448
+ "type": "text",
449
+ "text": json.dumps({
450
+ "location": location,
451
+ "temperature": "25",
452
+ "unit": unit,
453
+ "weather": "Sunny"
454
+ }, ensure_ascii=False)
455
+ }
456
+ ]
457
+ }
458
+ elif function_name == "search_web":
459
+ query_list = arguments.get("query_list", [])
460
+ query_tag = arguments.get("query_tag", [])
461
+ # Simulate search results
462
+ return {
463
+ "role": "tool",
464
+ "content": [
465
+ {
466
+ "name": function_name,
467
+ "type": "text",
468
+ "text": f"Search keywords: {query_list}, Category: {query_tag}\nSearch results: Relevant information found"
469
+ }
470
+ ]
471
+ }
472
+
473
+ return None
474
+ ```
475
+
476
+ ### Returning Tool Execution Results to the Model
477
+
478
+ After successfully parsing tool calls, you should add the tool execution results to the conversation history so that the model can access and utilize this information in subsequent interactions. Refer to [chat_template.jinja](https://huggingface.co/MiniMaxAI/MiniMax-M2/blob/main/chat_template.jinja) for concatenation format.
479
+
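+ A minimal sketch of one way to append a tool result and rebuild the prompt with the chat template (the message layout mirrors the examples above; the weather values are dummy data):
+
+ ```python
+ import json
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
+
+ tools = [{"type": "function",
+           "function": {"name": "get_weather",
+                        "parameters": {"type": "object",
+                                       "properties": {"location": {"type": "string"},
+                                                      "unit": {"type": "string"}},
+                                       "required": ["location", "unit"]}}}]
+
+ messages = [
+     {"role": "user", "content": "What's the weather like in San Francisco? use celsius."},
+     # Assistant turn that issued the tool call (arguments as a dict, as the chat template expects).
+     {"role": "assistant", "content": "",
+      "tool_calls": [{"type": "function",
+                      "function": {"name": "get_weather",
+                                   "arguments": {"location": "San Francisco, CA", "unit": "celsius"}}}]},
+     # Tool execution result appended to the history (same shape as execute_function_call returns).
+     {"role": "tool", "content": [{"name": "get_weather", "type": "text",
+                                   "text": json.dumps({"location": "San Francisco, CA", "temperature": "25",
+                                                       "unit": "celsius", "weather": "Sunny"}, ensure_ascii=False)}]},
+ ]
+
+ # Re-apply the chat template so the next completion request can see the tool result.
+ text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, tools=tools)
+ print(text)
+ ```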
480
+ ## References
481
+
482
+ - [MiniMax-M2 Model Repository](https://github.com/MiniMax-AI/MiniMax-M2)
483
+ - [vLLM Project Homepage](https://github.com/vllm-project/vllm)
484
+ - [SGLang Project Homepage](https://github.com/sgl-project/sglang)
485
+ - [OpenAI Python SDK](https://github.com/openai/openai-python)
docs/tool_calling_guide_cn.md ADDED
@@ -0,0 +1,497 @@
1
+ # MiniMax-M2 Tool Calling Guide
2
+
3
+ [English Version](./tool_calling_guide.md) | [Chinese Version](./tool_calling_guide_cn.md)
4
+
5
+ ## Introduction
6
+
7
+ The MiniMax-M2 model supports tool calling, enabling the model to identify when external tools need to be called and to output tool call parameters in a structured format. This document explains how to use the tool calling features of MiniMax-M2.
8
+
9
+ ## Basic Example
10
+
11
+ The following Python script implements a weather query tool call example based on the OpenAI SDK:
12
+
13
+ ```python
14
+ from openai import OpenAI
15
+ import json
16
+
17
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
18
+
19
+ def get_weather(location: str, unit: str):
20
+ return f"Getting the weather for {location} in {unit}..."
21
+
22
+ tool_functions = {"get_weather": get_weather}
23
+
24
+ tools = [{
25
+ "type": "function",
26
+ "function": {
27
+ "name": "get_weather",
28
+ "description": "Get the current weather in a given location",
29
+ "parameters": {
30
+ "type": "object",
31
+ "properties": {
32
+ "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"},
33
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
34
+ },
35
+ "required": ["location", "unit"]
36
+ }
37
+ }
38
+ }]
39
+
40
+ response = client.chat.completions.create(
41
+ model=client.models.list().data[0].id,
42
+ messages=[{"role": "user", "content": "What's the weather like in San Francisco? use celsius."}],
43
+ tools=tools,
44
+ tool_choice="auto"
45
+ )
46
+
47
+ print(response)
48
+
49
+ tool_call = response.choices[0].message.tool_calls[0].function
50
+ print(f"Function called: {tool_call.name}")
51
+ print(f"Arguments: {tool_call.arguments}")
52
+ print(f"Result: {get_weather(**json.loads(tool_call.arguments))}")
53
+ ```
54
+
55
+ **Output Example:**
56
+ ```
57
+ Function called: get_weather
58
+ Arguments: {"location": "San Francisco, CA", "unit": "celsius"}
59
+ Result: Getting the weather for San Francisco, CA in celsius...
60
+ ```
61
+
62
+ ## Manually Parsing Model Output
63
+
64
+ **We strongly recommend using vLLM or SGLang to parse tool calls.** If you cannot use the built-in parsers of inference engines that support MiniMax-M2 (such as vLLM and SGLang), or need to use other inference frameworks (such as transformers or TGI), you can manually parse the model's raw output with the following method. This approach requires you to parse the XML tag format of the model output yourself.
65
+
66
+ ### Example Using Transformers
67
+
68
+ Here is a complete example using the transformers library:
69
+
70
+ ```python
71
+ from transformers import AutoTokenizer
72
+
73
+ def get_default_tools():
74
+ return [
75
+ {
76
+ "name": "get_current_weather",
77
+ "description": "Get the latest weather for a location",
78
+ "parameters": {
79
+ "type": "object",
80
+ "properties": {
81
+ "location": {
82
+ "type": "string",
83
+ "description": "A certain city, such as Beijing, Shanghai"
84
+ }
85
+ },
86
+ "required": ["location"]
+ }
89
+ }
90
+ ]
91
+
92
+ # Load model and tokenizer
93
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
94
+ prompt = "What's the weather like in Shanghai today?"
95
+ messages = [
96
+ {"role": "system", "content": "You are a helpful assistant."},
97
+ {"role": "user", "content": prompt},
98
+ ]
99
+
100
+ # Enable function calling tools
101
+ tools = get_default_tools()
102
+
103
+ # Apply chat template and include tool definitions
104
+ text = tokenizer.apply_chat_template(
105
+ messages,
106
+ tokenize=False,
107
+ add_generation_prompt=True,
108
+ tools=tools
109
+ )
110
+
111
+ # Send request (using any inference service)
112
+ import requests
113
+ payload = {
114
+ "model": "MiniMaxAI/MiniMax-M2",
115
+ "prompt": text,
116
+ "max_tokens": 4096
117
+ }
118
+ response = requests.post(
119
+ "http://localhost:8000/v1/completions",
120
+ headers={"Content-Type": "application/json"},
121
+ json=payload,
122
+ stream=False,
123
+ )
124
+
125
+ # Model output needs manual parsing
126
+ raw_output = response.json()["choices"][0]["text"]
127
+ print("Raw output:", raw_output)
128
+
129
+ # Use the parsing function below to process the output
130
+ tool_calls = parse_tool_calls(raw_output, tools)
131
+ ```
132
+
133
+ ## 🛠️ 工具调用定义
134
+
135
+ ### 工具结构
136
+
137
+ 工具调用需要在请求体中定义 `tools` 字段。每个工具由以下部分组成:
138
+
139
+ ```json
140
+ {
141
+ "tools": [
142
+ {
143
+ "name": "search_web",
144
+ "description": "Search function.",
145
+ "parameters": {
146
+ "properties": {
147
+ "query_list": {
148
+ "description": "Keywords for search, list should contain 1 element.",
149
+ "items": { "type": "string" },
150
+ "type": "array"
151
+ },
152
+ "query_tag": {
153
+ "description": "Category of query",
154
+ "items": { "type": "string" },
155
+ "type": "array"
156
+ }
157
+ },
158
+ "required": [ "query_list", "query_tag" ],
159
+ "type": "object"
160
+ }
161
+ }
162
+ ]
163
+ }
164
+ ```
165
+
166
+ **字段说明:**
167
+ - `name`:函数名称
168
+ - `description`:函数描述
169
+ - `parameters`:函数参数定义
170
+ - `properties`:参数属性定义,其中键是参数名称,值包含详细的参数描述
171
+ - `required`:必需参数列表
172
+ - `type`:参数类型(通常为 "object")
173
+
174
+ ### 内部处理格式
175
+
176
+ 在 MiniMax-M2 模型内部处理时,工具定义会被转换为特殊格式并连接到输入文本中。以下是一个完整示例:
177
+
178
+ ```
179
+ ]~!b[]~b]system
180
+ You are a helpful assistant.
181
+
182
+ # Tools
183
+ You may call one or more tools to assist with the user query.
184
+ Here are the tools available in JSONSchema format:
185
+
186
+ <tools>
187
+ <tool>{"name": "search_web", "description": "Search function.", "parameters": {"type": "object", "properties": {"query_list": {"type": "array", "items": {"type": "string"}, "description": "Keywords for search, list should contain 1 element."}, "query_tag": {"type": "array", "items": {"type": "string"}, "description": "Category of query"}}, "required": ["query_list", "query_tag"]}}</tool>
188
+ </tools>
189
+
190
+ When making tool calls, use XML format to invoke tools and pass parameters:
191
+
192
+ <minimax:tool_call>
193
+ <invoke name="tool-name-1">
194
+ <parameter name="param-key-1">param-value-1</parameter>
195
+ <parameter name="param-key-2">param-value-2</parameter>
196
+ ...
197
+ </invoke>
198
+ [e~[
199
+ ]~b]user
200
+ When were the latest announcements from OpenAI and Gemini?[e~[
201
+ ]~b]ai
202
+ <think>
203
+ ```
204
+
205
+ **格式说明:**
206
+
207
+ - `]~!b[]~b]system`:系统消息开始标记
208
+ - `[e~[`:消息结束标记
209
+ - `]~b]user`:用户消息开始标记
210
+ - `]~b]ai`:助手消息开始标记
211
+ - `]~b]tool`:工具结果消息开始标记
212
+ - `<tools>...</tools>`:工具定义区域,每个工具都用 `<tool>` 标签包装,内容为 JSON Schema
213
+ - `<minimax:tool_call>...</minimax:tool_call>`:工具调用区域
214
+ - `<think>...</think>`:生成过程中的思考过程标记
215
+
216
+ ### 模型输出格式
217
+
218
+ MiniMax-M2 使用结构化的 XML 标签格式:
219
+
220
+ ```xml
221
+ <minimax:tool_call>
222
+ <invoke name="search_web">
223
+ <parameter name="query_tag">["technology", "events"]</parameter>
224
+ <parameter name="query_list">["\"OpenAI\" \"latest\" \"release\""]</parameter>
225
+ </invoke>
226
+ <invoke name="search_web">
227
+ <parameter name="query_tag">["technology", "events"]</parameter>
228
+ <parameter name="query_list">["\"Gemini\" \"latest\" \"release\""]</parameter>
229
+ </invoke>
230
+ </minimax:tool_call>
231
+ ```
232
+
233
+ 每个工具调用使用 `<invoke name="function_name">` 标签,参数使用 `<parameter name="parameter_name">` 标签包装。
234
+
235
+ ## 手动解析工具调用结果
236
+
237
+ ### 解析工具调用
238
+
239
+ MiniMax-M2 使用结构化的 XML 标签,这需要一种不同的解析方法。核心函数如下:
240
+
241
+ ```python
242
+ import re
243
+ import json
244
+ from typing import Any, Optional, List, Dict
245
+
246
+
247
+ def extract_name(name_str: str) -> str:
248
+ """Extract name from quoted string"""
249
+ name_str = name_str.strip()
250
+ if name_str.startswith('"') and name_str.endswith('"'):
251
+ return name_str[1:-1]
252
+ elif name_str.startswith("'") and name_str.endswith("'"):
253
+ return name_str[1:-1]
254
+ return name_str
255
+
256
+
257
+ def convert_param_value(value: str, param_type: str) -> Any:
258
+ """Convert parameter value based on parameter type"""
259
+ if value.lower() == "null":
260
+ return None
261
+
262
+ param_type = param_type.lower()
263
+
264
+ if param_type in ["string", "str", "text"]:
265
+ return value
266
+ elif param_type in ["integer", "int"]:
267
+ try:
268
+ return int(value)
269
+ except (ValueError, TypeError):
270
+ return value
271
+ elif param_type in ["number", "float"]:
272
+ try:
273
+ val = float(value)
274
+ return val if val != int(val) else int(val)
275
+ except (ValueError, TypeError):
276
+ return value
277
+ elif param_type in ["boolean", "bool"]:
278
+ return value.lower() in ["true", "1"]
279
+ elif param_type in ["object", "array"]:
280
+ try:
281
+ return json.loads(value)
282
+ except json.JSONDecodeError:
283
+ return value
284
+ else:
285
+ # Try JSON parsing, return string if failed
286
+ try:
287
+ return json.loads(value)
288
+ except json.JSONDecodeError:
289
+ return value
290
+
291
+
292
+ def parse_tool_calls(model_output: str, tools: Optional[List[Dict]] = None) -> List[Dict]:
293
+ """
294
+ Extract all tool calls from model output
295
+
296
+ Args:
297
+ model_output: Complete output text from the model
298
+ tools: Tool definition list for getting parameter type information, format can be:
299
+ - [{"name": "...", "parameters": {...}}]
300
+ - [{"type": "function", "function": {"name": "...", "parameters": {...}}}]
301
+
302
+ Returns:
303
+ Parsed tool call list, each element contains name and arguments fields
304
+
305
+ Example:
306
+ >>> tools = [{
307
+ ... "name": "get_weather",
308
+ ... "parameters": {
309
+ ... "type": "object",
310
+ ... "properties": {
311
+ ... "location": {"type": "string"},
312
+ ... "unit": {"type": "string"}
313
+ ... }
314
+ ... }
315
+ ... }]
316
+ >>> output = '''<minimax:tool_call>
317
+ ... <invoke name="get_weather">
318
+ ... <parameter name="location">San Francisco</parameter>
319
+ ... <parameter name="unit">celsius</parameter>
320
+ ... </invoke>
321
+ ... </minimax:tool_call>'''
322
+ >>> result = parse_tool_calls(output, tools)
323
+ >>> print(result)
324
+ [{'name': 'get_weather', 'arguments': {'location': 'San Francisco', 'unit': 'celsius'}}]
325
+ """
326
+ # Quick check if tool call marker is present
327
+ if "<minimax:tool_call>" not in model_output:
328
+ return []
329
+
330
+ tool_calls = []
331
+
332
+ try:
333
+ # Match all <minimax:tool_call> blocks
334
+ tool_call_regex = re.compile(r"<minimax:tool_call>(.*?)</minimax:tool_call>", re.DOTALL)
335
+ invoke_regex = re.compile(r"<invoke name=(.*?)</invoke>", re.DOTALL)
336
+ parameter_regex = re.compile(r"<parameter name=(.*?)</parameter>", re.DOTALL)
337
+
338
+ # Iterate through all tool_call blocks
339
+ for tool_call_match in tool_call_regex.findall(model_output):
340
+ # Iterate through all invokes in this block
341
+ for invoke_match in invoke_regex.findall(tool_call_match):
342
+ # Extract function name
343
+ name_match = re.search(r'^([^>]+)', invoke_match)
344
+ if not name_match:
345
+ continue
346
+
347
+ function_name = extract_name(name_match.group(1))
348
+
349
+ # Get parameter configuration
350
+ param_config = {}
351
+ if tools:
352
+ for tool in tools:
353
+ tool_name = tool.get("name") or tool.get("function", {}).get("name")
354
+ if tool_name == function_name:
355
+ params = tool.get("parameters") or tool.get("function", {}).get("parameters")
356
+ if isinstance(params, dict) and "properties" in params:
357
+ param_config = params["properties"]
358
+ break
359
+
360
+ # Extract parameters
361
+ param_dict = {}
362
+ for match in parameter_regex.findall(invoke_match):
363
+ param_match = re.search(r'^([^>]+)>(.*)', match, re.DOTALL)
364
+ if param_match:
365
+ param_name = extract_name(param_match.group(1))
366
+ param_value = param_match.group(2).strip()
367
+
368
+ # Remove leading and trailing newlines
369
+ if param_value.startswith('\n'):
370
+ param_value = param_value[1:]
371
+ if param_value.endswith('\n'):
372
+ param_value = param_value[:-1]
373
+
374
+ # Get parameter type and convert
375
+ param_type = "string"
376
+ if param_name in param_config:
377
+ if isinstance(param_config[param_name], dict) and "type" in param_config[param_name]:
378
+ param_type = param_config[param_name]["type"]
379
+
380
+ param_dict[param_name] = convert_param_value(param_value, param_type)
381
+
382
+ tool_calls.append({
383
+ "name": function_name,
384
+ "arguments": param_dict
385
+ })
386
+
387
+ except Exception as e:
388
+ print(f"Failed to parse tool calls: {e}")
389
+ return []
390
+
391
+ return tool_calls
392
+ ```
393
+
394
+ **使用示例:**
395
+
396
+ ```python
397
+ # Define tools
398
+ tools = [
399
+ {
400
+ "name": "get_weather",
401
+ "parameters": {
402
+ "type": "object",
403
+ "properties": {
404
+ "location": {"type": "string"},
405
+ "unit": {"type": "string"}
406
+ },
407
+ "required": ["location", "unit"]
408
+ }
409
+ }
410
+ ]
411
+
412
+ # Model output
413
+ model_output = """Let me help you query the weather.
414
+ <minimax:tool_call>
415
+ <invoke name="get_weather">
416
+ <parameter name="location">San Francisco</parameter>
417
+ <parameter name="unit">celsius</parameter>
418
+ </invoke>
419
+ </minimax:tool_call>"""
420
+
421
+ # Parse tool calls
422
+ tool_calls = parse_tool_calls(model_output, tools)
423
+
424
+ # Output results
425
+ for call in tool_calls:
426
+ print(f"Function called: {call['name']}")
427
+ print(f"Arguments: {call['arguments']}")
428
+ # Output: Function called: get_weather
429
+ # Arguments: {'location': 'San Francisco', 'unit': 'celsius'}
430
+ ```
431
+
432
+ ### 执行工具调用
433
+
434
+ 完成解析后,您可以执行相应的工具并构造返回结果:
435
+
436
+ ```python
437
+ def execute_function_call(function_name: str, arguments: dict):
438
+ """Execute function call and return result"""
439
+ if function_name == "get_weather":
440
+ location = arguments.get("location", "Unknown location")
441
+ unit = arguments.get("unit", "celsius")
442
+ # Build function execution result
443
+ return {
444
+ "role": "tool",
445
+ "content": [
446
+ {
447
+ "name": function_name,
448
+ "type": "text",
449
+ "text": json.dumps({
450
+ "location": location,
451
+ "temperature": "25",
452
+ "unit": unit,
453
+ "weather": "Sunny"
454
+ }, ensure_ascii=False)
455
+ }
456
+ ]
457
+ }
458
+ elif function_name == "search_web":
459
+ query_list = arguments.get("query_list", [])
460
+ query_tag = arguments.get("query_tag", [])
461
+ # Simulate search results
462
+ return {
463
+ "role": "tool",
464
+ "content": [
465
+ {
466
+ "name": function_name,
467
+ "type": "text",
468
+ "text": f"Search keywords: {query_list}, Category: {query_tag}\nSearch results: Relevant information found"
469
+ }
470
+ ]
471
+ }
472
+
473
+ return None
474
+ ```
475
+
476
+ ### 将工具执行结果返回给模型
477
+
478
+ 在成功解析工具调用后,您应该将工具执行结果添加到对话历史中,以便模型在后续交互中可以访问和利用这些信息。请参考 [chat_template.jinja](https://huggingface.co/MiniMaxAI/MiniMax-M2/blob/main/chat_template.jinja) 了解连接格式。
479
+
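+ 下面给出一个简化示意,演示如何把解析出的工具调用与执行结果拼回对话历史,并再次渲染模板发起下一轮请求(示例沿用上文定义的 raw_output、tool_calls、execute_function_call、tokenizer 与 tools;其中将原始模型输出直接作为 assistant 内容回填只是一个简化假设,实际拼接格式请以 chat_template.jinja 为准):
+
+ ```python
+ # A minimal sketch built on the earlier snippets in this guide.
+ # NOTE: echoing raw_output back as the assistant turn is a simplifying
+ # assumption; consult chat_template.jinja for the exact expected format.
+ messages.append({"role": "assistant", "content": raw_output})
+
+ # Execute each parsed tool call and append its result to the conversation
+ for call in tool_calls:
+     result = execute_function_call(call["name"], call["arguments"])
+     if result is not None:
+         messages.append(result)
+
+ # Re-render the conversation (tools included) for the follow-up request
+ next_text = tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True,
+     tools=tools,
+ )
+ ```
+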
480
+ ## 参考文献
481
+
482
+ - [MiniMax-M2 模型仓库](https://github.com/MiniMax-AI/MiniMax-M2)
483
+ - [vLLM 项目主页](https://github.com/vllm-project/vllm)
484
+ - [SGLang 项目主页](https://github.com/sgl-project/sglang)
485
+ - [OpenAI Python SDK](https://github.com/openai/openai-python)
486
+
487
+ ## 获取支持
488
+
489
+ 如果遇到任何问题:
490
+
491
+ - 通过邮箱 [[email protected]](mailto:[email protected]) 等官方渠道联系我们的技术支持团队
492
+
493
+ - 在我们的仓库提交 Issue
494
+
495
+ - 通过我们的 [官方企业微信交流群](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg) 反馈
496
+
497
+ 我们会持续优化模型的使用体验,欢迎反馈!
docs/transformers_deploy_guide.md ADDED
@@ -0,0 +1,90 @@
1
+ # MiniMax M2 Model Transformers Deployment Guide
2
+
3
+ [English Version](./transformers_deploy_guide.md) | [Chinese Version](./transformers_deploy_guide_cn.md)
4
+
5
+ ## Applicable Models
6
+
7
+ This document applies to the following models. You only need to change the model name during deployment.
8
+
9
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
10
+
11
+ The deployment process is illustrated below using MiniMax-M2 as an example.
12
+
13
+ ## System Requirements
14
+
15
+ - OS: Linux
16
+
17
+ - Python: 3.9 - 3.12
18
+
19
+ - Transformers: 4.57.1
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - Memory requirements: 220 GB for weights.
26
+
27
+ ## Deployment with Python
28
+
29
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
30
+
31
+ We recommend installing Transformers in a fresh Python environment:
32
+
33
+ ```bash
34
+ uv pip install transformers torch accelerate --torch-backend=auto
35
+ ```
36
+
37
+ Run the following Python script to load and run the model. Transformers will automatically download and cache the MiniMax-M2 model from Hugging Face.
38
+
39
+ ```python
40
+ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
41
+ import torch
42
+
43
+ MODEL_PATH = "MiniMaxAI/MiniMax-M2"
44
+
45
+ model = AutoModelForCausalLM.from_pretrained(
46
+ MODEL_PATH,
47
+ device_map="auto",
48
+ trust_remote_code=True,
49
+ )
50
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
51
+
52
+ messages = [
53
+ {"role": "user", "content": [{"type": "text", "text": "What is your favourite condiment?"}]},
54
+ {"role": "assistant", "content": [{"type": "text", "text": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}]},
55
+ {"role": "user", "content": [{"type": "text", "text": "Do you have mayonnaise recipes?"}]}
56
+ ]
57
+
58
+ model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
59
+
60
+ generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
61
+
62
+ response = tokenizer.batch_decode(generated_ids)[0]
63
+
64
+ print(response)
65
+ ```
66
+
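+ If you only want the newly generated text rather than the full decoded sequence (which also contains the prompt), you can slice off the prompt tokens before decoding. A small optional addition to the script above:
+
+ ```python
+ # Optional: decode only the newly generated tokens, skipping the prompt
+ # tokens and any special tokens.
+ new_tokens = generated_ids[:, model_inputs.shape[1]:]
+ print(tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0])
+ ```
+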
67
+ ## Common Issues
68
+
69
+ ### Hugging Face Network Issues
70
+
71
+ If you encounter network issues, you can configure a Hugging Face mirror endpoint before pulling the model.
72
+
73
+ ```bash
74
+ export HF_ENDPOINT=https://hf-mirror.com
75
+ ```
76
+
77
+ ### MiniMax-M2 model is not currently supported
78
+
79
+ Please make sure `trust_remote_code=True` is passed when loading the model, as shown in the example above.
80
+
81
+ ## Getting Support
82
+
83
+ If you encounter any issues while deploying the MiniMax model:
84
+
85
+ - Contact our technical support team through official channels such as email at [[email protected]](mailto:[email protected])
86
+
87
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
88
+
89
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
90
+
docs/transformers_deploy_guide_cn.md ADDED
@@ -0,0 +1,91 @@
1
+ # MiniMax M2 模型 Transformers 部署指南
2
+
3
+ [英文版](./transformers_deploy_guide.md) | [中文版](./transformers_deploy_guide_cn.md)
4
+
5
+ ## 本文档适用模型
6
+
7
+ 本文档适用以下模型,只需在部署时修改模型名称即可。
8
+
9
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
10
+
11
+ 以下以 MiniMax-M2 为例说明部署流程。
12
+
13
+ ## 环境要求
14
+
15
+ - OS:Linux
16
+
17
+ - Python:3.9 - 3.12
18
+
19
+ - Transformers: 4.57.1
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - 显存需求:权重需要 220 GB
26
+
27
+ ## 使用 Python 部署
28
+
29
+ 建议使用虚拟环境(如 **venv**、**conda**、**uv**)以避免依赖冲突。
30
+
31
+ 建议在全新的 Python 环境中安装 Transformers:
32
+
33
+ ```bash
34
+ uv pip install transformers torch accelerate --torch-backend=auto
35
+ ```
36
+
37
+ 运行如下 Python 脚本运行模型,Transformers 会自动从 Hugging Face 下载并缓存 MiniMax-M2 模型。
38
+
39
+ ```python
40
+ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
41
+ import torch
42
+
43
+ MODEL_PATH = "MiniMaxAI/MiniMax-M2"
44
+
45
+ model = AutoModelForCausalLM.from_pretrained(
46
+ MODEL_PATH,
47
+ device_map="auto",
48
+ trust_remote_code=True,
49
+ )
50
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
51
+
52
+ messages = [
53
+ {"role": "user", "content": [{"type": "text", "text": "What is your favourite condiment?"}]},
54
+ {"role": "assistant", "content": [{"type": "text", "text": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}]},
55
+ {"role": "user", "content": [{"type": "text", "text": "Do you have mayonnaise recipes?"}]}
56
+ ]
57
+
58
+ model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
59
+
60
+ generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
61
+
62
+ response = tokenizer.batch_decode(generated_ids)[0]
63
+
64
+ print(response)
65
+ ```
66
+
67
+ ## 常见问题
68
+
69
+ ### Huggingface 网络问题
70
+
71
+ 如果遇到网络问题,可以设置 Hugging Face 镜像站点后再进行拉取。
72
+
73
+ ```bash
74
+ export HF_ENDPOINT=https://hf-mirror.com
75
+ ```
76
+
77
+ ### MiniMax-M2 model is not currently supported
78
+
79
+ 请确认开启 trust_remote_code=True。
80
+
81
+ ## 获取支持
82
+
83
+ 如果在部署 MiniMax 模型过程中遇到任何问题:
84
+
85
+ - 通过邮箱 [[email protected]](mailto:[email protected]) 等官方渠道联系我们的技术支持团队
86
+
87
+ - 在我们的 [GitHub](https://github.com/MiniMax-AI) 仓库提交 Issue
88
+
89
+ - 通过我们的 [官方企业微信交流群](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg) 反馈
90
+
91
+ 我们会持续优化模型的部署体验,欢迎反馈!
docs/vllm_deploy_guide.md ADDED
@@ -0,0 +1,116 @@
1
+ # MiniMax M2 Model vLLM Deployment Guide
2
+
3
+ [English Version](./vllm_deploy_guide.md) | [Chinese Version](./vllm_deploy_guide_cn.md)
4
+
5
+ We recommend using [vLLM](https://docs.vllm.ai/en/stable/) to deploy the [MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2) model. vLLM is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing capabilities, and deeply optimized underlying performance. We recommend reviewing vLLM's official documentation to check hardware compatibility before deployment.
6
+
7
+ ## Applicable Models
8
+
9
+ This document applies to the following models. You only need to change the model name during deployment.
10
+
11
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
12
+
13
+ The deployment process is illustrated below using MiniMax-M2 as an example.
14
+
15
+ ## System Requirements
16
+
17
+ - OS: Linux
18
+
19
+ - Python: 3.9 - 3.12
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - Memory requirements: 220 GB for weights, 240 GB per 1M context tokens
26
+
27
+ The following are recommended configurations; actual requirements should be adjusted based on your use case:
28
+
29
+ - 4x 96GB GPUs: supports a total context length of up to 400K tokens.
30
+
31
+ - 8x 144GB GPUs: supports a total context length of up to 3M tokens.
32
+
33
+ ## Deployment with Python
34
+
35
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
36
+
37
+ We recommend installing vLLM in a fresh Python environment:
38
+
39
+ ```bash
40
+ uv pip install 'triton-kernels @ git+https://github.com/triton-lang/[email protected]#subdirectory=python/triton_kernels' vllm --extra-index-url https://wheels.vllm.ai/nightly --prerelease=allow
41
+ ```
42
+
43
+ Run the following command to start the vLLM server. vLLM will automatically download and cache the MiniMax-M2 model from Hugging Face.
44
+
45
+ 4-GPU deployment command:
46
+
47
+ ```bash
48
+ SAFETENSORS_FAST_GPU=1 vllm serve \
49
+ MiniMaxAI/MiniMax-M2 --trust-remote-code \
50
+ --tensor-parallel-size 4 \
51
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
52
+ --reasoning-parser minimax_m2_append_think
53
+ ```
54
+
55
+ 8-GPU deployment command:
56
+
57
+ ```bash
58
+ SAFETENSORS_FAST_GPU=1 vllm serve \
59
+ MiniMaxAI/MiniMax-M2 --trust-remote-code \
60
+ --enable_expert_parallel --tensor-parallel-size 8 \
61
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
62
+ --reasoning-parser minimax_m2_append_think
63
+ ```
64
+
65
+ ## Testing Deployment
66
+
67
+ After startup, you can test the vLLM OpenAI-compatible API with the following command:
68
+
69
+ ```bash
70
+ curl http://localhost:8000/v1/chat/completions \
71
+ -H "Content-Type: application/json" \
72
+ -d '{
73
+ "model": "MiniMaxAI/MiniMax-M2",
74
+ "messages": [
75
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
76
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
77
+ ]
78
+ }'
79
+ ```
80
+
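+ You can also exercise the same OpenAI-compatible endpoint from Python with the OpenAI SDK. A minimal sketch (the base URL assumes the default `vllm serve` port; the API key can be any placeholder string unless the server was started with `--api-key`):
+
+ ```python
+ from openai import OpenAI
+
+ # Point the client at the local vLLM OpenAI-compatible server
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+
+ response = client.chat.completions.create(
+     model="MiniMaxAI/MiniMax-M2",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Who won the world series in 2020?"},
+     ],
+ )
+ print(response.choices[0].message.content)
+ ```
+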
81
+ ## Common Issues
82
+
83
+ ### Hugging Face Network Issues
84
+
85
+ If you encounter network issues, you can configure a Hugging Face mirror endpoint before pulling the model.
86
+
87
+ ```bash
88
+ export HF_ENDPOINT=https://hf-mirror.com
89
+ ```
90
+
91
+ ### MiniMax-M2 model is not currently supported
92
+
93
+ This vLLM version is outdated. Please upgrade to the latest version.
94
+
95
+ ### torch.AcceleratorError: CUDA error: an illegal memory access was encountered
96
+ Add `--compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"` to the startup parameters to resolve this issue. For example:
97
+
98
+ ```bash
99
+ SAFETENSORS_FAST_GPU=1 vllm serve \
100
+ MiniMaxAI/MiniMax-M2 --trust-remote-code \
101
+ --enable_expert_parallel --tensor-parallel-size 8 \
102
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
103
+ --reasoning-parser minimax_m2_append_think \
104
+ --compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"
105
+ ```
106
+
107
+ ## Getting Support
108
+
109
+ If you encounter any issues while deploying the MiniMax model:
110
+
111
+ - Contact our technical support team through official channels such as email at [[email protected]](mailto:[email protected])
112
+
113
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
114
+
115
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
116
+
docs/vllm_deploy_guide_cn.md ADDED
@@ -0,0 +1,116 @@
1
+ # MiniMax M2 模型 vLLM 部署指南
2
+
3
+ [英文版](./vllm_deploy_guide.md) | [中文版](./vllm_deploy_guide_cn.md)
4
+
5
+ 我们推荐使用 [vLLM](https://docs.vllm.ai/en/stable/) 来部署 [MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2) 模型。vLLM 是一个高性能的推理引擎,其具有卓越的服务吞吐、高效智能的内存管理机制、强大的批量请求处理能力、深度优化的底层性能等特性。我们建议在部署之前查看 vLLM 的官方文档以检查硬件兼容性。
6
+
7
+ ## 本文档适用模型
8
+
9
+ 本文档适用以下模型,只需在部署时修改模型名称即可。
10
+
11
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
12
+
13
+ 以下以 MiniMax-M2 为例说明部署流程。
14
+
15
+ ## 环境要求
16
+
17
+ - OS:Linux
18
+
19
+ - Python:3.9 - 3.12
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - 显存需求:权重需要 220 GB,每 1M 上下文 token 需要 240 GB
26
+
27
+ 以下为推荐配置,实际需求请根据业务场景调整:
28
+
29
+ - 96G x4 GPU:支持 40 万 token 的总上下文。
30
+
31
+ - 144G x8 GPU:支持长达 300 万 token 的总上下文。
32
+
33
+ ## 使用 Python 部署
34
+
35
+ 建议使用虚拟环境(如 **venv**、**conda**、**uv**)以避免依赖冲突。
36
+
37
+ 建议在全新的 Python 环境中安装 vLLM:
38
+ ```bash
39
+ uv pip install 'triton-kernels @ git+https://github.com/triton-lang/[email protected]#subdirectory=python/triton_kernels' vllm --extra-index-url https://wheels.vllm.ai/nightly --prerelease=allow
40
+ ```
41
+
42
+ 运行如下命令启动 vLLM 服务器,vLLM 会自动从 Huggingface 下载并缓存 MiniMax-M2 模型。
43
+
44
+ 4 卡部署命令:
45
+
46
+ ```bash
47
+ SAFETENSORS_FAST_GPU=1 vllm serve \
48
+ MiniMaxAI/MiniMax-M2 --trust-remote-code \
49
+ --tensor-parallel-size 4 \
50
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
51
+ --reasoning-parser minimax_m2_append_think
52
+ ```
53
+
54
+ 8 卡部署命令:
55
+
56
+ ```bash
57
+ SAFETENSORS_FAST_GPU=1 vllm serve \
58
+ MiniMaxAI/MiniMax-M2 --trust-remote-code \
59
+ --enable_expert_parallel --tensor-parallel-size 8 \
60
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
61
+ --reasoning-parser minimax_m2_append_think
62
+ ```
63
+
64
+ ## 测试部署
65
+
66
+ 启动后,可以通过如下命令测试 vLLM OpenAI 兼容接口:
67
+
68
+ ```bash
69
+ curl http://localhost:8000/v1/chat/completions \
70
+ -H "Content-Type: application/json" \
71
+ -d '{
72
+ "model": "MiniMaxAI/MiniMax-M2",
73
+ "messages": [
74
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
75
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
76
+ ]
77
+ }'
78
+ ```
79
+
80
+ ## 常见问题
81
+
82
+ ### Huggingface 网络问题
83
+
84
+ 如果遇到网络问题,可以设置 Hugging Face 镜像站点后再进行拉取。
85
+
86
+ ```bash
87
+ export HF_ENDPOINT=https://hf-mirror.com
88
+ ```
89
+
90
+ ### MiniMax-M2 model is not currently supported
91
+
92
+ 该 vLLM 版本过旧,请升级到最新版本。
93
+
94
+ ### torch.AcceleratorError: CUDA error: an illegal memory access was encountered
95
+ 在启动参数添加 `--compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"` 可以解决。例如:
96
+
97
+ ```bash
98
+ SAFETENSORS_FAST_GPU=1 vllm serve \
99
+ MiniMaxAI/MiniMax-M2 --trust-remote-code \
100
+ --enable_expert_parallel --tensor-parallel-size 8 \
101
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
102
+ --reasoning-parser minimax_m2_append_think \
103
+ --compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"
104
+ ```
105
+
106
+ ## 获取支持
107
+
108
+ 如果在部署 MiniMax 模型过程中遇到任何问题:
109
+
110
+ - 通过邮箱 [[email protected]](mailto:[email protected]) 等官方渠道联系我们的技术支持团队
111
+
112
+ - 在我们的 [GitHub](https://github.com/MiniMax-AI) 仓库提交 Issue
113
+
114
+ - 通过我们的 [官方企业微信交流群](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg) 反馈
115
+
116
+ 我们会持续优化模型的部署体验,欢迎反馈!
figures/wechat.jpeg ADDED
generation_config.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "bos_token_id": 200019,
3
+ "do_sample": true,
4
+ "eos_token_id": 200020,
5
+ "temperature": 1.0,
6
+ "top_p": 0.95,
7
+ "top_k": 40,
8
+ "transformers_version": "4.46.1"
9
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
model_card.md ADDED
@@ -0,0 +1,18 @@
1
+ ---
2
+ tags:
3
+ - text-generation
4
+ - conversational
5
+ - coding
6
+ - agent
7
+ - moe
8
+ - large-language-model
9
+ license: other
10
+ license_name: modified-mit
11
+ license_link: https://github.com/MiniMax-AI/MiniMax-M2/blob/main/LICENSE
12
+ library_name: transformers
13
+ pipeline_tag: text-generation
14
+ ---
15
+
16
+ # MiniMax-M2
17
+
18
+ MiniMax-M2 is a **Mini** model built for **Max** coding & agentic workflows: a compact, fast, and cost-effective MoE model (230 billion total parameters, 10 billion active) that delivers elite performance on coding and agentic tasks.
modeling_minimax_m2.py ADDED
@@ -0,0 +1,707 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_minimax_m2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+
23
+ from collections.abc import Callable
24
+ from typing import Optional, Union
25
+
26
+ import torch
27
+ from torch import nn
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache
31
+ from transformers.generation import GenerationMixin
32
+ from transformers.integrations import use_kernel_forward_from_hub
33
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
34
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
35
+ from transformers.modeling_layers import (
36
+ GenericForQuestionAnswering,
37
+ GenericForSequenceClassification,
38
+ GenericForTokenClassification,
39
+ GradientCheckpointingLayer,
40
+ )
41
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
42
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
43
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
44
+ from transformers.processing_utils import Unpack
45
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
46
+ from transformers.utils.deprecation import deprecate_kwarg
47
+ from transformers.utils.generic import OutputRecorder, check_model_inputs
48
+ from .configuration_minimax_m2 import MiniMaxM2Config
49
+
50
+
51
+ class MiniMaxM2MLP(nn.Module):
52
+ def __init__(self, config: MiniMaxM2Config):
53
+ super().__init__()
54
+ self.ffn_dim = config.intermediate_size
55
+ self.hidden_dim = config.hidden_size
56
+
57
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
58
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
59
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
60
+
61
+ self.act_fn = ACT2FN[config.hidden_act]
62
+
63
+ def forward(self, hidden_states):
64
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
65
+ current_hidden_states = self.w2(current_hidden_states)
66
+ return current_hidden_states
67
+
68
+
69
+ class MiniMaxM2Experts(nn.ModuleList):
70
+ """
71
+ ModuleList of experts.
72
+ """
73
+
74
+ def __init__(self, config: MiniMaxM2Config):
75
+ super().__init__()
76
+ self.top_k = config.num_experts_per_tok
77
+ self.num_experts = config.num_local_experts
78
+ for _ in range(self.num_experts):
79
+ self.append(MiniMaxM2MLP(config))
80
+
81
+ def forward(
82
+ self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
83
+ ) -> torch.Tensor:
84
+ """
85
+ Args:
86
+ hidden_states: (batch_size * sequence_length, hidden_dim)
87
+ top_k_index: (batch_size * sequence_length, top_k)
+ top_k_weights: (batch_size * sequence_length, top_k)
89
+ Returns:
90
+ (batch_size * sequence_length, hidden_dim)
91
+ """
92
+ final_hidden_states = torch.zeros_like(hidden_states)
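+ # Build a (num_experts, top_k, num_tokens) routing mask, then loop only over
+ # experts that actually received tokens and accumulate their weighted outputs
+ # into final_hidden_states.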
93
+ expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)
94
+
95
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
96
+ for expert_idx in expert_hit:
97
+ idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
98
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
99
+ current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
100
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
101
+ return final_hidden_states
102
+
103
+
104
+ class MiniMaxM2SparseMoeBlock(nn.Module):
105
+ def __init__(self, config):
106
+ super().__init__()
107
+ self.top_k = config.num_experts_per_tok
108
+ self.jitter_noise = config.router_jitter_noise
109
+ self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
110
+ self.experts = MiniMaxM2Experts(config)
111
+ self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))
112
+
113
+ def route_tokens_to_experts(self, router_logits):
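+ # Sigmoid router scores: experts are *selected* using scores shifted by the
+ # load-balancing correction bias (e_score_correction_bias), while the final
+ # combination weights are taken from the unbiased scores and renormalized.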
114
+ routing_weights = torch.nn.functional.sigmoid(router_logits.float())
115
+ scores_for_choice = routing_weights + self.e_score_correction_bias
116
+ _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
117
+ top_k_weights = routing_weights.gather(1, top_k_index)
118
+ top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
119
+ return top_k_index, top_k_weights.to(router_logits.dtype)
120
+
121
+ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
122
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
123
+ if self.training and self.jitter_noise > 0:
124
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
125
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
126
+ router_logits = self.gate(hidden_states)
127
+ top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
128
+ hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
129
+ hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
130
+ return hidden_states, router_logits
131
+
132
+
133
+ @use_kernel_forward_from_hub("RMSNorm")
134
+ class MiniMaxM2RMSNorm(nn.Module):
135
+ def __init__(self, hidden_size, eps=1e-6):
136
+ """
137
+ MiniMaxM2RMSNorm is equivalent to T5LayerNorm
138
+ """
139
+ super().__init__()
140
+ self.weight = nn.Parameter(torch.ones(hidden_size))
141
+ self.variance_epsilon = eps
142
+
143
+ def forward(self, hidden_states):
144
+ input_dtype = hidden_states.dtype
145
+ hidden_states = hidden_states.to(torch.float32)
146
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
147
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
148
+ return self.weight * hidden_states.to(input_dtype)
149
+
150
+ def extra_repr(self):
151
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
152
+
153
+
154
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
155
+ """
156
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
157
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
158
+ """
159
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
160
+ if n_rep == 1:
161
+ return hidden_states
162
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
163
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
164
+
165
+
166
+ def eager_attention_forward(
167
+ module: nn.Module,
168
+ query: torch.Tensor,
169
+ key: torch.Tensor,
170
+ value: torch.Tensor,
171
+ attention_mask: Optional[torch.Tensor],
172
+ scaling: float,
173
+ dropout: float = 0.0,
174
+ **kwargs: Unpack[TransformersKwargs],
175
+ ):
176
+ key_states = repeat_kv(key, module.num_key_value_groups)
177
+ value_states = repeat_kv(value, module.num_key_value_groups)
178
+
179
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
180
+ if attention_mask is not None:
181
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
182
+ attn_weights = attn_weights + causal_mask
183
+
184
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
185
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
186
+ attn_output = torch.matmul(attn_weights, value_states)
187
+ attn_output = attn_output.transpose(1, 2).contiguous()
188
+
189
+ return attn_output, attn_weights
190
+
191
+
192
+ def rotate_half(x):
193
+ """Rotates half the hidden dims of the input."""
194
+ x1 = x[..., : x.shape[-1] // 2]
195
+ x2 = x[..., x.shape[-1] // 2 :]
196
+ return torch.cat((-x2, x1), dim=-1)
197
+
198
+
199
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
200
+ """Applies Rotary Position Embedding to the query and key tensors.
201
+
202
+ Args:
203
+ q (`torch.Tensor`): The query tensor.
204
+ k (`torch.Tensor`): The key tensor.
205
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
206
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
207
+ position_ids (`torch.Tensor`, *optional*):
208
+ Deprecated and unused.
209
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
210
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
211
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
212
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
213
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
214
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
215
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
216
+ Returns:
217
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
218
+ """
219
+ cos = cos.unsqueeze(unsqueeze_dim)
220
+ sin = sin.unsqueeze(unsqueeze_dim)
221
+
222
+ # Keep half or full tensor for later concatenation
223
+ rotary_dim = cos.shape[-1]
224
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
225
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
226
+
227
+ # Apply rotary embeddings on the first half or full tensor
228
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
229
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
230
+
231
+ # Concatenate back to full shape
232
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
233
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
234
+ return q_embed, k_embed
235
+
236
+
237
+ class MiniMaxM2Attention(nn.Module):
238
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
239
+
240
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
241
+ super().__init__()
242
+ self.config = config
243
+ self.layer_idx = layer_idx
244
+ self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
245
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
246
+ self.scaling = self.head_dim**-0.5
247
+ self.attention_dropout = config.attention_dropout
248
+ self.is_causal = True
249
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
250
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
251
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
252
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
253
+
254
+ self.use_qk_norm = config.use_qk_norm
255
+ if self.use_qk_norm:
256
+ self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
257
+ self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)
258
+
259
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
260
+ def forward(
261
+ self,
262
+ hidden_states: torch.Tensor,
263
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
264
+ attention_mask: Optional[torch.Tensor],
265
+ past_key_values: Optional[Cache] = None,
266
+ cache_position: Optional[torch.LongTensor] = None,
267
+ **kwargs: Unpack[FlashAttentionKwargs],
268
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
269
+ input_shape = hidden_states.shape[:-1]
270
+ hidden_shape = (*input_shape, -1, self.head_dim)
271
+
272
+ query_states = self.q_proj(hidden_states)
273
+ key_states = self.k_proj(hidden_states)
274
+ value_states = self.v_proj(hidden_states)
275
+
276
+ if self.use_qk_norm: # main diff from Llama
277
+ query_states = self.q_norm(query_states)
278
+ key_states = self.k_norm(key_states)
279
+
280
+ key_states = key_states.view(hidden_shape)
281
+ query_states = query_states.view(hidden_shape)
282
+ value_states = value_states.view(hidden_shape)
283
+
284
+ query_states = query_states.transpose(1, 2)
285
+ key_states = key_states.transpose(1, 2)
286
+ value_states = value_states.transpose(1, 2)
287
+
288
+ cos, sin = position_embeddings
289
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
290
+
291
+ if past_key_values is not None:
292
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
293
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
294
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
295
+
296
+ attention_interface: Callable = eager_attention_forward
297
+ if self.config._attn_implementation != "eager":
298
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
299
+
300
+ attn_output, attn_weights = attention_interface(
301
+ self,
302
+ query_states,
303
+ key_states,
304
+ value_states,
305
+ attention_mask,
306
+ dropout=0.0 if not self.training else self.attention_dropout,
307
+ scaling=self.scaling,
308
+ **kwargs,
309
+ )
310
+
311
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
312
+ attn_output = self.o_proj(attn_output)
313
+ return attn_output, attn_weights
314
+
315
+
316
+ class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
317
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
318
+ super().__init__()
319
+ self.hidden_size = config.hidden_size
320
+
321
+ self.self_attn = MiniMaxM2Attention(config, layer_idx)
322
+
323
+ self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
324
+ self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
325
+ self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
326
+
327
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
328
+ def forward(
329
+ self,
330
+ hidden_states: torch.Tensor,
331
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
332
+ attention_mask: Optional[torch.Tensor] = None,
333
+ position_ids: Optional[torch.LongTensor] = None,
334
+ past_key_values: Optional[Cache] = None,
335
+ cache_position: Optional[torch.LongTensor] = None,
336
+ **kwargs: Unpack[TransformersKwargs],
337
+ ) -> torch.FloatTensor:
338
+ residual = hidden_states
339
+
340
+ hidden_states = self.input_layernorm(hidden_states)
341
+
342
+ # Self Attention
343
+ hidden_states, _ = self.self_attn(
344
+ hidden_states=hidden_states,
345
+ position_embeddings=position_embeddings,
346
+ attention_mask=attention_mask,
347
+ position_ids=position_ids,
348
+ past_key_values=past_key_values,
349
+ cache_position=cache_position,
350
+ **kwargs,
351
+ )
352
+ hidden_states = residual + hidden_states
353
+
354
+ # Fully Connected
355
+ residual = hidden_states
356
+ hidden_states = self.post_attention_layernorm(hidden_states)
357
+ hidden_states, _ = self.block_sparse_moe(hidden_states)
358
+ hidden_states = residual + hidden_states
359
+
360
+ return hidden_states
361
+
362
+
363
+ class MiniMaxM2RotaryEmbedding(nn.Module):
364
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
365
+
366
+ def __init__(self, config: MiniMaxM2Config, device=None):
367
+ super().__init__()
368
+ # BC: "rope_type" was originally "type"
369
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
370
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
371
+ else:
372
+ self.rope_type = "default"
373
+ self.max_seq_len_cached = config.max_position_embeddings
374
+ self.original_max_seq_len = config.max_position_embeddings
375
+
376
+ self.config = config
377
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
378
+
379
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
380
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
381
+ self.original_inv_freq = self.inv_freq
382
+
383
+ @torch.no_grad()
384
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
385
+ def forward(self, x, position_ids):
386
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
387
+ position_ids_expanded = position_ids[:, None, :].float()
388
+
389
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
390
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
391
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
392
+ emb = torch.cat((freqs, freqs), dim=-1)
393
+ cos = emb.cos() * self.attention_scaling
394
+ sin = emb.sin() * self.attention_scaling
395
+
396
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
397
+
398
+
399
+ @auto_docstring
400
+ class MiniMaxM2PreTrainedModel(PreTrainedModel):
401
+ config: MiniMaxM2Config
402
+ base_model_prefix = "model"
403
+ supports_gradient_checkpointing = True
404
+ _no_split_modules = ["MiniMaxM2DecoderLayer"]
405
+ _skip_keys_device_placement = ["past_key_values"]
406
+ _supports_flash_attn = True
407
+ _supports_sdpa = True
408
+ _supports_flex_attn = True
409
+ _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
410
+ _supports_attention_backend = True
411
+ _can_record_outputs = {
412
+ "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
413
+ "hidden_states": MiniMaxM2DecoderLayer,
414
+ "attentions": MiniMaxM2Attention,
415
+ }
416
+
417
+
418
+ @auto_docstring
419
+ class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
420
+ def __init__(self, config: MiniMaxM2Config):
421
+ super().__init__(config)
422
+ self.padding_idx = config.pad_token_id
423
+ self.vocab_size = config.vocab_size
424
+
425
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
426
+ self.layers = nn.ModuleList(
427
+ [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
428
+ )
429
+ self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
430
+ self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
431
+ self.gradient_checkpointing = False
432
+
433
+ # Initialize weights and apply final processing
434
+ self.post_init()
435
+
436
+ @check_model_inputs
437
+ @auto_docstring
438
+ def forward(
439
+ self,
440
+ input_ids: Optional[torch.LongTensor] = None,
441
+ attention_mask: Optional[torch.Tensor] = None,
442
+ position_ids: Optional[torch.LongTensor] = None,
443
+ past_key_values: Optional[Cache] = None,
444
+ inputs_embeds: Optional[torch.FloatTensor] = None,
445
+ use_cache: Optional[bool] = None,
446
+ cache_position: Optional[torch.LongTensor] = None,
447
+ **kwargs: Unpack[TransformersKwargs],
448
+ ) -> MoeModelOutputWithPast:
449
+ if (input_ids is None) ^ (inputs_embeds is not None):
450
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
451
+
452
+ if use_cache and past_key_values is None:
453
+ past_key_values = DynamicCache(config=self.config)
454
+
455
+ if inputs_embeds is None:
456
+ inputs_embeds = self.embed_tokens(input_ids)
457
+
458
+ if cache_position is None:
459
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
460
+ cache_position = torch.arange(
461
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
462
+ )
463
+ if position_ids is None:
464
+ position_ids = cache_position.unsqueeze(0)
465
+
466
+ mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
467
+ causal_mask = mask_function(
468
+ config=self.config,
469
+ input_embeds=inputs_embeds,
470
+ attention_mask=attention_mask,
471
+ cache_position=cache_position,
472
+ past_key_values=past_key_values,
473
+ position_ids=position_ids,
474
+ )
475
+
476
+ hidden_states = inputs_embeds
477
+
478
+ # create position embeddings to be shared across the decoder layers
479
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
480
+
481
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
482
+ hidden_states = decoder_layer(
483
+ hidden_states,
484
+ position_embeddings=position_embeddings,
485
+ attention_mask=causal_mask,
486
+ position_ids=position_ids,
487
+ past_key_values=past_key_values,
488
+ use_cache=use_cache,
489
+ cache_position=cache_position,
490
+ **kwargs,
491
+ )
492
+
493
+ hidden_states = self.norm(hidden_states)
494
+
495
+ return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
496
+ last_hidden_state=hidden_states,
497
+ past_key_values=past_key_values,
498
+ )
499
+
500
+
501
+ def load_balancing_loss_func(
502
+ gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
503
+ num_experts: Optional[int] = None,
504
+ top_k=2,
505
+ attention_mask: Optional[torch.Tensor] = None,
506
+ ) -> Union[torch.Tensor, int]:
507
+ r"""
508
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
509
+
510
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
511
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
512
+ experts is too unbalanced.
513
+
514
+ Args:
515
+ gate_logits:
516
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
517
+ shape [batch_size X sequence_length, num_experts].
518
+ num_experts:
519
+ Number of experts
520
+ top_k:
521
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
522
+ parameter.
523
+ attention_mask (`torch.Tensor`, *optional*):
524
+ The attention_mask used in forward function
525
+ shape [batch_size X sequence_length] if not None.
526
+
527
+ Returns:
528
+ The auxiliary loss.
529
+ """
530
+ if gate_logits is None or not isinstance(gate_logits, tuple):
531
+ return 0
532
+
533
+ if isinstance(gate_logits, tuple):
534
+ compute_device = gate_logits[0].device
535
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
536
+
537
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
538
+
539
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
540
+
541
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
542
+
543
+ if attention_mask is None:
544
+ # Compute the percentage of tokens routed to each experts
545
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
546
+
547
+ # Compute the average probability of routing to these experts
548
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
549
+ else:
550
+ batch_size, sequence_length = attention_mask.shape
551
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
552
+
553
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
554
+ expert_attention_mask = (
555
+ attention_mask[None, :, :, None, None]
556
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
557
+ .reshape(-1, top_k, num_experts)
558
+ .to(compute_device)
559
+ )
560
+
561
+ # Compute the percentage of tokens routed to each experts
562
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
563
+ expert_attention_mask, dim=0
564
+ )
565
+
566
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
567
+ router_per_expert_attention_mask = (
568
+ attention_mask[None, :, :, None]
569
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
570
+ .reshape(-1, num_experts)
571
+ .to(compute_device)
572
+ )
573
+
574
+ # Compute the average probability of routing to these experts
575
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
576
+ router_per_expert_attention_mask, dim=0
577
+ )
578
+
579
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
580
+ return overall_loss * num_experts
581
+
582
+
583
+ @auto_docstring
584
+ class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
585
+ _tied_weights_keys = ["lm_head.weight"]
586
+ _tp_plan = {"lm_head": "colwise_rep"}
587
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
588
+
589
+ def __init__(self, config):
590
+ super().__init__(config)
591
+ self.model = MiniMaxM2Model(config)
592
+ self.vocab_size = config.vocab_size
593
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
594
+ self.router_aux_loss_coef = config.router_aux_loss_coef
595
+ self.num_experts = config.num_local_experts
596
+ self.num_experts_per_tok = config.num_experts_per_tok
597
+
598
+ # Initialize weights and apply final processing
599
+ self.post_init()
600
+
601
+ @can_return_tuple
602
+ @auto_docstring
603
+ def forward(
604
+ self,
605
+ input_ids: Optional[torch.LongTensor] = None,
606
+ attention_mask: Optional[torch.Tensor] = None,
607
+ position_ids: Optional[torch.LongTensor] = None,
608
+ past_key_values: Optional[Cache] = None,
609
+ inputs_embeds: Optional[torch.FloatTensor] = None,
610
+ labels: Optional[torch.LongTensor] = None,
611
+ use_cache: Optional[bool] = None,
612
+ output_router_logits: Optional[bool] = None,
613
+ cache_position: Optional[torch.LongTensor] = None,
614
+ logits_to_keep: Union[int, torch.Tensor] = 0,
615
+ **kwargs: Unpack[TransformersKwargs],
616
+ ) -> MoeCausalLMOutputWithPast:
617
+ r"""
618
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
619
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
620
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
621
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
622
+
623
+ Example:
624
+
625
+ ```python
626
+ >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM
627
+
628
+ >>> model = MiniMaxM2ForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")
629
+ >>> tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
630
+
631
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
632
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
633
+
634
+ >>> # Generate
635
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
636
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
637
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
638
+ ```"""
639
+
640
+ output_router_logits = (
641
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
642
+ )
643
+
644
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
645
+ outputs: MoeModelOutputWithPast = self.model(
646
+ input_ids=input_ids,
647
+ attention_mask=attention_mask,
648
+ position_ids=position_ids,
649
+ past_key_values=past_key_values,
650
+ inputs_embeds=inputs_embeds,
651
+ use_cache=use_cache,
652
+ output_router_logits=output_router_logits,
653
+ cache_position=cache_position,
654
+ **kwargs,
655
+ )
656
+
657
+ hidden_states = outputs.last_hidden_state
658
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
659
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
660
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
661
+
662
+ loss = None
663
+ if labels is not None:
664
+ loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
665
+
666
+ aux_loss = None
667
+ if output_router_logits:
668
+ aux_loss = load_balancing_loss_func(
669
+ outputs.router_logits,
670
+ self.num_experts,
671
+ self.num_experts_per_tok,
672
+ attention_mask,
673
+ )
674
+ if labels is not None:
675
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure the aux loss is on the same device as the main loss
676
+
677
+ return MoeCausalLMOutputWithPast(
678
+ loss=loss,
679
+ aux_loss=aux_loss,
680
+ logits=logits,
681
+ past_key_values=outputs.past_key_values,
682
+ hidden_states=outputs.hidden_states,
683
+ attentions=outputs.attentions,
684
+ router_logits=outputs.router_logits,
685
+ )
686
+
687
+
688
+ class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
689
+ pass
690
+
691
+
692
+ class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
693
+ pass
694
+
695
+
696
+ class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
697
+ pass
698
+
699
+
700
+ __all__ = [
701
+ "MiniMaxM2ForCausalLM",
702
+ "MiniMaxM2ForQuestionAnswering",
703
+ "MiniMaxM2Model",
704
+ "MiniMaxM2PreTrainedModel",
705
+ "MiniMaxM2ForSequenceClassification",
706
+ "MiniMaxM2ForTokenClassification",
707
+ ]
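
The `load_balancing_loss_func` added above is a Switch-Transformers-style auxiliary load-balancing loss: it softmaxes the concatenated per-layer router logits, takes the top-k experts per token, and multiplies the fraction of tokens dispatched to each expert by that expert's mean routing probability. Below is a minimal, self-contained sketch of the unmasked branch (no `attention_mask`); the expert count, top-k value, and tensor shapes are illustrative only, not the model's real configuration.

```python
import torch

torch.manual_seed(0)
num_experts, top_k = 8, 2           # illustrative values only
num_layers, batch, seq = 2, 1, 16   # tiny shapes for the sketch

# One router-logit tensor per layer, shaped [batch*seq, num_experts],
# mirroring what the model exposes as `outputs.router_logits`.
gate_logits = tuple(torch.randn(batch * seq, num_experts) for _ in range(num_layers))
concatenated_gate_logits = torch.cat(gate_logits, dim=0)

routing_weights = torch.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

# Fraction of tokens routed to each expert, times its mean routing probability.
tokens_per_expert = expert_mask.float().mean(dim=0)        # [top_k, num_experts]
router_prob_per_expert = routing_weights.mean(dim=0)       # [num_experts]
aux_loss = (tokens_per_expert * router_prob_per_expert.unsqueeze(0)).sum() * num_experts
print(aux_loss)  # roughly top_k when routing is close to uniform
```

In `MiniMaxM2ForCausalLM.forward`, the value returned by `load_balancing_loss_func` is scaled by `config.router_aux_loss_coef` and added to the language-modeling loss whenever `labels` are provided and `output_router_logits` is enabled.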
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,495 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "200000": {
4
+ "content": "]!p~[",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "200001": {
12
+ "content": "<fim_prefix>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "200002": {
20
+ "content": "<fim_middle>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "200003": {
28
+ "content": "<fim_suffix>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "200004": {
36
+ "content": "<fim_pad>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "200005": {
44
+ "content": "<reponame>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "200006": {
52
+ "content": "<filename>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "200007": {
60
+ "content": "<gh_stars>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "200008": {
68
+ "content": "<issue_start>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "200009": {
76
+ "content": "<issue_comment>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "200010": {
84
+ "content": "<issue_closed>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "200011": {
92
+ "content": "<jupyter_start>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "200012": {
100
+ "content": "<jupyter_text>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "200013": {
108
+ "content": "<jupyter_code>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "200014": {
116
+ "content": "<jupyter_output>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "200015": {
124
+ "content": "<empty_output>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "200016": {
132
+ "content": "<commit_before>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "200017": {
140
+ "content": "<commit_msg>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "200018": {
148
+ "content": "<commit_after>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "200019": {
156
+ "content": "]~b]",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "200020": {
164
+ "content": "[e~[",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "200021": {
172
+ "content": "]!d~[",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "200022": {
180
+ "content": "<function_call>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "200023": {
188
+ "content": "<code_interpreter>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "200024": {
196
+ "content": "]<]speech[>[",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "200025": {
204
+ "content": "]<]image[>[",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "200026": {
212
+ "content": "]<]video[>[",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "200027": {
220
+ "content": "]<]start of speech[>[",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "200028": {
228
+ "content": "]<]end of speech[>[",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "200029": {
236
+ "content": "]<]start of image[>[",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "200030": {
244
+ "content": "]<]end of image[>[",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "200031": {
252
+ "content": "]<]start of video[>[",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "200032": {
260
+ "content": "]<]end of video[>[",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "200033": {
268
+ "content": "]<]vision pad[>[",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "200034": {
276
+ "content": "]~!b[",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "200035": {
284
+ "content": "<jupyter_error>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "200036": {
292
+ "content": "<add_file>",
293
+ "single_word": false,
294
+ "lstrip": false,
295
+ "rstrip": false,
296
+ "normalized": false,
297
+ "special": true
298
+ },
299
+ "200037": {
300
+ "content": "<delete_file>",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "200038": {
308
+ "content": "<rename_file>",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "200039": {
316
+ "content": "<edit_file>",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "200040": {
324
+ "content": "<commit_message>",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "200041": {
332
+ "content": "<empty_source_file>",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "200042": {
340
+ "content": "<repo_struct>",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "200043": {
348
+ "content": "<code_context>",
349
+ "single_word": false,
350
+ "lstrip": false,
351
+ "rstrip": false,
352
+ "normalized": false,
353
+ "special": true
354
+ },
355
+ "200044": {
356
+ "content": "<file_content>",
357
+ "single_word": false,
358
+ "lstrip": false,
359
+ "rstrip": false,
360
+ "normalized": false,
361
+ "special": true
362
+ },
363
+ "200045": {
364
+ "content": "<source_files>",
365
+ "single_word": false,
366
+ "lstrip": false,
367
+ "rstrip": false,
368
+ "normalized": false,
369
+ "special": true
370
+ },
371
+ "200046": {
372
+ "content": "<pr_start>",
373
+ "single_word": false,
374
+ "lstrip": false,
375
+ "rstrip": false,
376
+ "normalized": false,
377
+ "special": true
378
+ },
379
+ "200047": {
380
+ "content": "<review_comment>",
381
+ "single_word": false,
382
+ "lstrip": false,
383
+ "rstrip": false,
384
+ "normalized": false,
385
+ "special": true
386
+ },
387
+ "200048": {
388
+ "content": "<filepath>",
389
+ "single_word": false,
390
+ "lstrip": false,
391
+ "rstrip": false,
392
+ "normalized": false,
393
+ "special": true
394
+ },
395
+ "200049": {
396
+ "content": "<file_sep>",
397
+ "single_word": false,
398
+ "lstrip": false,
399
+ "rstrip": false,
400
+ "normalized": false,
401
+ "special": true
402
+ },
403
+ "200050": {
404
+ "content": "<think>",
405
+ "single_word": false,
406
+ "lstrip": false,
407
+ "rstrip": false,
408
+ "normalized": false,
409
+ "special": false
410
+ },
411
+ "200051": {
412
+ "content": "</think>",
413
+ "single_word": false,
414
+ "lstrip": false,
415
+ "rstrip": false,
416
+ "normalized": false,
417
+ "special": false
418
+ },
419
+ "200052": {
420
+ "content": "<minimax:tool_call>",
421
+ "single_word": false,
422
+ "lstrip": false,
423
+ "rstrip": false,
424
+ "normalized": false,
425
+ "special": false
426
+ },
427
+ "200053": {
428
+ "content": "</minimax:tool_call>",
429
+ "single_word": false,
430
+ "lstrip": false,
431
+ "rstrip": false,
432
+ "normalized": false,
433
+ "special": false
434
+ }
435
+ },
436
+ "additional_special_tokens": [
437
+ "<code_interpreter>",
438
+ "<commit_after>",
439
+ "<commit_before>",
440
+ "<commit_msg>",
441
+ "<empty_output>",
442
+ "<filename>",
443
+ "<fim_middle>",
444
+ "<fim_pad>",
445
+ "<fim_prefix>",
446
+ "<fim_suffix>",
447
+ "<function_call>",
448
+ "<gh_stars>",
449
+ "]<]speech[>[",
450
+ "]<]image[>[",
451
+ "]<]video[>[",
452
+ "]<]start of speech[>[",
453
+ "]<]end of speech[>[",
454
+ "]<]start of image[>[",
455
+ "]<]end of image[>[",
456
+ "]<]start of video[>[",
457
+ "]<]end of video[>[",
458
+ "]<]vision pad[>[",
459
+ "]~!b[",
460
+ "<issue_closed>",
461
+ "<issue_comment>",
462
+ "<issue_start>",
463
+ "<jupyter_code>",
464
+ "<jupyter_output>",
465
+ "<jupyter_start>",
466
+ "<jupyter_text>",
467
+ "<reponame>",
468
+ "[e~[",
469
+ "]!d~[",
470
+ "]!p~[",
471
+ "]~b]",
472
+ "<jupyter_error>",
473
+ "<add_file>",
474
+ "<delete_file>",
475
+ "<rename_file>",
476
+ "<edit_file>",
477
+ "<commit_message>",
478
+ "<empty_source_file>",
479
+ "<repo_struct>",
480
+ "<code_context>",
481
+ "<file_content>",
482
+ "<source_files>",
483
+ "<pr_start>",
484
+ "<review_comment>",
485
+ "<filepath>",
486
+ "<file_sep>"
487
+ ],
488
+ "add_prefix_space": false,
489
+ "bos_token": "]~!b[",
490
+ "clean_up_tokenization_spaces": false,
491
+ "eos_token": "[e~[",
492
+ "model_max_length": 40960000,
493
+ "tokenizer_class": "GPT2Tokenizer",
494
+ "unk_token": "]!d~["
495
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff
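
The new `tokenizer_config.json` registers the control tokens listed above on top of a byte-level BPE tokenizer (`"tokenizer_class": "GPT2Tokenizer"`), with `]~!b[` as BOS, `[e~[` as EOS, and `]!d~[` as UNK. Note that `<think>`, `</think>`, and the `<minimax:tool_call>` pair are added with `"special": false`, so they are not stripped by `skip_special_tokens`. A small sketch, assuming the uploaded files are available in a local directory (the path is hypothetical; substitute the actual repo id):

```python
from transformers import AutoTokenizer

# Hypothetical local path to this upload; a Hub repo id works the same way.
tok = AutoTokenizer.from_pretrained("./MiniMax-M2")

print(tok.bos_token, tok.eos_token, tok.unk_token)   # ]~!b[ [e~[ ]!d~[

ids = tok("<think>plan the answer</think>")["input_ids"]
# <think> / </think> are added tokens but not "special", so they survive decoding
# even when special tokens are skipped.
print(tok.decode(ids, skip_special_tokens=True))
```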