SenseVoice.cpp Encoder: Recognizing Speech [AI Artificial Intelligence (69)] — 东方仙盟
sense-voice-encoder
Core code

Complete code
```cpp
//
// Created by lovemefan on 2024/7/19.
//
#include "sense-voice-encoder.h"

#include <cmath>
#include <cassert>
#include <map>
#include <string>
#include <vector>

#define SENSE_VOICE_ENCODER_MAX_NODES 8192
#define WARP_SIZE 32

// faster matrix multiplications for tensors that do not have dimension 0 divisible by "pad"
// the idea is to represent the original matrix multiplication:
//
// Z = X @ Y
//
// with the sum of two matrix multiplications:
//
// Z = (X_0 @ Y_0) + (X_1 @ Y_1)
//
// here X_0 and Y_0 are views of X and Y that have dimension 0 divisible by "pad"
// and X_1 and Y_1 are the remaining views. X_1 and Y_1 end up being small matrices that can be processed with more
// general-purpose kernels
//
static struct ggml_tensor * ggml_mul_mat_pad(struct ggml_context * ctx, struct ggml_tensor * x, struct ggml_tensor * y, int pad = 32) {
    // use padding only if dimension 0 is at least 8 times larger than the padding
    // else we won't get much benefit from the optimization
    const int n_pad_req = 8;

    if (x->ne[0] % pad == 0 || x->ne[0] / pad < n_pad_req) {
        return ggml_mul_mat(ctx, x, y);
    }

    struct ggml_tensor * x_0 = ggml_view_3d(ctx, x, (x->ne[0]/pad)*pad, x->ne[1], x->ne[2], x->nb[1], x->nb[2], 0);
    struct ggml_tensor * x_1 = ggml_view_3d(ctx, x,  x->ne[0]%pad,      x->ne[1], x->ne[2], x->nb[1], x->nb[2], x_0->ne[0]*x_0->nb[0]);

    struct ggml_tensor * y_0 = ggml_view_3d(ctx, y, (y->ne[0]/pad)*pad, y->ne[1], y->ne[2], y->nb[1], y->nb[2], 0);
    struct ggml_tensor * y_1 = ggml_view_3d(ctx, y,  y->ne[0]%pad,      y->ne[1], y->ne[2], y->nb[1], y->nb[2], y_0->ne[0]*y_0->nb[0]);

    return ggml_add(ctx,
                    ggml_mul_mat(ctx, x_0, y_0),
                    ggml_mul_mat(ctx, x_1, y_1));
}

// copy from whisper.cpp
// TODO: CUDA is currently broken - seems ggml_mul_mat does not handle views correctly
#if defined(GGML_USE_METAL)
#define ggml_mul_mat ggml_mul_mat_pad
#endif
static const size_t MB = 1ull * 1024 * 1024;

struct sense_voice_context *sense_voice_init(struct gguf_context *g_context) {
    ggml_time_init();
    struct sense_voice_context *ctx = new sense_voice_context;
    return ctx;
}

struct sense_voice_context_params sense_voice_context_default_params() {
    struct sense_voice_context_params result = {
        /*.use_gpu    =*/ true,
        /*.flash_attn =*/ false,
        /*.use_itn    =*/ false,
        /*.gpu_device =*/ 0
    };
    return result;
}
static struct ggml_tensor *encoder_layer_sanm_forward(const sense_voice_hparams &hparams,
                                                      sense_voice_context &sctx,
                                                      ggml_context *ctx0,
                                                      ggml_tensor *cur,
                                                      sense_voice_layer_encoder &layer,
                                                      ggml_cgraph *gf,
                                                      bool use_flash_attn) {
    const int n_state = hparams.n_encoder_hidden_state;
    const int n_head  = hparams.n_encoder_attention_heads;
    auto state = sctx.state;

    struct ggml_tensor *residual = nullptr;

    if (layer.e_norm_w1->ne[0] == layer.e_norm_w2->ne[0]) {
        residual = ggml_cpy(
            ctx0, cur,
            ggml_new_tensor_3d(ctx0, cur->type, cur->ne[0], cur->ne[1], cur->ne[2]));
    }

    {
        // layer norm
        // cur = norm_w1*cur + norm_b1
#ifdef GGML_CUDA
        int32_t dim_size = cur->ne[0];
        if (sctx.params.use_gpu && dim_size % WARP_SIZE) {
            int32_t pad_size = WARP_SIZE - (dim_size % WARP_SIZE);
            ggml_tensor *mean = ggml_mean(ctx0, cur);
            cur = ggml_sub(ctx0, cur, mean);
            ggml_tensor *sigma = ggml_mul(ctx0, cur, cur);
            sigma = ggml_sum_rows(ctx0, sigma);
            cur = ggml_scale(ctx0, ggml_div(ctx0, cur, ggml_sqrt(ctx0, sigma)), sqrt(dim_size));
            // cur = ggml_cont(ctx0, ggml_pad(ctx0, cur, pad_size, 0, 0, 0));
            // cur = ggml_norm(ctx0, cur, hparams.eps);
            // cur = ggml_cont(ctx0, ggml_view_4d(ctx0, cur, dim_size, cur->ne[1], cur->ne[2], cur->ne[3], cur->nb[1], cur->nb[2], cur->nb[3], 0));
            // cur = ggml_scale(ctx0, cur, sqrt(float(dim_size) / (dim_size + pad_size)));
        } else {
            cur = ggml_norm(ctx0, cur, hparams.eps);
        }
#else
        cur = ggml_norm(ctx0, cur, hparams.eps);
#endif
        cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.e_norm_w1), layer.e_norm_b1);
    }

    // self attention
    {
        // self attention linear qkv
        // cur = ggml_transpose(ctx0, cur);
        // split qkv into separate tensors
        // q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)
        // ref:
        // https://github.com/alibaba-damo-academy/FunASR/blob/main/funasr/modules/attention.py#L391-L396
        struct ggml_tensor *Q;
        struct ggml_tensor *Q_h;
        struct ggml_tensor *K;
        struct ggml_tensor *K_h;
        struct ggml_tensor *V;
        struct ggml_tensor *V_h;

        int n_ctx   = cur->ne[1];
        int n_batch = cur->ne[2];

        Q = ggml_add(ctx0,
                     ggml_mul_mat_pad(ctx0, layer.e_attn_ln_q_w, cur),
                     layer.e_attn_ln_q_b);
        Q_h = ggml_reshape_4d(ctx0, Q, n_state / n_head, n_head, n_ctx, n_batch);
        Q_h = ggml_permute(ctx0, Q_h, 0, 2, 1, 3);
        Q_h = ggml_cont(ctx0, Q_h);
        ggml_set_name(Q_h, "attention_Q");

        K = ggml_add(ctx0,
                     ggml_mul_mat(ctx0, layer.e_attn_ln_k_w, cur),
                     layer.e_attn_ln_k_b);
        K_h = ggml_reshape_4d(ctx0, K, n_state / n_head, n_head, n_ctx, n_batch);
        K_h = ggml_permute(ctx0, K_h, 0, 2, 1, 3);
        K_h = ggml_cont(ctx0, K_h);
        // K = ggml_reshape_3d(ctx0, K, n_state, n_ctx, n_head);
        ggml_set_name(K_h, "attention_K");

        V = ggml_add(ctx0,
                     ggml_mul_mat(ctx0, layer.e_attn_ln_v_w, cur),
                     layer.e_attn_ln_v_b);
        ggml_set_name(V, "attention_V");
        V_h = ggml_reshape_4d(ctx0, V, n_state / n_head, n_head, n_ctx, n_batch);
        V_h = ggml_permute(ctx0, V_h, 0, 2, 1, 3);
        V_h = ggml_cont(ctx0, V_h);

        // fsmn forward with V
        int padding = (hparams.fsmn_kernel_size - 1) / 2;
        struct ggml_tensor *fsmn_memory = nullptr;

        // conv depth wise
        {
            {
                // implement conv depth wise with groups=input_channel
                // same in pytorch : F.conv1d(input, weight, bias=None, stride=1, padding=1, dilation=1, groups=n_state)
                struct ggml_tensor * a = layer.e_attn_fsmn_w;
                struct ggml_tensor * b = ggml_cont(ctx0, ggml_transpose(ctx0, V));

                // Process each batch separately and concatenate results
                // for (int i = 0; i < b->ne[2]; i++) {
                //     // View for current batch
                //     struct ggml_tensor *b_batch = ggml_view_3d(ctx0, b, b->ne[0], b->ne[1], 1, b->nb[1], b->nb[2], i * b->nb[2]);
                //     struct ggml_tensor *im2col = ggml_im2col(ctx0, a, ggml_reshape_4d(ctx0, b_batch, b_batch->ne[0], 1, b_batch->ne[1], b_batch->ne[2] * b_batch->ne[3]), 1, 0, padding, 0, 1, 0, false, GGML_TYPE_F32);
                //     struct ggml_tensor * result = ggml_mul_mat(ctx0, a, im2col);
                //     struct ggml_tensor * fsmn_memory_batch = ggml_reshape_3d(ctx0, result, im2col->ne[1], b_batch->ne[1], b_batch->ne[2]);
                //     if (fsmn_memory == nullptr) {
                //         fsmn_memory = fsmn_memory_batch;
                //     } else {
                //         fsmn_memory = ggml_concat(ctx0, fsmn_memory, fsmn_memory_batch, 2);
                //     }
                // }
                struct ggml_tensor * im2col = ggml_im2col(ctx0, a, ggml_reshape_4d(ctx0, b, b->ne[0], 1, b->ne[1] * b->ne[2], b->ne[3]), 1, 0, padding, 0, 1, 0, false, GGML_TYPE_F32);
                im2col = ggml_reshape_4d(ctx0, im2col, im2col->ne[0], im2col->ne[1], im2col->ne[2] / n_batch, n_batch);
                a = ggml_repeat(ctx0, ggml_cast(ctx0, a, GGML_TYPE_F32), ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, a->ne[0], a->ne[1], a->ne[2], n_batch));
                struct ggml_tensor * result = ggml_mul_mat(ctx0, a, im2col);
                fsmn_memory = ggml_reshape_3d(ctx0, result, im2col->ne[1], im2col->ne[2], im2col->ne[3]);
            }
            fsmn_memory = ggml_cont(ctx0, ggml_transpose(ctx0, fsmn_memory));
            fsmn_memory = ggml_add(ctx0, fsmn_memory, V);
            ggml_set_name(fsmn_memory, "fsmn_memory");
        }

        float KQscale = 1.0f / sqrtf(float(n_state) / n_head);

        if (use_flash_attn) {
            const int n_ctx_pad = GGML_PAD(n_ctx, 256);
            const int n_state_head = n_state / n_head;

            ggml_build_forward_expand(gf, ggml_cpy(ctx0, K, ggml_view_1d(ctx0, state->kv_pad.k, n_ctx*n_state*n_batch, 0)));
            ggml_build_forward_expand(gf, ggml_cpy(ctx0, V, ggml_view_1d(ctx0, state->kv_pad.v, n_ctx*n_state*n_batch, 0)));

            struct ggml_tensor * K =
                ggml_view_4d(ctx0, state->kv_pad.k,
                             n_state_head, n_ctx_pad, n_head, n_batch,
                             ggml_element_size(state->kv_pad.k)*n_state,
                             ggml_element_size(state->kv_pad.k)*n_state_head,
                             ggml_element_size(state->kv_pad.k)*n_state*n_ctx_pad,
                             0);

            struct ggml_tensor * V =
                ggml_view_4d(ctx0, state->kv_pad.v,
                             n_state_head, n_ctx_pad, n_head, n_batch,
                             ggml_element_size(state->kv_pad.v)*n_state,
                             ggml_element_size(state->kv_pad.v)*n_state_head,
                             ggml_element_size(state->kv_pad.v)*n_state*n_ctx_pad,
                             0);

            ggml_tensor *KQV = ggml_flash_attn_ext(ctx0, Q_h, K, V, nullptr, KQscale, 0.0f, 0.0f);
            cur = ggml_reshape_3d(ctx0, KQV, n_state, n_ctx, n_batch);
        } else {
            // K * Q
            struct ggml_tensor *KQ = ggml_mul_mat(ctx0, K_h, Q_h);
            struct ggml_tensor *KQ_soft_max = ggml_soft_max_ext(ctx0, KQ, nullptr, KQscale, 0.0f);
            ggml_tensor *KQV = ggml_mul_mat(
                ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, V_h)), KQ_soft_max);
            struct ggml_tensor *KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
            cur = ggml_cpy(ctx0,
                           KQV_merged,
                           ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_state, n_ctx, n_batch));
        }

        cur = ggml_add(ctx0, ggml_mul_mat(ctx0, layer.e_attn_ln_out_w, cur),
                       layer.e_attn_ln_out_b);
        ggml_set_name(cur, "attention_out");

        cur = ggml_add(ctx0, cur, fsmn_memory);
        if (layer.e_norm_w1->ne[0] == layer.e_norm_w2->ne[0]) {
            cur = ggml_add(ctx0, cur, residual);
        }
    }

    residual = ggml_cpy(
        ctx0, cur,
        ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, cur->ne[0], cur->ne[1], cur->ne[2]));

    {
        // layer norm after attention
        // cur = norm_w2*cur + norm_b2
        cur = ggml_norm(ctx0, cur, hparams.eps);
        cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.e_norm_w2), layer.e_norm_b2);
    }

    {
        // position-wise feed forward layer
        cur = ggml_add(ctx0, ggml_mul_mat(ctx0, layer.e_mlp_w1, cur),
                       layer.e_mlp_b1);
        cur = ggml_relu(ctx0, cur);
        cur = ggml_add(ctx0, ggml_mul_mat(ctx0, layer.e_mlp_w2, cur),
                       layer.e_mlp_b2);
    }

    // residual after position wise feed forward
    cur = ggml_add(ctx0, cur, residual);
    return cur;
}
struct ggml_cgraph *sense_voice_build_graph_encoder(sense_voice_context &pctx,
                                                    sense_voice_state &pstate) {
    const auto &model   = pctx.model.model;
    const auto &hparams = pctx.model.hparams;

    struct ggml_init_params params = {
        /*.mem_size   =*/ pstate.sched_encode.meta.size(),
        /*.mem_buffer =*/ pstate.sched_encode.meta.data(),
        /*.no_alloc   =*/ true,
    };

    struct ggml_context *ctx0 = ggml_init(params);
    ggml_cgraph *gf = ggml_new_graph_custom(ctx0, SENSE_VOICE_ENCODER_MAX_NODES, false);

    struct ggml_tensor *feature = pstate.feature.tensor;
    ggml_set_name(feature, "feats");
    ggml_set_input(feature);

    struct ggml_tensor *embedding = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, 4, 1);
    ggml_set_name(embedding, "embedding");
    ggml_set_input(embedding);

    embedding = ggml_get_rows(ctx0, model->embedding, embedding);
    embedding = ggml_repeat(ctx0, embedding, ggml_new_tensor_3d(ctx0, GGML_TYPE_I32, embedding->ne[0], embedding->ne[1], feature->ne[2]));

    struct ggml_tensor *cur = ggml_concat(ctx0, embedding, feature, 1);
    cur = ggml_scale(ctx0, cur, sqrtf(hparams.n_encoder_hidden_state));

    // implement encoder small forward graph
    // ref: https://github.com/modelscope/FunASR/blob/b7b4a83c18277a7022124cad790c08ae703b7a2d/funasr/models/sense_voice/model.py#L558-L583
    // [x] 1. sinusoidal position
    // [x] 2. encoders0
    // [x] 3. encoders
    // [x] 4. tp_encoders
    // [x] 5. tp_norm
    ggml_tensor *position = ggml_new_tensor_3d(ctx0, cur->type, cur->ne[0], cur->ne[1], cur->ne[2]);
    ggml_set_name(position, "position");
    ggml_set_input(position);

    cur = ggml_add(ctx0, position, cur);

    // encoders0 forward
    cur = encoder_layer_sanm_forward(hparams, pctx, ctx0, cur, model->encoder->encoder0, gf, pctx.params.flash_attn);

    // encoders forward
    for (int i = 0; i < hparams.n_encoder_layers - 1; i++) {
        cur = encoder_layer_sanm_forward(hparams, pctx, ctx0, cur, model->encoder->encoders_layer[i], gf, pctx.params.flash_attn);
    }

    {
        // after encoder norm
        cur = ggml_norm(ctx0, cur, hparams.eps);
        cur = ggml_add(ctx0, ggml_mul(ctx0,
                                      cur,
                                      model->encoder->e_after_norm_w),
                       model->encoder->e_after_norm_b);
    }

    // tp encoders forward
    for (int i = 0; i < hparams.n_tp_encoder_layers; i++) {
        cur = encoder_layer_sanm_forward(hparams, pctx, ctx0, cur, model->encoder->tp_encoders_layer[i], gf, pctx.params.flash_attn);
    }

    {
        // tp encoder norm
        cur = ggml_norm(ctx0, cur, hparams.eps);
        cur = ggml_add(ctx0, ggml_mul(ctx0,
                                      cur,
                                      model->encoder->e_tp_norm_w),
                       model->encoder->e_tp_norm_b);
    }

    ggml_build_forward_expand(gf, cur);
    ggml_set_name(cur, "encoder_out");
    ggml_set_output(cur);
    pstate.encoder_out = cur;

    ggml_free(ctx0);
    return gf;
}
bool sense_voice_encode_internal(sense_voice_context &ctx,
                                 sense_voice_state &state,
                                 const int n_threads) {
    const int64_t t_start_us = ggml_time_us();

    // encoder
    {
        auto & sched = state.sched_encode.sched;
        ggml_cgraph *gf = sense_voice_build_graph_encoder(ctx, state);

        if (!ggml_backend_sched_alloc_graph(sched, gf)) {
            // should never happen as we pre-allocate the memory
            return false;
        }
        // ggml_backend_sched_set_tensor_backend(sched, ggml_graph_get_tensor(gf, ));

        // set the inputs
        {
            struct ggml_tensor *position  = ggml_graph_get_tensor(gf, "position");
            struct ggml_tensor *embedding = ggml_graph_get_tensor(gf, "embedding");

            auto n_len   = position->ne[1];
            auto dim     = position->ne[0];
            auto n_batch = position->ne[2];
            std::vector<float> _position;
            _position.resize(n_len * dim * n_batch);

            // construct sinusoidal position embedding
            // reference:
            // https://github.com/modelscope/FunASR/blob/45d7aa9004763684fb748ee17942ecba81042201/funasr/models/transformer/embedding.py#L392-L405
            // P_{k,i}       = sin(k / 10000^(2i/d))   for 0 <= i < d/2
            // P_{k,i + d/2} = cos(k / 10000^(2i/d))   for 0 <= i < d/2
            for (int b = 0; b < n_batch; b++)
                for (int k = 1; k <= n_len; k++) {
                    for (int i = 0; i < dim / 2; i++) {
                        _position[b * n_len * dim + (k - 1) * dim + i] = sinf(k * pow(10000, -2.0 * i / dim));
                        _position[b * n_len * dim + (k - 1) * dim + i + dim / 2] =
                            cosf(k * pow(10000, -2.0 * i / dim));
                    }
                }

            ggml_backend_tensor_set(
                position, _position.data(), 0,
                ggml_nelements(position) * sizeof(float));

            int _embedding[4] = {ctx.language_id, 1, 2, ctx.params.use_itn ? 14 : 15};
            ggml_backend_tensor_set(embedding, _embedding, 0, 4*sizeof(int));
        }

        // ggml_graph_dump_dot(gf, NULL, "sense-voice.dot");
        // ggml_backend_sched_set_eval_callback(sched, ctx.params.cb_eval, ctx.params.cb_eval_user_data);

        if (!ggml_graph_compute_helper(sched, gf, n_threads)) {
            return false;
        }
    }

    state.t_encode_us += ggml_time_us() - t_start_us;
    return true;
}
bool set_sense_voice_encoder_layer_sanm(std::vector<sense_voice_layer_encoder> &encoder,
                                        std::map<std::string, struct ggml_tensor *> &tensors,
                                        int n_encoder_layers,
                                        const std::string &prefix) {
    for (int i = 0; i < n_encoder_layers; ++i) {
        auto layer = &encoder[i];
        // map by name
        layer->e_attn_ln_out_w = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_out.weight"];
        layer->e_attn_ln_out_b = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_out.bias"];
        layer->e_attn_ln_q_w   = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_q.weight"];
        layer->e_attn_ln_q_b   = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_q.bias"];
        layer->e_attn_ln_k_w   = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_k.weight"];
        layer->e_attn_ln_k_b   = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_k.bias"];
        layer->e_attn_ln_v_w   = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_v.weight"];
        layer->e_attn_ln_v_b   = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.linear_v.bias"];
        layer->e_attn_fsmn_w   = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".self_attn.fsmn_block.weight"];
        layer->e_mlp_w1        = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".feed_forward.w_1.weight"];
        layer->e_mlp_b1        = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".feed_forward.w_1.bias"];
        layer->e_mlp_w2        = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".feed_forward.w_2.weight"];
        layer->e_mlp_b2        = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".feed_forward.w_2.bias"];
        layer->e_norm_w1       = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".norm1.weight"];
        layer->e_norm_b1       = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".norm1.bias"];
        layer->e_norm_w2       = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".norm2.weight"];
        layer->e_norm_b2       = tensors["encoder." + prefix + "." + std::to_string(i) +
                                         ".norm2.bias"];
    }
    return true;
}
```
The encoder converts audio features into high-level semantic features that a neural network can work with, and passes them to the downstream decoder, which outputs the text.
Core role (in plain terms)
You can think of it as:
- Ears: listening to the audio signal
- Cerebral cortex: extracting the textual content, intonation, and language from the sound
- Output: handing the decoder a set of "already understood" speech features
It is the most central and computationally heaviest part of the whole speech-recognition pipeline.
Module-by-module quick explanation
1. ggml_mul_mat_pad
- Optimizes matrix multiplication
- Speeds up computation on both GPU and CPU
- Pure low-level acceleration; it does not change the model's logic (a standalone sketch of the idea follows this list)
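The padding trick is easiest to see outside of ggml. Below is a minimal self-contained C++ sketch of the same idea: split the reduction dimension K into a chunk divisible by `pad` plus a small remainder, compute two partial products, and add them. The names and sizes here are illustrative only, not taken from the library.

```cpp
// Sketch of ggml_mul_mat_pad's decomposition: Z = (X_0 @ Y_0) + (X_1 @ Y_1),
// shown on a single dot product for clarity.
#include <cstdio>
#include <vector>

// dot product over the first k elements of x and y
static float dot(const float *x, const float *y, int k) {
    float acc = 0.0f;
    for (int i = 0; i < k; i++) acc += x[i] * y[i];
    return acc;
}

int main() {
    const int K = 70, pad = 32;
    const int K0 = (K / pad) * pad;  // 64: the pad-friendly part
    const int K1 = K % pad;          // 6:  the small remainder

    std::vector<float> x(K), y(K);
    for (int i = 0; i < K; i++) { x[i] = 0.01f * i; y[i] = 1.0f - 0.005f * i; }

    // full product vs. sum of the two partial products
    float z_full  = dot(x.data(), y.data(), K);
    float z_split = dot(x.data(), y.data(), K0) + dot(x.data() + K0, y.data() + K0, K1);

    printf("full = %f, split = %f\n", z_full, z_split);  // identical up to float rounding
    return 0;
}
```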
2. Default parameter initialization
- Sets the model's defaults: whether to use the GPU, whether to use flash attention, and so on (a usage sketch follows)
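A hypothetical caller-side sketch, assuming the header above declares the struct and factory shown in the listing; the init entry point that would consume `params` is not part of this file:

```cpp
#include "sense-voice-encoder.h"  // declares sense_voice_context_params (see listing above)

int main() {
    // start from the library defaults, then override individual fields
    sense_voice_context_params params = sense_voice_context_default_params();
    params.use_gpu    = false;  // force CPU-only execution
    params.flash_attn = true;   // request the flash-attention path
    params.use_itn    = true;   // enable inverse text normalization (prompt token 14)
    // `params` would then be handed to the library's init entry point
    // (not shown in this file), e.g. together with a model path.
    return 0;
}
```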
3. encoder_layer_sanm_forward (the core of the core)
This is the complete computation for one encoder layer. It performs:
- Layer normalization (LayerNorm)
- Self-attention, so the model can focus on the important parts of the sound
- The FSMN convolution module, which captures local speech features (see the depthwise-convolution sketch after this list)
- A feed-forward network (FFN) for feature transformation
- Residual connections, which keep deep networks trainable
Put simply: with every layer, the audio representation becomes a little more "text-like".
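To make the FSMN step concrete, here is a small self-contained C++ sketch of a depthwise 1-D convolution, the operation the listing implements with ggml_im2col (the code's own comment equates it to PyTorch's F.conv1d(..., groups=n_state)). Shapes and values are made up for illustration; this is the math, not the ggml implementation.

```cpp
// Depthwise 1-D convolution: each channel is convolved with its own kernel,
// with zero padding of (K-1)/2 so the output has the same length as the input.
#include <cstdio>
#include <vector>

// x: [channels][time], w: [channels][kernel]
static std::vector<std::vector<float>> depthwise_conv1d(
        const std::vector<std::vector<float>> &x,
        const std::vector<std::vector<float>> &w) {
    const int C = (int) x.size(), T = (int) x[0].size(), K = (int) w[0].size();
    const int pad = (K - 1) / 2;
    std::vector<std::vector<float>> out(C, std::vector<float>(T, 0.0f));
    for (int c = 0; c < C; c++)            // groups == channels: no cross-channel mixing
        for (int t = 0; t < T; t++)
            for (int k = 0; k < K; k++) {
                int src = t + k - pad;     // positions outside [0, T) read as zero
                if (src >= 0 && src < T) out[c][t] += w[c][k] * x[c][src];
            }
    return out;
}

int main() {
    std::vector<std::vector<float>> x = {{1, 2, 3, 4}, {4, 3, 2, 1}};
    std::vector<std::vector<float>> w = {{0.25f, 0.5f, 0.25f}, {1, 0, -1}};
    auto y = depthwise_conv1d(x, w);
    for (auto &row : y) { for (float v : row) printf("%6.2f ", v); printf("\n"); }
    return 0;
}
```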
4. sense_voice_build_graph_encoder
Builds the complete encoder compute graph:
- Chains all the neural network layers in order
- Input: audio features
- Output: encoded speech features
- Contains: position encoding + N SANM encoder layers + normalization
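As a worked detail, the graph-building code above (the ggml_concat, ggml_scale, and ggml_add calls) prepares the first layer's input as

$$x_0 = \sqrt{d}\,\bigl[\,E_{\text{embed}}\,;\,F_{\text{fbank}}\,\bigr] + PE$$

where $d$ is `n_encoder_hidden_state`, $E_{\text{embed}}$ holds the four prompt-token embeddings (the language ID plus task and text-normalization tokens), $F_{\text{fbank}}$ is the input feature tensor, and $PE$ is the sinusoidal position encoding supplied at run time.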
5. sense_voice_encode_internal
This is where the encoder actually runs:
- Generates the position encoding (so the model knows the temporal order of the sound)
- Passes in the language ID (Chinese / English / Cantonese, etc.)
- Executes the neural network computation
- Produces the final audio features (a standalone version of the position-encoding loop is sketched below)
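For reference, this is the sinusoidal position-embedding loop from sense_voice_encode_internal extracted into a self-contained program for a single batch; the sizes are made up for display:

```cpp
// Fills position[k][i] with sin/cos pairs: P_{k,i} = sin(k / 10000^(2i/d)) for
// the first half of the dimension, and the matching cosine in the second half.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const int n_len = 4;   // number of frames
    const int dim   = 8;   // feature dimension d
    std::vector<float> position(n_len * dim);

    for (int k = 1; k <= n_len; k++) {          // positions are 1-based, as in the code
        for (int i = 0; i < dim / 2; i++) {
            const double freq = std::pow(10000.0, -2.0 * i / dim);
            position[(k - 1) * dim + i]           = (float) std::sin(k * freq);
            position[(k - 1) * dim + i + dim / 2] = (float) std::cos(k * freq);
        }
    }

    for (int k = 0; k < n_len; k++) {
        for (int i = 0; i < dim; i++) printf("%7.4f ", position[k * dim + i]);
        printf("\n");
    }
    return 0;
}
```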
6. set_sense_voice_encoder_layer_sanm
Binds the model weights (the parameters stored in the .gguf file) to the network layers:
- The weights are the knowledge the model has learned
- Only after binding can the network recognize speech correctly (a name-lookup sketch follows)
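The binding is a pure name-based lookup. Below is a hypothetical helper showing how the tensor names are assembled and resolved; the missing-tensor check is an illustrative addition, since the original code indexes the map directly (which would silently insert a nullptr entry on a miss):

```cpp
// Assemble "encoder.<prefix>.<layer>.<component>" and resolve it in the map.
#include <cstdio>
#include <map>
#include <string>

struct ggml_tensor;  // opaque here; the real definition lives in ggml

static ggml_tensor *lookup(const std::map<std::string, ggml_tensor *> &tensors,
                           const std::string &prefix, int layer,
                           const std::string &component) {
    const std::string name = "encoder." + prefix + "." + std::to_string(layer) +
                             "." + component;
    auto it = tensors.find(name);
    if (it == tensors.end()) {
        fprintf(stderr, "missing tensor: %s\n", name.c_str());
        return nullptr;
    }
    return it->second;
}

int main() {
    std::map<std::string, ggml_tensor *> tensors;  // would be filled from the gguf file
    // e.g. the query projection of layer 0 of the "encoders" stack:
    ggml_tensor *q_w = lookup(tensors, "encoders", 0, "self_attn.linear_q.weight");
    printf("found: %s\n", q_w ? "yes" : "no");
    return 0;
}
```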
Overall flow (the most important part)
- Input: audio fbank features (a sequence of numbers)
- Add the position encoding (tells the model the temporal order of the sounds)
- Add the language embedding (tells the model which language is being spoken)
- Pass through N SANM encoder layers (attention + convolution + feed-forward)
- Output: high-level semantic features
- Hand off to the decoder → text is generated
An everyday analogy
- The audio = a stretch of messy sound
- The encoder = an interpreter who listens to the sound and understands its content
- The output features = the sentence the interpreter has understood in their head
- The decoder = writing the understood sentence down as text
The encoder is the "understanding brain" of speech recognition.