
Commit 1b3518a

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 115dee1 · commit 1b3518a

File tree: 1 file changed, +57 -56 lines changed

neural_compressor/adaptor/ox_utils/weight_only.py (+57 -56)
@@ -246,6 +246,7 @@ def quant_tensor(data, num_bits=4, group_size=32, scheme="asym", dtype="int", ra
 
     return q_weight, scale, zero_point
 
+
 def quant_tensor_k_quant_cpu(data, num_bits=4, group_size=32):
     """Quantize tensor per group based on k quant.
     Ref: https://github.com/ggml-org/llama.cpp/blob/64eda5deb9859e87a020e56bab5d2f9ca956f1de/ggml/src/ggml-quants.c
@@ -260,44 +261,44 @@ def quant_tensor_k_quant_cpu(data, num_bits=4, group_size=32):
         scale: scale
         zero_point: zero point
     """
-    data = np.reshape(data, (-1, group_size)).astype(np.float32) # (nb, group_size)
+    data = np.reshape(data, (-1, group_size)).astype(np.float32)  # (nb, group_size)
     maxq = 2**num_bits - 1
     minq = 0
-    sum_x2 = np.sum(data**2, axis=1, keepdims=True) # (nb, 1)
-    av_x = np.sqrt(sum_x2 / group_size) # (nb, 1)
-    weights = np.add(av_x, np.abs(data)) # (nb, group_size)
-    rmin = np.min(data, axis=1, keepdims=True) # (nb, 1)
-    rmax = np.max(data, axis=1, keepdims=True) # (nb, 1)
-    sum_w = np.sum(weights, axis=1, keepdims=True) # (nb, 1)
-    sum_x = np.sum(weights * data, axis=1, keepdims=True) # (nb, group_size)
-    iscale = np.ones(rmax.shape, dtype=data.dtype) # (nb, 1)
+    sum_x2 = np.sum(data**2, axis=1, keepdims=True)  # (nb, 1)
+    av_x = np.sqrt(sum_x2 / group_size)  # (nb, 1)
+    weights = np.add(av_x, np.abs(data))  # (nb, group_size)
+    rmin = np.min(data, axis=1, keepdims=True)  # (nb, 1)
+    rmax = np.max(data, axis=1, keepdims=True)  # (nb, 1)
+    sum_w = np.sum(weights, axis=1, keepdims=True)  # (nb, 1)
+    sum_x = np.sum(weights * data, axis=1, keepdims=True)  # (nb, group_size)
+    iscale = np.ones(rmax.shape, dtype=data.dtype)  # (nb, 1)
     mask = rmin != rmax
     iscale[mask] = (maxq - minq) / (rmax[mask] - rmin[mask])
     scale = 1 / iscale
-    quant_data = np.clip(np.round(iscale * (data - rmin)), minq, maxq) # (nb, group_size)
-    diff = scale * quant_data + rmin - data # (nb, group_size)
-    best_mad = np.sum(weights * diff ** 2, axis=1, keepdims=True) # (nb, 1)
+    quant_data = np.clip(np.round(iscale * (data - rmin)), minq, maxq)  # (nb, group_size)
+    diff = scale * quant_data + rmin - data  # (nb, group_size)
+    best_mad = np.sum(weights * diff**2, axis=1, keepdims=True)  # (nb, 1)
     nstep = 20
     rdelta = 0.1
     # nstep * rdelta = -2 * rrmin, maxq - minq = 2**num_bits - 1
     rrmin = -1
     for is_ in range(nstep):
-        iscale_new = np.ones(rmax.shape, dtype=data.dtype) # (nb, 1)
+        iscale_new = np.ones(rmax.shape, dtype=data.dtype)  # (nb, 1)
         factor = np.array([rrmin + rdelta * is_ + maxq - minq]).astype(data.dtype)[0]
         mask = rmin != rmax
         iscale_new[mask] = factor / (rmax[mask] - rmin[mask])
-        quant_data_new = np.clip(np.round(iscale_new * (data - rmin)), minq, maxq) # (nb, group_size)
+        quant_data_new = np.clip(np.round(iscale_new * (data - rmin)), minq, maxq)  # (nb, group_size)
         mul_weights_quant_data_new = weights * quant_data_new
-        sum_l = np.sum(mul_weights_quant_data_new, axis=1, keepdims=True) # (nb, 1)
-        sum_l2 = np.sum(mul_weights_quant_data_new * quant_data_new, axis=1, keepdims=True) # (nb, 1)
-        sum_xl = np.sum(mul_weights_quant_data_new * data, axis=1, keepdims=True) # (nb, 1)
-        D = np.subtract(sum_w * sum_l2, sum_l ** 2) # (nb, 1)
+        sum_l = np.sum(mul_weights_quant_data_new, axis=1, keepdims=True)  # (nb, 1)
+        sum_l2 = np.sum(mul_weights_quant_data_new * quant_data_new, axis=1, keepdims=True)  # (nb, 1)
+        sum_xl = np.sum(mul_weights_quant_data_new * data, axis=1, keepdims=True)  # (nb, 1)
+        D = np.subtract(sum_w * sum_l2, sum_l**2)  # (nb, 1)
 
-        this_scale = (sum_w * sum_xl - sum_x * sum_l) / D # (nb, 1)
-        this_min = (sum_l2 * sum_x - sum_l * sum_xl) / D # (nb, 1)
+        this_scale = (sum_w * sum_xl - sum_x * sum_l) / D  # (nb, 1)
+        this_min = (sum_l2 * sum_x - sum_l * sum_xl) / D  # (nb, 1)
 
-        diff = this_scale * quant_data_new + this_min - data # (nb, group_size)
-        mad = np.sum(weights * diff ** 2, axis=1, keepdims=True) # (nb, 1)
+        diff = this_scale * quant_data_new + this_min - data  # (nb, group_size)
+        mad = np.sum(weights * diff**2, axis=1, keepdims=True)  # (nb, 1)
 
         mad_1 = np.array(mad)
         best_mad_1 = np.array(best_mad)
@@ -307,7 +308,7 @@ def quant_tensor_k_quant_cpu(data, num_bits=4, group_size=32):
         scale[idx_to_replace] = this_scale[idx_to_replace]
         rmin[idx_to_replace] = this_min[idx_to_replace]
 
-    zero_point = np.clip((( - rmin) / scale).round(), 0, maxq).astype("uint8")
+    zero_point = np.clip(((-rmin) / scale).round(), 0, maxq).astype("uint8")
     scale = scale.astype(np.float64)
     q_weight = np.empty_like(data, dtype=scale.dtype)
     np.divide(data, scale, out=q_weight)
@@ -317,6 +318,7 @@ def quant_tensor_k_quant_cpu(data, num_bits=4, group_size=32):
 
     return q_weight, scale, zero_point
 
+
 def quant_tensor_k_quant_cuda(data, num_bits=4, group_size=32):
     """Quantize tensor per group based on k quant.
     Ref: https://github.com/ggml-org/llama.cpp/blob/64eda5deb9859e87a020e56bab5d2f9ca956f1de/ggml/src/ggml-quants.c
@@ -334,45 +336,46 @@ def quant_tensor_k_quant_cuda(data, num_bits=4, group_size=32):
     try:
         import cupy as cp
         import torch
+
         if torch.cuda.is_available():
             data = cp.asarray(data)
-            data = data.reshape((-1, group_size)).astype(cp.float32) # nb = data.shape[0], (nb, group_size)
+            data = data.reshape((-1, group_size)).astype(cp.float32)  # nb = data.shape[0], (nb, group_size)
             maxq = 2**num_bits - 1
             minq = 0
-            sum_x2 = cp.sum(data**2, axis=1, keepdims=True) # (nb, 1)
-            av_x = cp.sqrt(sum_x2 / group_size) # (nb, 1)
-            weights = cp.add(av_x, cp.abs(data)) # (nb, group_size)
-            rmin = cp.min(data, axis=1, keepdims=True) # (nb, 1)
-            rmax = cp.max(data, axis=1, keepdims=True) # (nb, 1)
-            sum_w = cp.sum(weights, axis=1, keepdims=True) # (nb, 1)
-            sum_x = cp.sum(weights * data, axis=1, keepdims=True) # (nb, group_size)
-            iscale = cp.ones(rmax.shape, dtype=data.dtype) # (nb, 1)
+            sum_x2 = cp.sum(data**2, axis=1, keepdims=True)  # (nb, 1)
+            av_x = cp.sqrt(sum_x2 / group_size)  # (nb, 1)
+            weights = cp.add(av_x, cp.abs(data))  # (nb, group_size)
+            rmin = cp.min(data, axis=1, keepdims=True)  # (nb, 1)
+            rmax = cp.max(data, axis=1, keepdims=True)  # (nb, 1)
+            sum_w = cp.sum(weights, axis=1, keepdims=True)  # (nb, 1)
+            sum_x = cp.sum(weights * data, axis=1, keepdims=True)  # (nb, group_size)
+            iscale = cp.ones(rmax.shape, dtype=data.dtype)  # (nb, 1)
             mask = rmin != rmax
             iscale[mask] = (maxq - minq) / (rmax[mask] - rmin[mask])
             scale = 1 / iscale
-            quant_data = cp.clip(cp.round(iscale * (data - rmin)), minq, maxq) # (nb, group_size)
-            diff = scale * quant_data + rmin - data # (nb, group_size)
-            best_mad = cp.sum(weights * diff ** 2, axis=1, keepdims=True) # (nb, 1)
+            quant_data = cp.clip(cp.round(iscale * (data - rmin)), minq, maxq)  # (nb, group_size)
+            diff = scale * quant_data + rmin - data  # (nb, group_size)
+            best_mad = cp.sum(weights * diff**2, axis=1, keepdims=True)  # (nb, 1)
             nstep = 20
             rdelta = 0.1
             rrmin = -1
             for is_ in range(nstep):
-                iscale_new = cp.ones(rmax.shape, dtype=data.dtype) # (nb, 1)
+                iscale_new = cp.ones(rmax.shape, dtype=data.dtype)  # (nb, 1)
                 factor = cp.array([rrmin + rdelta * is_ + maxq - minq]).astype(data.dtype)[0]
                 mask = rmin != rmax
                 iscale_new[mask] = factor / (rmax[mask] - rmin[mask])
-                quant_data_new = cp.clip(cp.round(iscale_new * (data - rmin)), minq, maxq) # (nb, group_size)
+                quant_data_new = cp.clip(cp.round(iscale_new * (data - rmin)), minq, maxq)  # (nb, group_size)
                 mul_weights_quant_data_new = weights * quant_data_new
-                sum_l = cp.sum(mul_weights_quant_data_new, axis=1, keepdims=True) # (nb, 1)
-                sum_l2 = cp.sum(mul_weights_quant_data_new * quant_data_new, axis=1, keepdims=True) # (nb, 1)
-                sum_xl = cp.sum(mul_weights_quant_data_new * data, axis=1, keepdims=True) # (nb, 1)
-                D = cp.subtract(sum_w * sum_l2, sum_l ** 2) # (nb, 1)
+                sum_l = cp.sum(mul_weights_quant_data_new, axis=1, keepdims=True)  # (nb, 1)
+                sum_l2 = cp.sum(mul_weights_quant_data_new * quant_data_new, axis=1, keepdims=True)  # (nb, 1)
+                sum_xl = cp.sum(mul_weights_quant_data_new * data, axis=1, keepdims=True)  # (nb, 1)
+                D = cp.subtract(sum_w * sum_l2, sum_l**2)  # (nb, 1)
 
-                this_scale = (sum_w * sum_xl - sum_x * sum_l) / D # (nb, 1)
-                this_min = (sum_l2 * sum_x - sum_l * sum_xl) / D # (nb, 1)
+                this_scale = (sum_w * sum_xl - sum_x * sum_l) / D  # (nb, 1)
+                this_min = (sum_l2 * sum_x - sum_l * sum_xl) / D  # (nb, 1)
 
-                diff = this_scale * quant_data_new + this_min - data # (nb, group_size)
-                mad = cp.sum(weights * diff ** 2, axis=1, keepdims=True) # (nb, 1)
+                diff = this_scale * quant_data_new + this_min - data  # (nb, group_size)
+                mad = cp.sum(weights * diff**2, axis=1, keepdims=True)  # (nb, 1)
 
                 mad_1 = cp.array(mad)
                 best_mad_1 = cp.array(best_mad)
@@ -382,7 +385,7 @@ def quant_tensor_k_quant_cuda(data, num_bits=4, group_size=32):
                 scale[idx_to_replace] = this_scale[idx_to_replace]
                 rmin[idx_to_replace] = this_min[idx_to_replace]
 
-            zero_point = cp.clip((( - rmin) / scale).round(), 0, maxq).astype("uint8")
+            zero_point = cp.clip(((-rmin) / scale).round(), 0, maxq).astype("uint8")
             scale = scale.astype(cp.float64)
             q_weight = cp.empty_like(data, dtype=scale.dtype)
             cp.divide(data, scale, out=q_weight)
@@ -392,20 +395,18 @@ def quant_tensor_k_quant_cuda(data, num_bits=4, group_size=32):
 
             return q_weight.get(), scale.get(), zero_point.get()
         else:
-            logger.warning("Try to use k-quant quantization on CUDA. However, CUDA is not available." \
-                           "Fall back to k-quant quantization on CPU.")
-            return quant_tensor_k_quant_cpu(
-                data, num_bits, group_size
+            logger.warning(
+                "Try to use k-quant quantization on CUDA. However, CUDA is not available."
+                "Fall back to k-quant quantization on CPU."
             )
+            return quant_tensor_k_quant_cpu(data, num_bits, group_size)
     except ImportError:
         logger.info(
-            "Now we are using k-quant quantization on cpu, which is time consuming." \
-            "Please consider install cupy to speed up on CUDA. See https://cupy.dev/" \
-            "Please also install torch to check CUDA availablity."
-        )
-        return quant_tensor_k_quant_cpu(
-            data, num_bits, group_size
+            "Now we are using k-quant quantization on cpu, which is time consuming."
+            "Please consider install cupy to speed up on CUDA. See https://cupy.dev/"
+            "Please also install torch to check CUDA availability."
         )
+        return quant_tensor_k_quant_cpu(data, num_bits, group_size)
 
 
 def qdq_tensor(data, num_bits=4, group_size=32, scheme="asym", dtype="int", ratio=1.0):
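
Editor's note on the refinement loop shown above (a sketch, not part of the commit): nstep = 20, rdelta = 0.1 and rrmin = -1 make the loop a small grid search over candidate slopes around maxq - minq, which is what the in-code comment "nstep * rdelta = -2 * rrmin" records. A minimal Python sketch of the factors being swept:

# Sketch (not from the commit): the slope factors the refinement loop tries.
# For num_bits=4, maxq - minq = 15 and the sweep covers 15 + [-1.0, 0.9].
num_bits, nstep, rdelta, rrmin = 4, 20, 0.1, -1
maxq, minq = 2**num_bits - 1, 0
factors = [rrmin + rdelta * is_ + maxq - minq for is_ in range(nstep)]
print(min(factors), max(factors))  # 14.0 15.9; each gives iscale_new = factor / (rmax - rmin)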
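
The this_scale / this_min update is the closed-form solution of a per-group weighted least-squares fit: it minimizes sum_i w_i * (s * q_i + m - x_i)**2 over (s, m), with w = weights, q = quant_data_new and x = data, via the 2x2 normal equations. A self-contained check of that identity (editor's sketch; the lstsq reference solve is not code from this repository):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(32)                        # one group of weights
q = rng.integers(0, 16, 32).astype(np.float64)     # candidate 4-bit codes
w = rng.random(32) + 0.1                           # per-element importance weights

# Closed form used in the diff: solve the normal equations directly.
sum_w, sum_l, sum_l2 = w.sum(), (w * q).sum(), (w * q * q).sum()
sum_x, sum_xl = (w * x).sum(), (w * q * x).sum()
D = sum_w * sum_l2 - sum_l**2
s_closed = (sum_w * sum_xl - sum_x * sum_l) / D
m_closed = (sum_l2 * sum_x - sum_l * sum_xl) / D

# Reference: generic weighted least squares, rows scaled by sqrt(w).
A = np.stack([q, np.ones_like(q)], axis=1) * np.sqrt(w)[:, None]
s_ref, m_ref = np.linalg.lstsq(A, x * np.sqrt(w), rcond=None)[0]
assert np.allclose([s_closed, m_closed], [s_ref, m_ref])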
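
Finally, a hypothetical smoke test for the CPU entry point. The import path is the file touched by this commit; the dequantization convention (q_weight - zero_point) * scale is my assumption based on the asymmetric scheme used here, not something the diff states:

import numpy as np
from neural_compressor.adaptor.ox_utils.weight_only import quant_tensor_k_quant_cpu

w = np.random.randn(128, 256).astype(np.float32)   # total size divisible by group_size
q, scale, zp = quant_tensor_k_quant_cpu(w, num_bits=4, group_size=32)

# q, scale and zp come back grouped as (num_groups, ...); undo the grouping
# and measure reconstruction error under the assumed dequant convention.
deq = ((q - zp) * scale).reshape(w.shape).astype(np.float32)
print("max abs error:", np.abs(deq - w).max())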
