#include "SkMipMap.h"
#include "SkBitmap.h"
#include "SkColorPriv.h"
-#include "SkNx.h"
//
// ColorTypeFilter is the "Type" we pass to some downsample template functions.
struct ColorTypeFilter_8888 {
typedef uint32_t Type;
-#if defined(SKNX_IS_FAST)
- static Sk4h Expand(uint32_t x) {
- return SkNx_cast<uint16_t>(Sk4b::Load((const uint8_t*)&x));
- }
- static uint32_t Compact(const Sk4h& x) {
- uint32_t r;
- SkNx_cast<uint8_t>(x).store((uint8_t*)&r);
- return r;
- }
-#else
static uint64_t Expand(uint32_t x) {
return (x & 0xFF00FF) | ((uint64_t)(x & 0xFF00FF00) << 24);
}
static uint32_t Compact(uint64_t x) {
return (uint32_t)((x & 0xFF00FF) | ((x >> 24) & 0xFF00FF00));
}
-#endif
};
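// Illustrative sketch (not part of this CL; helper names are hypothetical):
// Expand spreads the four 8-bit channels of a 32-bit pixel into four 16-bit
// fields of a uint64_t, so sums of up to four expanded pixels plus a rounding
// bias cannot carry into a neighboring channel, and Compact reverses the
// packing. Any odd bits that leak across a field boundary when the packed sum
// is shifted right land in the high byte of the lower field, which Compact
// masks away. A standalone round trip:

#include <cassert>
#include <cstdint>

static uint64_t expand_8888(uint32_t x) {
    return (x & 0xFF00FF) | ((uint64_t)(x & 0xFF00FF00) << 24);
}
static uint32_t compact_8888(uint64_t x) {
    return (uint32_t)((x & 0xFF00FF) | ((x >> 24) & 0xFF00FF00));
}

int main() {
    uint32_t a = 0x80FF4020, b = 0x7F003060;
    // Rounded per-channel average of a and b, computed entirely in packed form.
    uint64_t sum = expand_8888(a) + expand_8888(b) + expand_8888(0x01010101);
    assert(compact_8888(sum >> 1) == 0x80803840);   // each channel is (a+b+1)/2
    return 0;
}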
struct ColorTypeFilter_565 {
}
};
-template <typename T> T add_121(const T& a, const T& b, const T& c) {
+template <typename T> T add_121(T a, T b, T c) {
return a + b + b + c;
}
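// Illustrative sketch (not part of this CL; names are hypothetical): add_121
// computes the 1-2-1 triangle-filter sum a + 2b + c. The weights total 4, so
// the caller can divide back to 8 bits with a shift, and the largest
// per-channel sum, 4 * 255 = 1020, still fits in the 16-bit fields produced by
// Expand. The same arithmetic on a single scalar channel:

#include <cassert>
#include <cstdint>

static uint8_t filter_121(uint8_t a, uint8_t b, uint8_t c) {
    return (uint8_t)((a + 2 * b + c) >> 2);   // weights 1-2-1, divided by 4
}

int main() {
    assert(filter_121(255, 255, 255) == 255);  // 1020 >> 2: no overflow, no clamp
    assert(filter_121(0, 255, 0) == 127);      // 510 >> 2 truncates toward zero
    return 0;
}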
auto p0 = static_cast<const typename F::Type*>(src);
auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
auto d = static_cast<typename F::Type*>(dst);
-
+
auto c02 = F::Expand(p0[0]);
auto c12 = F::Expand(p1[0]);
for (int i = 0; i < count; ++i) {
auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
auto d = static_cast<typename F::Type*>(dst);
-
+
for (int i = 0; i < count; ++i) {
auto c00 = F::Expand(p0[0]);
auto c01 = F::Expand(p0[1]);
float32x4_t fVec;
};
-// It's possible that for our current use cases, representing this as
-// half a uint16x8_t might be better than representing it as a uint16x4_t.
-// It'd make conversion to Sk4b one step simpler.
-template <>
-class SkNx<4, uint16_t> {
-public:
- SkNx(const uint16x4_t& vec) : fVec(vec) {}
-
- SkNx() {}
- SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
- static SkNx Load(const uint16_t vals[4]) { return vld1_u16(vals); }
-
- SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
- fVec = (uint16x4_t) { a,b,c,d };
- }
-
- void store(uint16_t vals[4]) const { vst1_u16(vals, fVec); }
-
- SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
-
- SkNx operator << (int bits) const { SHIFT16(vshl_n_u16, fVec, bits); }
- SkNx operator >> (int bits) const { SHIFT16(vshr_n_u16, fVec, bits); }
-
- static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }
-
- template <int k> uint16_t kth() const {
- SkASSERT(0 <= k && k < 4);
- return vget_lane_u16(fVec, k&3);
- }
-
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
- return vbsl_u16(fVec, t.fVec, e.fVec);
- }
-
- uint16x4_t fVec;
-};
-
template <>
class SkNx<8, uint16_t> {
public:
(uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
}
-template<> inline Sk4h SkNx_cast<uint16_t, uint8_t, 4>(const Sk4b& src) {
- return vget_low_u16(vmovl_u8(src.fVec));
-}
-
-template<> inline Sk4b SkNx_cast<uint8_t, uint16_t, 4>(const Sk4h& src) {
- return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
-}
-
} // namespace
#endif//SkNx_neon_DEFINED
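// Illustrative sketch (not part of this CL; helper names are hypothetical):
// the deleted Sk4h/Sk4b casts above reduce to one NEON widen and one NEON
// narrow. The narrowing direction is what the deleted comment calls out:
// vmovn_u16 consumes a full uint16x8_t, so a uint16x4_t must first be
// vcombine'd with itself, a step that holding the lanes as half of a
// uint16x8_t would have avoided.

#include <arm_neon.h>

static inline uint16x4_t widen_4_u8(uint8x8_t v) {
    return vget_low_u16(vmovl_u8(v));        // u8 -> u16, keep the low four lanes
}

static inline uint8x8_t narrow_4_u16(uint16x4_t v) {
    return vmovn_u16(vcombine_u16(v, v));    // duplicate halves, then narrow u16 -> u8
}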
_mm_cvttps_epi32(d.fVec))));
}
-template<> inline Sk4h SkNx_cast<uint16_t, uint8_t, 4>(const Sk4b& src) {
- return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
-}
-
-template<> inline Sk4b SkNx_cast<uint8_t, uint16_t, 4>(const Sk4h& src) {
- return _mm_packus_epi16(src.fVec, src.fVec);
-}
-
} // namespace