Chromium no longer supports building with GCC since Clang became the
default toolchain. This commit restores GCC build support:
$<build command> --gcc
Change-Id: Id5ef9fb5a112184ff9a57db9c63892e80eed26b8
Signed-off-by: venu.musham <venu.musham@samsung.com>
template <typename T> class SupportsWeakPtr;
template <typename T> class WeakPtr;
+// This class can only be instantiated if the constructor argument inherits
+// from SupportsWeakPtr<T> in exactly one way.
+template <typename T>
+struct ExtractSinglyInheritedBase;
+template <typename T>
+struct ExtractSinglyInheritedBase<SupportsWeakPtr<T>> {
+ using Base = T;
+ explicit ExtractSinglyInheritedBase(SupportsWeakPtr<T>*);
+};
+template <typename T>
+ExtractSinglyInheritedBase(SupportsWeakPtr<T>*)
+-> ExtractSinglyInheritedBase<SupportsWeakPtr<T>>;
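Note: older GCC releases reject deduction guides declared at class scope
(Clang accepts them), which is presumably why the helper and its guide are
hoisted to namespace scope above. A standalone sketch of the mechanism, using
a hypothetical MyController type:

    #include <type_traits>

    template <typename T> struct SupportsWeakPtr {};

    template <typename T>
    struct ExtractSinglyInheritedBase;
    template <typename T>
    struct ExtractSinglyInheritedBase<SupportsWeakPtr<T>> {
      using Base = T;
      explicit ExtractSinglyInheritedBase(SupportsWeakPtr<T>*);
    };
    template <typename T>
    ExtractSinglyInheritedBase(SupportsWeakPtr<T>*)
        -> ExtractSinglyInheritedBase<SupportsWeakPtr<T>>;

    struct MyController : SupportsWeakPtr<MyController> {};

    // CTAD succeeds only because MyController* converts unambiguously to
    // SupportsWeakPtr<MyController>*; an ambiguous base would fail to deduce.
    using Extracted = decltype(ExtractSinglyInheritedBase(
        static_cast<MyController*>(nullptr)));
    static_assert(std::is_same_v<Extracted::Base, MyController>);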
namespace internal {
// These classes are part of the WeakPtr implementation.
}
private:
- // This class can only be instantiated if the constructor argument inherits
- // from SupportsWeakPtr<T> in exactly one way.
- template <typename T>
- struct ExtractSinglyInheritedBase;
- template <typename T>
- struct ExtractSinglyInheritedBase<SupportsWeakPtr<T>> {
- using Base = T;
- explicit ExtractSinglyInheritedBase(SupportsWeakPtr<T>*);
- };
- template <typename T>
- ExtractSinglyInheritedBase(SupportsWeakPtr<T>*)
- -> ExtractSinglyInheritedBase<SupportsWeakPtr<T>>;
};
// Forward declaration from safe_ptr.h.
}
Backtrace::Backtrace() = default;
+Backtrace::Backtrace(const Backtrace &) = default;
bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
if (lhs.frame_count != rhs.frame_count) return false;
AllocationContext::AllocationContext(): type_name(nullptr) {}
-AllocationContext::AllocationContext(const Backtrace& backtrace,
+AllocationContext::AllocationContext(const Backtrace& back_trace,
const char* type_name)
- : backtrace(backtrace), type_name(type_name) {}
+ : backtrace(back_trace), type_name(type_name) {}
bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
struct BASE_EXPORT Backtrace {
Backtrace();
+ Backtrace(const Backtrace &);
// If the stack is higher than what can be stored here, the top frames
// (the ones further from main()) are stored. Depth of 12 is enough for most
other.enable_event_package_name_filter_ ||
histogram_names_ != other.histogram_names_ ||
systrace_events_ != other.systrace_events_ ||
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
process_filter_config_ != other.process_filter_config_ ||
memory_dump_config_ != other.memory_dump_config_ ||
+#endif
!category_filter_.IsEquivalentTo(other.category_filter_)) {
return false;
}
# ---------------------------------
if (is_linux || is_chromeos || is_android || is_fuchsia || is_tizen) {
asmflags += [ "-fPIC" ]
    cflags += [ "-fPIC" ]
ldflags += [ "-fPIC", "-latomic" ]
rustflags += [ "-Crelocation-model=pic" ]
if (!is_clang) {
# Use pipes for communicating between sub-processes. Faster.
# (This flag doesn't do anything with Clang.)
- cflags += [ "-pipe" ]
+ cflags += [ "-pipe", "-fconcepts", "-flax-vector-conversions" ]
}
ldflags += [
// overwrite their local state unconditionally.
// Not syncing back has the additional advantage that it makes deprecating
// these fields (should this ever happen) easier.
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
if (GetProfileValueHash(*profile_, type) == metadata.value_hash()) {
auto& observations = profile_->token_quality().observations_[type];
CHECK(observations.empty());
proto_observation.form_hash()));
}
}
+#endif
}
const raw_ref<AutofillProfile> profile_;
// ADDRESS_HOME_LANDMARK: "foo"
class AutofillParsingProcess {
public:
- constexpr AutofillParsingProcess() = default;
+ AutofillParsingProcess() = default;
AutofillParsingProcess(const AutofillParsingProcess& other) = delete;
AutofillParsingProcess& operator=(const AutofillParsingProcess& right) =
delete;
- virtual constexpr ~AutofillParsingProcess() = default;
+ virtual ~AutofillParsingProcess() = default;
// Parses `value` and returns the extracted field type matches.
virtual ValueParsingResults Parse(std::string_view value) const = 0;
public:
// Note that `parsing_regex` needs to survive the lifetime of the
// Decomposition.
- constexpr Decomposition(std::string_view parsing_regex,
+ Decomposition(std::string_view parsing_regex,
bool anchor_beginning,
bool anchor_end)
: parsing_regex_(parsing_regex),
anchor_end_(anchor_end) {}
Decomposition(const Decomposition&) = delete;
Decomposition& operator=(const Decomposition&) = delete;
- constexpr ~Decomposition() override;
+ ~Decomposition() override = default;
ValueParsingResults Parse(std::string_view value) const override;
const bool anchor_end_ = true;
};
-constexpr Decomposition::~Decomposition() = default;
-
// A DecompositionCascade enables us to try one Decomposition after the next
// until we have found a match. It can be fitted with a condition to only use it
// in case the condition is fulfilled. The lack of a condition is expressed by
public:
// Note that `condition_regex` and `alternatives` need to survive the lifetime
// of the DecompositionCascade.
- constexpr DecompositionCascade(
+ DecompositionCascade(
std::string_view condition_regex,
base::span<const AutofillParsingProcess* const> alternatives)
: condition_regex_(condition_regex), alternatives_(alternatives) {}
DecompositionCascade(const DecompositionCascade&) = delete;
DecompositionCascade& operator=(const DecompositionCascade&) = delete;
- constexpr ~DecompositionCascade() override;
+ ~DecompositionCascade() override = default;
ValueParsingResults Parse(std::string_view value) const override;
const base::span<const AutofillParsingProcess* const> alternatives_;
};
-constexpr DecompositionCascade::~DecompositionCascade() = default;
-
// An ExtractPart parsing process attempts to match a string to a
// parsing expression, and then extracts the captured field type values. It can
// be fitted with a condition to only use it in case the condition is fulfilled.
public:
// Note that `condition_regex` and `parsing_regex` need to survive the
// lifetime of the DecompositionCascade.
- constexpr ExtractPart(std::string_view condition_regex,
+ ExtractPart(std::string_view condition_regex,
std::string_view parsing_regex)
: condition_regex_(condition_regex), parsing_regex_(parsing_regex) {}
ExtractPart(const ExtractPart&) = delete;
ExtractPart& operator=(const ExtractPart&) = delete;
- constexpr ~ExtractPart() override;
+ ~ExtractPart() override = default;
ValueParsingResults Parse(std::string_view value) const override;
const std::string_view parsing_regex_;
};
-constexpr ExtractPart::~ExtractPart() = default;
-
// Unlike for a DecompositionCascade, ExtractParts does not follow the "the
// first match wins" principle but applies all matching attempts in sequence so
// the last match wins. This also enables extracting different data (e.g. an
public:
// Note that `condition_regex` and `pieces` need to survive the lifetime of
// the ExtractParts.
- constexpr ExtractParts(std::string_view condition_regex,
+ ExtractParts(std::string_view condition_regex,
base::span<const ExtractPart* const> pieces)
: condition_regex_(condition_regex), pieces_(pieces) {}
ExtractParts(const ExtractParts&) = delete;
ExtractParts& operator=(const ExtractParts&) = delete;
- constexpr ~ExtractParts() override;
+ ~ExtractParts() override = default;
ValueParsingResults Parse(std::string_view value) const override;
const base::span<const ExtractPart* const> pieces_;
};
-constexpr ExtractParts::~ExtractParts() = default;
-
} // namespace autofill::i18n_model_definition
#endif // COMPONENTS_AUTOFILL_CORE_BROWSER_DATA_MODEL_AUTOFILL_I18N_PARSING_EXPRESSION_COMPONENTS_H_
// Section for singular decomposition(s).
-constexpr Decomposition kDecompositionList[] = {
+Decomposition kDecompositionList[] = {
Decomposition(kRegularExpression_26, true, true),
Decomposition(kRegularExpression_29, true, true),
Decomposition(kRegularExpression_27, true, true),
};
// Section for singular extract part(s).
-constexpr ExtractPart kExtractPartList[]{
+ExtractPart kExtractPartList[]{
ExtractPart("", kRegularExpression_38),
ExtractPart("", kRegularExpression_39),
ExtractPart("", kRegularExpression_40),
// Section for decomposition cascades and their alternatives.
constexpr AutofillParsingProcess const* kDecompositionCascade_0_Alternatives[]{ &kDecompositionList[0], &kDecompositionList[1], &kDecompositionList[2], &kDecompositionList[3]};
-constexpr DecompositionCascade kDecompositionCascade_0 = DecompositionCascade(kRegularExpression_14, kDecompositionCascade_0_Alternatives);
+DecompositionCascade kDecompositionCascade_0 = DecompositionCascade(kRegularExpression_14, kDecompositionCascade_0_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_1_Alternatives[]{ &kDecompositionList[4]};
-constexpr DecompositionCascade kDecompositionCascade_1 = DecompositionCascade(kRegularExpression_13, kDecompositionCascade_1_Alternatives);
+DecompositionCascade kDecompositionCascade_1 = DecompositionCascade(kRegularExpression_13, kDecompositionCascade_1_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_2_Alternatives[]{ &kDecompositionList[5], &kDecompositionList[6], &kDecompositionList[7]};
-constexpr DecompositionCascade kDecompositionCascade_2 = DecompositionCascade("", kDecompositionCascade_2_Alternatives);
+DecompositionCascade kDecompositionCascade_2 = DecompositionCascade("", kDecompositionCascade_2_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_3_Alternatives[]{ &kDecompositionCascade_0, &kDecompositionCascade_1, &kDecompositionCascade_2};
-constexpr DecompositionCascade kDecompositionCascade_3 = DecompositionCascade("", kDecompositionCascade_3_Alternatives);
+DecompositionCascade kDecompositionCascade_3 = DecompositionCascade("", kDecompositionCascade_3_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_4_Alternatives[]{ &kDecompositionList[0], &kDecompositionList[1], &kDecompositionList[2], &kDecompositionList[3]};
-constexpr DecompositionCascade kDecompositionCascade_4 = DecompositionCascade(kRegularExpression_14, kDecompositionCascade_4_Alternatives);
+DecompositionCascade kDecompositionCascade_4 = DecompositionCascade(kRegularExpression_14, kDecompositionCascade_4_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_5_Alternatives[]{ &kDecompositionList[10]};
-constexpr DecompositionCascade kDecompositionCascade_5 = DecompositionCascade(kRegularExpression_13, kDecompositionCascade_5_Alternatives);
+DecompositionCascade kDecompositionCascade_5 = DecompositionCascade(kRegularExpression_13, kDecompositionCascade_5_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_6_Alternatives[]{ &kDecompositionList[5], &kDecompositionList[11], &kDecompositionList[12]};
-constexpr DecompositionCascade kDecompositionCascade_6 = DecompositionCascade("", kDecompositionCascade_6_Alternatives);
+DecompositionCascade kDecompositionCascade_6 = DecompositionCascade("", kDecompositionCascade_6_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_7_Alternatives[]{ &kDecompositionCascade_4, &kDecompositionCascade_5, &kDecompositionCascade_6};
-constexpr DecompositionCascade kDecompositionCascade_7 = DecompositionCascade("", kDecompositionCascade_7_Alternatives);
+DecompositionCascade kDecompositionCascade_7 = DecompositionCascade("", kDecompositionCascade_7_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_8_Alternatives[]{ &kDecompositionList[0], &kDecompositionList[1], &kDecompositionList[2], &kDecompositionList[3]};
-constexpr DecompositionCascade kDecompositionCascade_8 = DecompositionCascade(kRegularExpression_14, kDecompositionCascade_8_Alternatives);
+DecompositionCascade kDecompositionCascade_8 = DecompositionCascade(kRegularExpression_14, kDecompositionCascade_8_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_9_Alternatives[]{ &kDecompositionList[10]};
-constexpr DecompositionCascade kDecompositionCascade_9 = DecompositionCascade(kRegularExpression_13, kDecompositionCascade_9_Alternatives);
+DecompositionCascade kDecompositionCascade_9 = DecompositionCascade(kRegularExpression_13, kDecompositionCascade_9_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_10_Alternatives[]{ &kDecompositionList[5], &kDecompositionList[6], &kDecompositionList[7]};
-constexpr DecompositionCascade kDecompositionCascade_10 = DecompositionCascade("", kDecompositionCascade_10_Alternatives);
+DecompositionCascade kDecompositionCascade_10 = DecompositionCascade("", kDecompositionCascade_10_Alternatives);
constexpr AutofillParsingProcess const* kDecompositionCascade_11_Alternatives[]{ &kDecompositionCascade_8, &kDecompositionCascade_9, &kDecompositionCascade_10};
-constexpr DecompositionCascade kDecompositionCascade_11 = DecompositionCascade("", kDecompositionCascade_11_Alternatives);
+DecompositionCascade kDecompositionCascade_11 = DecompositionCascade("", kDecompositionCascade_11_Alternatives);
// Section for extract parts and their pieces.
constexpr ExtractPart const* kExtractParts_0_Pieces[]{&kExtractPartList[0],&kExtractPartList[1],&kExtractPartList[2]};
-constexpr ExtractParts kExtractParts_0 = ExtractParts("", kExtractParts_0_Pieces);
+ExtractParts kExtractParts_0 = ExtractParts("", kExtractParts_0_Pieces);
constexpr ExtractPart const* kExtractParts_1_Pieces[]{&kExtractPartList[0],&kExtractPartList[1],&kExtractPartList[2]};
-constexpr ExtractParts kExtractParts_1 = ExtractParts("", kExtractParts_1_Pieces);
+ExtractParts kExtractParts_1 = ExtractParts("", kExtractParts_1_Pieces);
constexpr ExtractPart const* kExtractParts_2_Pieces[]{&kExtractPartList[0],&kExtractPartList[1],&kExtractPartList[2],&kExtractPartList[3]};
-constexpr ExtractParts kExtractParts_2 = ExtractParts("", kExtractParts_2_Pieces);
+ExtractParts kExtractParts_2 = ExtractParts("", kExtractParts_2_Pieces);
constexpr ExtractPart const* kExtractParts_3_Pieces[]{&kExtractPartList[4],&kExtractPartList[0],&kExtractPartList[1],&kExtractPartList[2],&kExtractPartList[3]};
-constexpr ExtractParts kExtractParts_3 = ExtractParts("", kExtractParts_3_Pieces);
+ExtractParts kExtractParts_3 = ExtractParts("", kExtractParts_3_Pieces);
constexpr ExtractPart const* kExtractParts_4_Pieces[]{&kExtractPartList[5],&kExtractPartList[6]};
-constexpr ExtractParts kExtractParts_4 = ExtractParts("", kExtractParts_4_Pieces);
+ExtractParts kExtractParts_4 = ExtractParts("", kExtractParts_4_Pieces);
constexpr ExtractPart const* kExtractParts_5_Pieces[]{&kExtractPartList[7],&kExtractPartList[8]};
-constexpr ExtractParts kExtractParts_5 = ExtractParts("", kExtractParts_5_Pieces);
+ExtractParts kExtractParts_5 = ExtractParts("", kExtractParts_5_Pieces);
constexpr ExtractPart const* kExtractParts_6_Pieces[]{&kExtractPartList[7],&kExtractPartList[8]};
-constexpr ExtractParts kExtractParts_6 = ExtractParts("", kExtractParts_6_Pieces);
+ExtractParts kExtractParts_6 = ExtractParts("", kExtractParts_6_Pieces);
constexpr ExtractPart const* kExtractParts_7_Pieces[]{&kExtractPartList[9],&kExtractPartList[5],&kExtractPartList[6],&kExtractPartList[7],&kExtractPartList[8]};
-constexpr ExtractParts kExtractParts_7 = ExtractParts("", kExtractParts_7_Pieces);
+ExtractParts kExtractParts_7 = ExtractParts("", kExtractParts_7_Pieces);
} // namespace
// A lookup map for parsing expressions for countries and field types.
FieldFillingEntry& operator=(const FieldFillingEntry&) = default;
FieldFillingEntry& operator=(FieldFillingEntry&&) = default;
- bool operator==(const FieldFillingEntry& rhs) const = default;
+  bool operator==(const FieldFillingEntry& rhs) const;
// Value of the field prior to the Undo operation.
std::u16string value;
constexpr std::string_view kInstrumentType = "instrument_type";
// kNickname = "nickname"
constexpr std::string_view kDisplayIconUrl = "display_icon_url";
-constexpr std::initializer_list<std::pair<std::string_view, std::string_view>>
+std::initializer_list<std::pair<std::string_view, std::string_view>>
kPaymentInstrumentsColumnNamesAndTypes = {
{kInstrumentId, "INTEGER NOT NULL"},
{kInstrumentType, "INTEGER NOT NULL"},
// kInstrumentType = "instrument_type"
// kUseCount = "use_count"
// kUseDate = "use_date"
-constexpr std::initializer_list<std::pair<std::string_view, std::string_view>>
+std::initializer_list<std::pair<std::string_view, std::string_view>>
kPaymentInstrumentsMetadataColumnNamesAndTypes = {
{kInstrumentId, "INTEGER NOT NULL"},
{kInstrumentType, "INTEGER NOT NULL"},
// kInstrumentId = "instrument_id"
// kInstrumentType = "instrument_type"
constexpr std::string_view kPaymentRail = "payment_rail";
-constexpr std::initializer_list<std::pair<std::string_view, std::string_view>>
+std::initializer_list<std::pair<std::string_view, std::string_view>>
kPaymentInstrumentSupportedRailsColumnNamesAndTypes = {
{kInstrumentId, "INTEGER NOT NULL"},
{kInstrumentType, "INTEGER NOT NULL"},
// kBankName = "bank_name"
constexpr std::string_view kAccountNumberSuffix = "account_number_suffix";
constexpr std::string_view kAccountType = "account_type";
-constexpr std::initializer_list<std::pair<std::string_view, std::string_view>>
+std::initializer_list<std::pair<std::string_view, std::string_view>>
bank_accounts_column_names_and_types = {
{kInstrumentId, "INTEGER PRIMARY KEY NOT NULL"},
{kBankName, "VARCHAR"},
int status;
// Serialized observations for the stored type.
std::vector<uint8_t> serialized_data;
+
+  FieldTypeData(autofill::ServerFieldType type,
+                std::u16string value,
+                int status,
+                std::vector<uint8_t> serialized_data)
+      : type(type),
+        value(std::move(value)),
+        status(status),
+        serialized_data(std::move(serialized_data)) {}
};
std::vector<FieldTypeData> field_type_values;
// passing last_updated_timestamp, which is needed for sync bridge. Limited
// scope in autofill table & sync bridge.
struct ServerCvc {
- bool operator==(const ServerCvc&) const = default;
+ bool operator==(const ServerCvc&) const;
// A server generated id to identify the corresponding credit card.
const int64_t instrument_id;
// CVC value of the card.
// Returns true if some element of |xs| is an element of this set.
bool contains_any(const DenseSet& xs) const {
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
return (bitset_ & xs.bitset_) != Bitset{};
+#else
+ return false;
+#endif
}
// Returns true if every element of |xs| is an element of this set.
MediaType(const std::string& vendor_id,
const std::string& custom_display_name);
- bool operator==(const MediaType& other) const = default;
+ bool operator==(const MediaType& other) const;
bool operator!=(const MediaType& other) const { return !(*this == other); }
bool IsValid() const;
Key(base::span<const uint8_t> key, const mojom::Algorithm& algo);
- bool operator==(const Key& other) const = default;
+ bool operator==(const Key& other) const;
private:
friend class Encryptor;
FieldInfo& operator=(const FieldInfo&);
~FieldInfo();
- friend bool operator==(const FieldInfo& lhs, const FieldInfo& rhs) = default;
+ friend bool operator==(const FieldInfo& lhs, const FieldInfo& rhs);
};
// Manages information about the last user-interacted fields, keeps
bool is_override = false;
friend bool operator==(const PasswordFieldPrediction& lhs,
- const PasswordFieldPrediction& rhs) = default;
+ const PasswordFieldPrediction& rhs);
};
// Contains server predictions for a form.
std::vector<PasswordFieldPrediction> fields;
friend bool operator==(const FormPredictions& lhs,
- const FormPredictions& rhs) = default;
+ const FormPredictions& rhs);
};
// Extracts password related server predictions from `form` and `predictions`.
// Id of the field within the frame.
autofill::FieldRendererId renderer_id;
+  PossibleUsernameFieldIdentifier(int driver_id,
+                                  autofill::FieldRendererId renderer_id)
+      : driver_id(driver_id), renderer_id(renderer_id) {}
+
friend bool operator<(const PossibleUsernameFieldIdentifier& lhs,
const PossibleUsernameFieldIdentifier& rhs) {
return std::make_pair(lhs.driver_id, lhs.renderer_id) <
struct DestinationEnumEvent {
std::string type;
std::string data;
+  DestinationEnumEvent(std::string type, std::string data)
+      : type(std::move(type)), data(std::move(data)) {}
};
// An event to be sent to a custom url.
// Macros are substituted using the `ReportingMacros`.
struct DestinationURLEvent {
GURL url;
+  explicit DestinationURLEvent(GURL url) : url(std::move(url)) {}
};
// Class that receives report events from fenced frames, and uses a
return requires_origin_keyed_process_;
}
- bool operator==(const OriginAgentClusterIsolationState&) const = default;
+ bool operator==(const OriginAgentClusterIsolationState&) const;
private:
OriginAgentClusterIsolationState(bool is_origin_agent_cluster,
using CodeAndShiftedChar =
std::pair<ui::KeyboardCode, absl::optional<char16_t>>;
-constexpr CodeAndShiftedChar KeyboardCodeFromKeyIdentifier(
+CodeAndShiftedChar KeyboardCodeFromKeyIdentifier(
base::StringPiece str) {
#if BUILDFLAG(IS_MAC)
constexpr auto CommandOrControl = ui::VKEY_COMMAND;
constexpr auto CommandOrControl = ui::VKEY_CONTROL;
#endif
- constexpr auto Lookup =
+  // Build the table once; a plain local would reconstruct the map on every
+  // call now that it is no longer constexpr.
+  static const auto Lookup =
base::MakeFixedFlatMapSorted<base::StringPiece, CodeAndShiftedChar>({
{"alt", {ui::VKEY_MENU, {}}},
{"altgr", {ui::VKEY_ALTGR, {}}},
return {ui::VKEY_UNKNOWN, {}};
}
-constexpr CodeAndShiftedChar KeyboardCodeFromCharCode(char16_t c) {
+CodeAndShiftedChar KeyboardCodeFromCharCode(char16_t c) {
switch (c) {
case ' ':
return {ui::VKEY_SPACE, {}};
GenFunc gen_func,
DeleteFunc delete_func)
: gl_(gl), id_(0u), delete_func_(delete_func) {
- (gl_->*gen_func)(1, &id_);
+ (gl_.get()->*gen_func)(1, &id_);
}
operator GLuint() const { return id_; }
~ScopedGLuint() {
if (id_ != 0) {
- (gl_->*delete_func_)(1, &id_);
+ (gl_.get()->*delete_func_)(1, &id_);
}
}
typedef void (gles2::GLES2Interface::*BindFunc)(GLenum target, GLuint id);
ScopedBinder(gles2::GLES2Interface* gl, GLuint id, BindFunc bind_func)
: gl_(gl), bind_func_(bind_func) {
- (gl_->*bind_func_)(Target, id);
+ (gl_.get()->*bind_func_)(Target, id);
}
ScopedBinder(const ScopedBinder&) = delete;
ScopedBinder& operator=(const ScopedBinder&) = delete;
- virtual ~ScopedBinder() { (gl_->*bind_func_)(Target, 0); }
+ virtual ~ScopedBinder() { (gl_.get()->*bind_func_)(Target, 0); }
private:
raw_ptr<gles2::GLES2Interface> gl_;
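Note: the `.get()` calls above all follow one pattern. The assumption is that
GCC does not resolve the built-in `->*` operator through raw_ptr's implicit
conversion the way Clang does, so the underlying pointer is fetched
explicitly. A reduced sketch with a hypothetical MyGL interface:

    #include "base/memory/raw_ptr.h"

    struct MyGL {
      void GenTextures(int n, unsigned* ids) { *ids = 1; }
    };
    using GenFn = void (MyGL::*)(int, unsigned*);

    unsigned MakeTexture(raw_ptr<MyGL> gl, GenFn gen) {
      unsigned id = 0;
      (gl.get()->*gen)(1, &id);  // Built-in ->* on a plain MyGL*; portable.
      return id;
    }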
private:
// Comparison operator is private and only defined for use by
// EqualsForTesting, see comment there for more details.
- bool operator==(const CertPrincipal& other) const = default;
+ bool operator==(const CertPrincipal& other) const;
};
} // namespace net
CFLAGS="$(echo $CFLAGS | sed -E 's/-g[0-9] /-g0 /g')"
CXXFLAGS="$(echo $CXXFLAGS | sed -E 's/-g[0-9] /-g0 /g')"
%endif
+%if 0%{?__use_clang} == 0
+ ulimit -n 16384
+%endif
%if 0%{?__enable_wrt_js}
CFLAGS="$(echo $CFLAGS | sed -E 's/-O2/-Os/g')"
// DCHECKs as much as we can; this also checks (compile-time)
// that everything inherits from CSSUnresolvedProperty.
union alignas(kCSSPropertyUnionBytes) CSSPropertyUnion {
- constexpr CSSPropertyUnion() {} // For kInvalid.
- constexpr CSSPropertyUnion(Variable property)
+ CSSPropertyUnion() {} // For kInvalid.
+ CSSPropertyUnion(Variable property)
: variable_(std::move(property)) {
DCHECK(reinterpret_cast<const CSSUnresolvedProperty *>(this) ==
static_cast<const CSSUnresolvedProperty *>(&variable_));
dynamic_height_(height) {}
explicit ViewportSize(const LayoutView*);
- bool operator==(const ViewportSize&) const = default;
+ bool operator==(const ViewportSize&) const;
// v*
double Width() const { return LargeWidth(); }
// or all-one for each byte, so we can use the code from
// https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon
non_name_mask = non_name_mask && (b >= 0);
- uint8x8_t narrowed_mask = vshrn_n_u16(non_name_mask, 4);
+  uint8x8_t narrowed_mask =
+      vshrn_n_u16(reinterpret_cast<uint16x8_t>(non_name_mask), 4);
uint64_t bits = vget_lane_u64(vreinterpret_u64_u8(narrowed_mask), 0);
if (bits == 0) {
size += 16;
}
}
if (IsSet(PropertySetFlag::kVariantEastAsian)) {
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
if (description.VariantEastAsian() !=
- font_description_.VariantEastAsian()) {
+ font_description_.VariantEastAsian())
+#endif
+ {
modified = true;
description.SetVariantEastAsian(font_description_.VariantEastAsian());
}
}
if (IsSet(PropertySetFlag::kVariantLigatures)) {
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
if (description.GetVariantLigatures() !=
- font_description_.GetVariantLigatures()) {
+ font_description_.GetVariantLigatures())
+#endif
+ {
modified = true;
description.SetVariantLigatures(font_description_.GetVariantLigatures());
}
}
if (IsSet(PropertySetFlag::kVariantNumeric)) {
- if (description.VariantNumeric() != font_description_.VariantNumeric()) {
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
+ if (description.VariantNumeric() != font_description_.VariantNumeric())
+#endif
+ {
modified = true;
description.SetVariantNumeric(font_description_.VariantNumeric());
}
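Note: the three font-variant hunks above share one shape: only the `if`
condition is fenced off while its brace block is kept, so GCC builds run the
setter unconditionally (the FIXMEs mark this behavior change). Reduced to its
skeleton, with hypothetical NeedsUpdate/ApplyUpdate stand-ins:

    #if !defined(COMPILER_GCC) || defined(__clang__)  // FIXME
      if (NeedsUpdate())
    #endif
      {
        modified = true;
        ApplyUpdate();  // On GCC this runs whether or not the values differ.
      }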
int index;
Color color;
+  FontPaletteOverride() = default;
+  FontPaletteOverride(int index, Color color) : index(index), color(color) {}
bool operator==(const FontPaletteOverride& other) const {
return index == other.index && color == other.color;
}
base::Time initial_time_;
base::TimeTicks initial_ticks_;
- std::atomic<base::TimeTicks> current_ticks_;
+  std::atomic<base::TimeTicks> current_ticks_{base::TimeTicks()};
base::Lock lock_;
std::unique_ptr<base::subtle::ScopedTimeClockOverrides> clock_override_
}
SkSL::String::appendf(&result,
- " layout(offset=%zu) %s %s",
+ " layout(offset=%d) %s %s",
offsetter.advanceOffset(u.type(), u.count()),
SkSLTypeString(u.type()),
uniformName.c_str());
if (gradData.fNumStops <= kInternalStopLimit) {
if (gradData.fNumStops <= 4) {
// Round up to 4 stops.
- gatherer->writeArray({gradData.fColors, 4});
- // The offsets are packed into a single float4 to save space.
- gatherer->write(SkSLType::kFloat4, &gradData.fOffsets);
+ gatherer->writeArray(SkSpan{gradData.fColors, 4});
+ gatherer->write(gradData.fOffsets[0]);
} else if (gradData.fNumStops <= 8) {
// Round up to 8 stops.
- gatherer->writeArray({gradData.fColors, 8});
- // The offsets are packed into a float4 array to save space.
- gatherer->writeArray(SkSLType::kFloat4, &gradData.fOffsets, 2);
+ gatherer->writeArray(SkSpan{gradData.fColors, 8});
+ gatherer->writeArray(SkSpan{gradData.fOffsets, 2});
} else {
// Did kNumInternalStorageStops change?
SkUNREACHABLE;
if (fNumStops <= kNumInternalStorageStops) {
memcpy(fColors, colors, fNumStops * sizeof(SkColor4f));
+ float* rawOffsets = fOffsets[0].ptr();
if (offsets) {
- memcpy(fOffsets, offsets, fNumStops * sizeof(float));
+ memcpy(rawOffsets, offsets, fNumStops * sizeof(float));
} else {
for (int i = 0; i < fNumStops; ++i) {
- fOffsets[i] = SkIntToFloat(i) / (fNumStops-1);
+ rawOffsets[i] = SkIntToFloat(i) / (fNumStops-1);
}
}
- // Extend the colors and offset, if necessary, to fill out the arrays
- // TODO: this should be done later when the actual code snippet has been selected!!
- for (int i = fNumStops ; i < kNumInternalStorageStops; ++i) {
+ // Extend the colors and offset, if necessary, to fill out the arrays.
+ // The unrolled binary search implementation assumes excess stops match the last real value.
+ for (int i = fNumStops; i < kNumInternalStorageStops; ++i) {
fColors[i] = fColors[fNumStops-1];
- fOffsets[i] = fOffsets[fNumStops-1];
+ rawOffsets[i] = rawOffsets[fNumStops-1];
}
} else {
fColorsAndOffsetsProxy = std::move(colorsAndOffsetsProxy);
SkSpan<const float> coeffs) {
VALIDATE_UNIFORMS(gatherer, keyContext.dict(), BuiltInCodeSnippetID::kCoeffBlender)
SkASSERT(coeffs.size() == 4);
- gatherer->write(SkSLType::kHalf4, coeffs.data());
+ gatherer->writeHalf(SkV4{coeffs[0], coeffs[1], coeffs[2], coeffs[3]});
builder->addBlock(BuiltInCodeSnippetID::kCoeffBlender);
}
imageToDraw->dimensions(),
origShader->subset());
for (int i = 0; i < SkYUVAInfo::kYUVAChannelCount; ++i) {
- memset(&imgData.fChannelSelect[i], 0, sizeof(SkColor4f));
+ memset(&imgData.fChannelSelect[i], 0, sizeof(SkV4));
}
int textureCount = 0;
SkYUVAInfo::YUVALocations yuvaLocations = yuvaProxies.yuvaLocations();
int fNumStops;
// For gradients w/ <= kNumInternalStorageStops stops we use fColors and fOffsets.
+  // The offsets are packed into float4s to save space under std140, where a
+  // bare float array would be padded to one vec4 per element.
// Otherwise we use fColorsAndOffsetsProxy.
SkPMColor4f fColors[kNumInternalStorageStops];
- float fOffsets[kNumInternalStorageStops];
+ SkV4 fOffsets[kNumInternalStorageStops / 4];
sk_sp<TextureProxy> fColorsAndOffsetsProxy;
SkGradientShader::Interpolation fInterpolation;
SkTileMode fTileModes[2];
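For the size claim in the packing comment, standalone arithmetic (std140 pads
each scalar array element to vec4 stride):

    constexpr int kStops = 8;                        // kNumInternalStorageStops
    constexpr int kRawFloatBytes = kStops * 16;      // float[8]: 16-byte stride
    constexpr int kPackedVec4Bytes = (kStops / 4) * 16;  // SkV4[2]
    static_assert(kRawFloatBytes == 128 && kPackedVec4Bytes == 32);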
SkISize fImgSize;
SkRect fSubset;
- SkColor4f fChannelSelect[4];
+ SkV4 fChannelSelect[4];
SkMatrix fYUVtoRGBMatrix;
SkPoint3 fYUVtoRGBTranslate;
#ifdef SK_DEBUG
void PipelineDataGatherer::checkReset() {
SkASSERT(fTextureDataBlock.empty());
- SkDEBUGCODE(fUniformManager.checkReset());
+ SkASSERT(fUniformManager.isReset());
}
void PipelineDataGatherer::setExpectedUniforms(SkSpan<const Uniform> expectedUniforms) {
const TextureDataBlock& textureDataBlock() { return fTextureDataBlock; }
- void write(const SkM44& mat) { fUniformManager.write(mat); }
- void write(const SkMatrix& mat) { fUniformManager.write(mat); }
- void write(const SkPMColor4f& premulColor) { fUniformManager.write(premulColor); }
- void writePaintColor(const SkPMColor4f& premulColor) {
- fUniformManager.writePaintColor(premulColor);
+ // Mimic the type-safe API available in UniformManager
+ template <typename T> void write(const T& t) { fUniformManager.write(t); }
+ template <typename T> void writeHalf(const T& t) { fUniformManager.writeHalf(t); }
+ template <typename T> void writeArray(SkSpan<const T> t) { fUniformManager.writeArray(t); }
+ template <typename T> void writeHalfArray(SkSpan<const T> t) {
+ fUniformManager.writeHalfArray(t);
}
- void write(const SkRect& rect) { fUniformManager.write(rect); }
- void write(const SkV2& v) { fUniformManager.write(v); }
- void write(const SkV4& v) { fUniformManager.write(v); }
- void write(const SkSize& size) { fUniformManager.write(size); }
- void write(const SkPoint& point) { fUniformManager.write(point); }
- void write(const SkPoint3& point3) { fUniformManager.write(point3); }
- void write(float f) { fUniformManager.write(f); }
- void write(int i) { fUniformManager.write(i); }
-
- void write(SkSLType t, const void* data) { fUniformManager.write(t, data); }
- void write(const Uniform& u, const uint8_t* data) { fUniformManager.write(u, data); }
-
- void writeArray(SkSLType t, const void* data, int n) { fUniformManager.writeArray(t, data, n); }
- void writeArray(SkSpan<const SkColor4f> colors) { fUniformManager.writeArray(colors); }
- void writeArray(SkSpan<const SkPMColor4f> colors) { fUniformManager.writeArray(colors); }
- void writeArray(SkSpan<const float> floats) { fUniformManager.writeArray(floats); }
-
- void writeHalf(float f) { fUniformManager.writeHalf(f); }
- void writeHalf(const SkMatrix& mat) { fUniformManager.writeHalf(mat); }
- void writeHalf(const SkM44& mat) { fUniformManager.writeHalf(mat); }
- void writeHalf(const SkColor4f& unpremulColor) { fUniformManager.writeHalf(unpremulColor); }
- void writeHalfArray(SkSpan<const float> floats) { fUniformManager.writeHalfArray(floats); }
+
+ void write(const Uniform& u, const void* data) { fUniformManager.write(u, data); }
+
+ void writePaintColor(const SkPMColor4f& color) { fUniformManager.writePaintColor(color); }
bool hasUniforms() const { return fUniformManager.size(); }
void doneWithExpectedUniforms() { fUniformManager.doneWithExpectedUniforms(); }
#endif // SK_DEBUG
    TextureDataBlock fTextureDataBlock;
    UniformManager fUniformManager;
};
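Hypothetical call sites for the forwarding templates above, assuming a
PipelineDataGatherer* gatherer; each line compiles only if UniformManager has
a matching overload, so no per-type wrappers are needed:

    gatherer->write(SkV4{1, 2, 3, 4});      // UniformManager::write(SkV4)
    gatherer->writeHalf(SkMatrix::I());     // UniformManager::writeHalf(SkMatrix)
    SkPMColor4f colors[8] = {};
    gatherer->writeArray(SkSpan<const SkPMColor4f>(colors, 8));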
#ifdef SK_DEBUG
+
/*
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
-
#include "src/gpu/graphite/UniformManager.h"
-
-#include "include/core/SkM44.h"
-#include "include/core/SkMatrix.h"
-#include "include/private/base/SkAlign.h"
-#include "include/private/base/SkTemplates.h"
-#include "src/base/SkHalf.h"
-#include "src/gpu/graphite/DrawTypes.h"
#include "src/gpu/graphite/PipelineData.h"
-#include "src/gpu/graphite/Uniform.h"
-
// ensure that these types are the sizes the uniform data is expecting
static_assert(sizeof(int32_t) == 4);
static_assert(sizeof(float) == 4);
-static_assert(sizeof(int16_t) == 2);
static_assert(sizeof(SkHalf) == 2);
-
namespace skgpu::graphite {
-
-//////////////////////////////////////////////////////////////////////////////
-template<typename BaseType>
-static constexpr size_t tight_vec_size(int vecLength) {
- return sizeof(BaseType) * vecLength;
-}
-
-/**
- * From Section 7.6.2.2 "Standard Uniform Block Layout":
- * 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
- * 2. If the member is a two- or four-component vector with components consuming N basic machine
- * units, the base alignment is 2N or 4N, respectively.
- * 3. If the member is a three-component vector with components consuming N
- * basic machine units, the base alignment is 4N.
- * 4. If the member is an array of scalars or vectors, the base alignment and array
- * stride are set to match the base alignment of a single array element, according
- * to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
- * array may have padding at the end; the base offset of the member following
- * the array is rounded up to the next multiple of the base alignment.
- * 5. If the member is a column-major matrix with C columns and R rows, the
- * matrix is stored identically to an array of C column vectors with R components each,
- * according to rule (4).
- * 6. If the member is an array of S column-major matrices with C columns and
- * R rows, the matrix is stored identically to a row of S × C column vectors
- * with R components each, according to rule (4).
- * 7. If the member is a row-major matrix with C columns and R rows, the matrix
- * is stored identically to an array of R row vectors with C components each,
- * according to rule (4).
- * 8. If the member is an array of S row-major matrices with C columns and R
- * rows, the matrix is stored identically to a row of S × R row vectors with C
- * components each, according to rule (4).
- * 9. If the member is a structure, the base alignment of the structure is N, where
- * N is the largest base alignment value of any of its members, and rounded
- * up to the base alignment of a vec4. The individual members of this substructure are then
- * assigned offsets by applying this set of rules recursively,
- * where the base offset of the first member of the sub-structure is equal to the
- * aligned offset of the structure. The structure may have padding at the end;
- * the base offset of the member following the sub-structure is rounded up to
- * the next multiple of the base alignment of the structure.
- * 10. If the member is an array of S structures, the S elements of the array are laid
- * out in order, according to rule (9).
- */
-template<typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
-struct Rules140 {
- /**
- * For an array of scalars or vectors this returns the stride between array elements. For
- * matrices or arrays of matrices this returns the stride between columns of the matrix. Note
- * that for single (non-array) scalars or vectors we don't require a stride.
- */
- static constexpr size_t Stride(int count) {
- SkASSERT(count >= 1 || count == graphite::Uniform::kNonArray);
- static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
- static_assert(Cols >= 1 && Cols <= 4);
- if (Cols != 1) {
- // This is a matrix or array of matrices. We return the stride between columns.
- SkASSERT(RowsOrVecLength > 1);
- uint32_t stride = Rules140<BaseType, RowsOrVecLength>::Stride(Uniform::kNonArray);
-
- // By Rule 4, the stride and alignment of the individual element must always match vec4.
- return SkAlignTo(stride, tight_vec_size<float>(4));
- }
-
- // Get alignment of a single non-array vector of BaseType by Rule 1, 2, or 3.
- int n = RowsOrVecLength == 3 ? 4 : RowsOrVecLength;
- if (count == Uniform::kNonArray) {
- return n * sizeof(BaseType);
- }
-
- // Rule 4.
-
- // Alignment of vec4 by Rule 2.
- constexpr size_t kVec4Alignment = tight_vec_size<float>(4);
- size_t kElementAlignment = tight_vec_size<BaseType>(n);
- // Round kElementAlignment up to multiple of kVec4Alignment.
- size_t m = (kElementAlignment + kVec4Alignment - 1) / kVec4Alignment;
- return m * kVec4Alignment;
- }
-};
-
-/**
- * When using the std430 storage layout, shader storage blocks will be laid out in buffer storage
- * identically to uniform and shader storage blocks using the std140 layout, except that the base
- * alignment and stride of arrays of scalars and vectors in rule 4 and of structures in rule 9 are
- * not rounded up a multiple of the base alignment of a vec4.
- */
-template<typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
-struct Rules430 {
- static constexpr size_t Stride(int count) {
- SkASSERT(count >= 1 || count == Uniform::kNonArray);
- static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
- static_assert(Cols >= 1 && Cols <= 4);
-
- if (Cols != 1) {
- // This is a matrix or array of matrices. We return the stride between columns.
- SkASSERT(RowsOrVecLength > 1);
- return Rules430<BaseType, RowsOrVecLength>::Stride(Uniform::kNonArray);
- }
-
- // Get alignment of a single non-array vector of BaseType by Rule 1, 2, or 3.
- int n = RowsOrVecLength == 3 ? 4 : RowsOrVecLength;
- if (count == Uniform::kNonArray) {
- return n * sizeof(BaseType);
- }
-
- // Rule 4 without the round up to a multiple of align-of vec4.
- return tight_vec_size<BaseType>(n);
- }
-};
-
-// The strides used here were derived from the rules we've imposed on ourselves in
-// GrMtlPipelineStateDataManger. Everything is tight except 3-component which have the stride of
-// their 4-component equivalents.
-template<typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
-struct RulesMetal {
- static constexpr size_t Stride(int count) {
- SkASSERT(count >= 1 || count == Uniform::kNonArray);
- static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
- static_assert(Cols >= 1 && Cols <= 4);
-
- if (Cols != 1) {
- // This is a matrix or array of matrices. We return the stride between columns.
- SkASSERT(RowsOrVecLength > 1);
- return RulesMetal<BaseType, RowsOrVecLength>::Stride(Uniform::kNonArray);
- }
-
- // Get alignment of a single non-array vector of BaseType by Rule 1, 2, or 3.
- int n = RowsOrVecLength == 3 ? 4 : RowsOrVecLength;
- if (count == 0) {
- return n * sizeof(BaseType);
- }
-
- return tight_vec_size<BaseType>(n);
- }
-};
-
-template<template<typename BaseType, int RowsOrVecLength, int Cols> class Rules>
-class Writer {
-private:
- template <typename MemType, typename UniformType>
- static void CopyUniforms(void* dst, const void* src, int numUniforms) {
- if constexpr (std::is_same<MemType, UniformType>::value) {
- // Matching types--use memcpy.
- std::memcpy(dst, src, numUniforms * sizeof(MemType));
- return;
- }
-
- if constexpr (std::is_same<MemType, float>::value &&
- std::is_same<UniformType, SkHalf>::value) {
- // Convert floats to half.
- const float* floatBits = static_cast<const float*>(src);
- SkHalf* halfBits = static_cast<SkHalf*>(dst);
- while (numUniforms-- > 0) {
- *halfBits++ = SkFloatToHalf(*floatBits++);
- }
- return;
- }
-
- if constexpr (std::is_same<MemType, int32_t>::value &&
- std::is_same<UniformType, int16_t>::value) {
- // Convert ints to short.
- const int32_t* intBits = static_cast<const int32_t*>(src);
- int16_t* shortBits = static_cast<int16_t*>(dst);
- while (numUniforms-- > 0) {
- *shortBits++ = int16_t(*intBits++);
- }
- return;
- }
-
- SK_ABORT("implement conversion from MemType to UniformType");
- }
-
- template <typename MemType, typename UniformType, int RowsOrVecLength = 1, int Cols = 1>
- static uint32_t Write(void *dst, int n, const MemType src[]) {
- size_t stride = Rules<UniformType, RowsOrVecLength, Cols>::Stride(n);
- n = (n == Uniform::kNonArray) ? 1 : n;
- n *= Cols;
-
- // A null value for `dst` means that this method was called to calculate the size of the
- // write without actually copying data.
- if (dst) {
- if (stride == RowsOrVecLength * sizeof(UniformType)) {
- CopyUniforms<MemType, UniformType>(dst, src, n * RowsOrVecLength);
- } else {
- for (int i = 0; i < n; ++i) {
- CopyUniforms<MemType, UniformType>(dst, src, RowsOrVecLength);
- src += RowsOrVecLength;
- dst = SkTAddOffset<void>(dst, stride);
- }
- }
- }
-
- return n * stride;
- }
-
- template <typename UniformType>
- static uint32_t WriteSkMatrices(void *dst, int n, const SkMatrix m[]) {
- // Stride() will give us the stride of each column, so mul by 3 to get matrix stride.
- size_t stride = 3 * Rules<UniformType, 3, 3>::Stride(1);
- n = std::max(n, 1);
-
- // A null value for `dst` means that this method was called to calculate the size of the
- // write without actually copying data.
- if (dst) {
- size_t offset = 0;
- for (int i = 0; i < n; ++i) {
- float mt[] = {
- m[i].get(SkMatrix::kMScaleX),
- m[i].get(SkMatrix::kMSkewY),
- m[i].get(SkMatrix::kMPersp0),
- m[i].get(SkMatrix::kMSkewX),
- m[i].get(SkMatrix::kMScaleY),
- m[i].get(SkMatrix::kMPersp1),
- m[i].get(SkMatrix::kMTransX),
- m[i].get(SkMatrix::kMTransY),
- m[i].get(SkMatrix::kMPersp2),
- };
- Write<float, UniformType, 3, 3>(SkTAddOffset<void>(dst, offset), 1, mt);
- offset += stride;
- }
- }
- return n * stride;
- }
-
-public:
- // If `dest` is a nullptr, then this method returns the size of the write without writing any
- // data.
- static uint32_t WriteUniform(SkSLType type,
- CType ctype,
- void *dest,
- int n,
- const void *src) {
- SkASSERT(n >= 1 || n == Uniform::kNonArray);
- switch (type) {
- case SkSLType::kShort:
- return Write<int32_t, int16_t>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kShort2:
- return Write<int32_t, int16_t, 2>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kShort3:
- return Write<int32_t, int16_t, 3>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kShort4:
- return Write<int32_t, int16_t, 4>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kInt:
- return Write<int32_t, int32_t>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kInt2:
- return Write<int32_t, int32_t, 2>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kInt3:
- return Write<int32_t, int32_t, 3>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kInt4:
- return Write<int32_t, int32_t, 4>(dest, n, static_cast<const int32_t *>(src));
-
- case SkSLType::kHalf:
- return Write<float, SkHalf>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kFloat:
- return Write<float, float>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kHalf2:
- return Write<float, SkHalf, 2>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kFloat2:
- return Write<float, float, 2>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kHalf3:
- return Write<float, SkHalf, 3>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kFloat3:
- return Write<float, float, 3>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kHalf4:
- return Write<float, SkHalf, 4>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kFloat4:
- return Write<float, float, 4>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kHalf2x2:
- return Write<float, SkHalf, 2, 2>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kFloat2x2:
- return Write<float, float, 2, 2>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kHalf3x3:
- switch (ctype) {
- case CType::kDefault:
- return Write<float, SkHalf, 3, 3>(dest, n, static_cast<const float *>(src));
- case CType::kSkMatrix:
- return WriteSkMatrices<SkHalf>(dest, n, static_cast<const SkMatrix *>(src));
- }
- SkUNREACHABLE;
-
- case SkSLType::kFloat3x3:
- switch (ctype) {
- case CType::kDefault:
- return Write<float, float, 3, 3>(dest, n, static_cast<const float *>(src));
- case CType::kSkMatrix:
- return WriteSkMatrices<float>(dest, n, static_cast<const SkMatrix *>(src));
- }
- SkUNREACHABLE;
-
- case SkSLType::kHalf4x4:
- return Write<float, SkHalf, 4, 4>(dest, n, static_cast<const float *>(src));
-
- case SkSLType::kFloat4x4:
- return Write<float, float, 4, 4>(dest, n, static_cast<const float *>(src));
-
- default:
- SK_ABORT("Unexpected uniform type");
- }
- }
-};
-
-static bool is_matrix(SkSLType type) {
- switch (type) {
- case SkSLType::kHalf2x2:
- case SkSLType::kHalf3x3:
- case SkSLType::kHalf4x4:
- case SkSLType::kFloat2x2:
- case SkSLType::kFloat3x3:
- case SkSLType::kFloat4x4:
- return true;
- default:
- break;
- }
- return false;
-}
-
-// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
-// alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we
-// are. This works since all alignments are powers of 2. The mask is always (alignment - 1).
-static uint32_t sksltype_to_alignment_mask(SkSLType type) {
- switch (type) {
- case SkSLType::kInt:
- case SkSLType::kUInt:
- case SkSLType::kFloat:
- return 0x3;
- case SkSLType::kInt2:
- case SkSLType::kUInt2:
- case SkSLType::kFloat2:
- return 0x7;
- case SkSLType::kInt3:
- case SkSLType::kUInt3:
- case SkSLType::kFloat3:
- case SkSLType::kInt4:
- case SkSLType::kUInt4:
- case SkSLType::kFloat4:
- return 0xF;
-
- case SkSLType::kFloat2x2:
- return 0x7;
- case SkSLType::kFloat3x3:
- return 0xF;
- case SkSLType::kFloat4x4:
- return 0xF;
-
- case SkSLType::kShort:
- case SkSLType::kUShort:
- case SkSLType::kHalf:
- return 0x1;
- case SkSLType::kShort2:
- case SkSLType::kUShort2:
- case SkSLType::kHalf2:
- return 0x3;
- case SkSLType::kShort3:
- case SkSLType::kShort4:
- case SkSLType::kUShort3:
- case SkSLType::kUShort4:
- case SkSLType::kHalf3:
- case SkSLType::kHalf4:
- return 0x7;
-
- case SkSLType::kHalf2x2:
- return 0x3;
- case SkSLType::kHalf3x3:
- return 0x7;
- case SkSLType::kHalf4x4:
- return 0x7;
-
- // This query is only valid for certain types.
- case SkSLType::kVoid:
- case SkSLType::kBool:
- case SkSLType::kBool2:
- case SkSLType::kBool3:
- case SkSLType::kBool4:
- case SkSLType::kTexture2DSampler:
- case SkSLType::kTextureExternalSampler:
- case SkSLType::kTexture2DRectSampler:
- case SkSLType::kSampler:
- case SkSLType::kTexture2D:
- case SkSLType::kInput:
- break;
- }
- SK_ABORT("Unexpected type");
-}
-
-// Given the current offset into the ubo, calculate the offset for the uniform we're trying to add
-// taking into consideration all alignment requirements. Returns the aligned start offset for the
-// new uniform.
-static uint32_t get_ubo_aligned_offset(Layout layout,
- uint32_t currentOffset,
- SkSLType type,
- bool isArray) {
- uint32_t alignmentMask;
- if (layout == Layout::kStd140 && (isArray || is_matrix(type))) {
- // std140 array and matrix element alignment always equals the base alignment of a vec4.
- alignmentMask = sksltype_to_alignment_mask(SkSLType::kFloat4);
+int UniformOffsetCalculator::advanceOffset(SkSLType type, int count) {
+ SkASSERT(SkSLTypeCanBeUniformValue(type));
+ int dimension = SkSLTypeMatrixSize(type);
+ if (dimension > 0) {
+ // All SkSL matrices are square and can be interpreted as an array of column vectors
+ count = std::max(count, 1) * dimension;
} else {
- alignmentMask = sksltype_to_alignment_mask(type);
- }
- return (currentOffset + alignmentMask) & ~alignmentMask;
-}
-
-SkSLType UniformOffsetCalculator::getUniformTypeForLayout(SkSLType type) {
- if (fLayout != Layout::kMetal) {
- // GL/Vk expect uniforms in 32-bit precision. Convert lower-precision types to 32-bit.
- switch (type) {
- case SkSLType::kShort: return SkSLType::kInt;
- case SkSLType::kUShort: return SkSLType::kUInt;
- case SkSLType::kHalf: return SkSLType::kFloat;
-
- case SkSLType::kShort2: return SkSLType::kInt2;
- case SkSLType::kUShort2: return SkSLType::kUInt2;
- case SkSLType::kHalf2: return SkSLType::kFloat2;
-
- case SkSLType::kShort3: return SkSLType::kInt3;
- case SkSLType::kUShort3: return SkSLType::kUInt3;
- case SkSLType::kHalf3: return SkSLType::kFloat3;
-
- case SkSLType::kShort4: return SkSLType::kInt4;
- case SkSLType::kUShort4: return SkSLType::kUInt4;
- case SkSLType::kHalf4: return SkSLType::kFloat4;
-
- case SkSLType::kHalf2x2: return SkSLType::kFloat2x2;
- case SkSLType::kHalf3x3: return SkSLType::kFloat3x3;
- case SkSLType::kHalf4x4: return SkSLType::kFloat4x4;
-
- default: break;
- }
- }
-
- return type;
-}
-
-void UniformOffsetCalculator::setLayout(Layout layout) {
- fLayout = layout;
- switch (layout) {
- case Layout::kStd140:
- fWriteUniform = Writer<Rules140>::WriteUniform;
- break;
- case Layout::kStd430:
- fWriteUniform = Writer<Rules430>::WriteUniform;
- break;
- case Layout::kMetal:
- fWriteUniform = Writer<RulesMetal>::WriteUniform;
- break;
- case Layout::kInvalid:
- SK_ABORT("Invalid layout type");
- break;
- }
-}
-
-UniformOffsetCalculator::UniformOffsetCalculator(Layout layout, uint32_t startingOffset)
- : fLayout(layout), fOffset(startingOffset) {
- this->setLayout(fLayout);
-}
-
-size_t UniformOffsetCalculator::advanceOffset(SkSLType type, unsigned int count) {
- SkSLType revisedType = this->getUniformTypeForLayout(type);
-
- // Insert padding as needed to get the correct uniform alignment.
- uint32_t alignedOffset = get_ubo_aligned_offset(fLayout,
- fOffset,
- revisedType,
- /*isArray=*/count != Uniform::kNonArray);
- SkASSERT(alignedOffset >= fOffset);
-
- // Append the uniform size to our offset, then return the uniform start position.
- uint32_t uniformSize = fWriteUniform(revisedType, CType::kDefault,
- /*dest=*/nullptr, count, /*src=*/nullptr);
- fOffset = alignedOffset + uniformSize;
+ dimension = SkSLTypeVecLength(type);
+ }
+ SkASSERT(1 <= dimension && dimension <= 4);
+ // Bump dimension up to 4 if the array or vec3 consumes 4 primitives per element
+ // NOTE: This affects the size, alignment already rounds up to a power of 2 automatically.
+ const bool isArray = count > Uniform::kNonArray;
+ if ((isArray && LayoutRules::AlignArraysAsVec4(fLayout)) ||
+ (dimension == 3 && (isArray || LayoutRules::PadVec3Size(fLayout)))) {
+ dimension = 4;
+ }
+ const int primitiveSize = LayoutRules::UseFullPrecision(fLayout) ||
+ SkSLTypeIsFullPrecisionNumericType(type) ? 4 : 2;
+ const int align = SkNextPow2(dimension) * primitiveSize;
+ const int alignedOffset = SkAlignTo(fOffset, align);
+ fOffset = alignedOffset + dimension * primitiveSize * std::max(count, 1);
+ fReqAlignment = std::max(fReqAlignment, align);
return alignedOffset;
}
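A worked trace of advanceOffset (values follow the code above; assumes a
layout where arrays align as vec4 and full precision applies, e.g. std140):

    // advanceOffset(SkSLType::kFloat3, /*count=*/2) with fOffset == 4:
    //   dimension     = 3, bumped to 4 (array elements pad to vec4)
    //   primitiveSize = 4 bytes (full precision)
    //   align         = SkNextPow2(4) * 4 = 16
    //   alignedOffset = SkAlignTo(4, 16) = 16
    //   fOffset       = 16 + 4 * 4 * 2 = 48; returns 16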
-
+//////////////////////////////////////////////////////////////////////////////
UniformDataBlock UniformManager::finishUniformDataBlock() {
size_t size = SkAlignTo(fStorage.size(), fReqAlignment);
size_t paddingSize = size - fStorage.size();
- char* padding = fStorage.append(paddingSize);
- memset(padding, 0, paddingSize);
+ if (paddingSize > 0) {
+ char* padding = fStorage.append(paddingSize);
+ memset(padding, 0, paddingSize);
+ }
return UniformDataBlock(SkSpan(fStorage.begin(), size));
}
-
void UniformManager::resetWithNewLayout(Layout layout) {
- if (layout != fLayout) {
- this->setLayout(layout);
- }
- this->reset();
-}
-
-void UniformManager::reset() {
- fOffset = 0;
- fReqAlignment = 0;
fStorage.clear();
+ fLayout = layout;
+ fReqAlignment = 0;
fWrotePaintColor = false;
-}
-
-void UniformManager::checkReset() const {
- SkASSERT(fOffset == 0);
- SkASSERT(fStorage.empty());
-}
-
-void UniformManager::setExpectedUniforms(SkSpan<const Uniform> expectedUniforms) {
- SkDEBUGCODE(fExpectedUniforms = expectedUniforms;)
- SkDEBUGCODE(fExpectedUniformIndex = 0;)
-}
-
-void UniformManager::checkExpected(SkSLType type, unsigned int count) {
- SkASSERT(fExpectedUniforms.size());
- SkASSERT(fExpectedUniformIndex >= 0 && fExpectedUniformIndex < (int)fExpectedUniforms.size());
-
- SkASSERT(fExpectedUniforms[fExpectedUniformIndex].type() == type);
- SkASSERT((fExpectedUniforms[fExpectedUniformIndex].count() == 0 && count == 1) ||
- fExpectedUniforms[fExpectedUniformIndex].count() == count);
- SkDEBUGCODE(fExpectedUniformIndex++;)
-}
-
-void UniformManager::doneWithExpectedUniforms() {
- SkASSERT(fExpectedUniformIndex == static_cast<int>(fExpectedUniforms.size()));
- SkDEBUGCODE(fExpectedUniforms = {};)
-}
-
-void UniformManager::writeInternal(SkSLType type,
- CType ctype,
- unsigned int count,
- const void* src) {
- SkSLType revisedType = this->getUniformTypeForLayout(type);
-
- const uint32_t startOffset = fOffset;
- const uint32_t alignedStartOffset = this->advanceOffset(revisedType, count);
- SkASSERT(fOffset > alignedStartOffset); // `fOffset` now equals the total bytes to be written
- const uint32_t bytesNeeded = fOffset - alignedStartOffset;
-
- // Insert padding if needed.
- if (alignedStartOffset > startOffset) {
- fStorage.append(alignedStartOffset - startOffset);
- }
- char* dst = fStorage.append(bytesNeeded);
- [[maybe_unused]] uint32_t bytesWritten = fWriteUniform(revisedType, ctype, dst, count, src);
- SkASSERT(bytesNeeded == bytesWritten);
-
- fReqAlignment = std::max(fReqAlignment, sksltype_to_alignment_mask(revisedType) + 1);
-}
-
-void UniformManager::write(SkSLType type, const void* src, CType ctype) {
- this->checkExpected(type, 1);
- this->writeInternal(type, ctype, Uniform::kNonArray, src);
-}
-
-void UniformManager::writeArray(SkSLType type, const void* src, unsigned int count, CType ctype) {
- // Don't write any elements if count is 0. Since Uniform::kNonArray == 0, passing count
- // directly would cause a one-element non-array write.
- if (count > 0) {
- this->checkExpected(type, count);
- this->writeInternal(type, ctype, count, src);
+#ifdef SK_DEBUG
+ fOffsetCalculator = UniformOffsetCalculator(layout, 0);
+ fExpectedUniforms = {};
+ fExpectedUniformIndex = 0;
+#endif
+}
+static std::pair<SkSLType, int> adjust_for_matrix_type(SkSLType type, int count) {
+ // All Layouts flatten matrices and arrays of matrices into arrays of columns, so update
+ // 'type' to be the column type and either multiply 'count' by the number of columns for
+ // arrays of matrices, or set to exactly the number of columns for a "non-array" matrix.
+ switch(type) {
+ case SkSLType::kFloat2x2: return {SkSLType::kFloat2, 2*std::max(1, count)};
+ case SkSLType::kFloat3x3: return {SkSLType::kFloat3, 3*std::max(1, count)};
+ case SkSLType::kFloat4x4: return {SkSLType::kFloat4, 4*std::max(1, count)};
+ case SkSLType::kHalf2x2: return {SkSLType::kHalf2, 2*std::max(1, count)};
+ case SkSLType::kHalf3x3: return {SkSLType::kHalf3, 3*std::max(1, count)};
+ case SkSLType::kHalf4x4: return {SkSLType::kHalf4, 4*std::max(1, count)};
+ // Otherwise leave type and count alone.
+ default: return {type, count};
+ }
+}
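Two data points that follow directly from the switch above:

    // adjust_for_matrix_type(SkSLType::kFloat3x3, Uniform::kNonArray)
    //     -> {SkSLType::kFloat3, 3}   // one matrix = 3 column vectors
    // adjust_for_matrix_type(SkSLType::kHalf2x2, /*count=*/5)
    //     -> {SkSLType::kHalf2, 10}   // 5 matrices * 2 columns each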
+void UniformManager::write(const Uniform& u, const void* data) {
+ SkASSERT(SkSLTypeCanBeUniformValue(u.type()));
+ SkASSERT(!u.isPaintColor()); // Must go through writePaintColor()
+ auto [type, count] = adjust_for_matrix_type(u.type(), u.count());
+ SkASSERT(SkSLTypeMatrixSize(type) < 0); // Matrix types should have been flattened
+ const bool fullPrecision = LayoutRules::UseFullPrecision(fLayout) || !IsHalfVector(type);
+ if (count == Uniform::kNonArray) {
+ if (fullPrecision) {
+ switch(SkSLTypeVecLength(type)) {
+ case 1: this->write<1, /*Half=*/false>(data, type); break;
+ case 2: this->write<2, /*Half=*/false>(data, type); break;
+ case 3: this->write<3, /*Half=*/false>(data, type); break;
+ case 4: this->write<4, /*Half=*/false>(data, type); break;
+ }
+ } else {
+ switch(SkSLTypeVecLength(type)) {
+ case 1: this->write<1, /*Half=*/true>(data, type); break;
+ case 2: this->write<2, /*Half=*/true>(data, type); break;
+ case 3: this->write<3, /*Half=*/true>(data, type); break;
+ case 4: this->write<4, /*Half=*/true>(data, type); break;
+ }
+ }
+ } else {
+ if (fullPrecision) {
+ switch(SkSLTypeVecLength(type)) {
+ case 1: this->writeArray<1, /*Half=*/false>(data, count, type); break;
+ case 2: this->writeArray<2, /*Half=*/false>(data, count, type); break;
+ case 3: this->writeArray<3, /*Half=*/false>(data, count, type); break;
+ case 4: this->writeArray<4, /*Half=*/false>(data, count, type); break;
+ }
+ } else {
+ switch(SkSLTypeVecLength(type)) {
+ case 1: this->writeArray<1, /*Half=*/true>(data, count, type); break;
+ case 2: this->writeArray<2, /*Half=*/true>(data, count, type); break;
+ case 3: this->writeArray<3, /*Half=*/true>(data, count, type); break;
+ case 4: this->writeArray<4, /*Half=*/true>(data, count, type); break;
+ }
+ }
}
}
-
-void UniformManager::write(const Uniform& u, const uint8_t* src) {
- this->checkExpected(u.type(), (u.count() == Uniform::kNonArray) ? 1 : u.count());
- this->writeInternal(u.type(), CType::kDefault, u.count(), src);
-}
-
-void UniformManager::write(const SkM44& mat) {
- static constexpr SkSLType kType = SkSLType::kFloat4x4;
- this->write(kType, &mat);
-}
-
-void UniformManager::write(const SkMatrix& mat) {
- static constexpr SkSLType kType = SkSLType::kFloat3x3;
- this->write(kType, &mat, CType::kSkMatrix);
-}
-
-void UniformManager::write(const SkPMColor4f& color) {
- static constexpr SkSLType kType = SkSLType::kFloat4;
- this->write(kType, &color);
-}
-
-// This is a specialized uniform writing entry point intended to deduplicate the paint
-// color. If a more general system is required, the deduping logic can be added to the
-// other write methods (and this specialized method would be removed).
-void UniformManager::writePaintColor(const SkPMColor4f& color) {
- static constexpr SkSLType kType = SkSLType::kFloat4;
-
- SkASSERT(fExpectedUniforms[fExpectedUniformIndex].isPaintColor());
- if (fWrotePaintColor) {
- this->checkExpected(kType, 1);
- return;
+#ifdef SK_DEBUG
+bool UniformManager::checkExpected(const void* dst, SkSLType type, int count) {
+ if (fExpectedUniformIndex >= (int) fExpectedUniforms.size()) {
+ // Either a write() happened outside of a UniformExpectationsVisitor, or more uniforms
+ // were written than were expected.
+ return false;
+ }
+ const Uniform& expected = fExpectedUniforms[fExpectedUniformIndex++];
+ if (!SkSLTypeCanBeUniformValue(expected.type())) {
+ // Not all types are supported as uniforms or supported by UniformManager
+ return false;
+ }
+ auto [expectedType, expectedCount] = adjust_for_matrix_type(expected.type(), expected.count());
+ if (expectedType != type || expectedCount != count) {
+ return false;
+ }
+ if (dst) {
+ // If we have 'dst', it's the aligned starting offset of the uniform being checked, so
+ // subtracting the address of the first byte in fStorage gives us the offset.
+ int offset = static_cast<int>(reinterpret_cast<intptr_t>(dst) -
+ reinterpret_cast<intptr_t>(fStorage.data()));
+ // Pass original expected type and count to the offset calculator for validation.
+ if (offset != fOffsetCalculator.advanceOffset(expected.type(), expected.count())) {
+ return false;
+ }
+ if (fReqAlignment != fOffsetCalculator.requiredAlignment()) {
+ return false;
+ }
+ // And if it is the paint color uniform, we should not have already written it
+ return !(fWrotePaintColor && expected.isPaintColor());
+ } else {
+ // If 'dst' is null, it's an already-visited paint color uniform, so it's not being written
+ // and not changing the offset.
+ SkASSERT(fWrotePaintColor);
+ return expected.isPaintColor();
}
-
- fWrotePaintColor = true;
- this->write(kType, &color);
-}
-
-void UniformManager::write(const SkRect& rect) {
- static constexpr SkSLType kType = SkSLType::kFloat4;
- this->write(kType, &rect);
-}
-
-void UniformManager::write(const SkPoint& point) {
- static constexpr SkSLType kType = SkSLType::kFloat2;
- this->write(kType, &point);
-}
-
-void UniformManager::write(const SkSize& size) {
- static constexpr SkSLType kType = SkSLType::kFloat2;
- this->write(kType, &size);
-}
-
-void UniformManager::write(const SkPoint3& point3) {
- static constexpr SkSLType kType = SkSLType::kFloat3;
- this->write(kType, &point3);
}
-
-void UniformManager::write(float f) {
- static constexpr SkSLType kType = SkSLType::kFloat;
- this->write(kType, &f);
-}
-
-void UniformManager::write(int i) {
- static constexpr SkSLType kType = SkSLType::kInt;
- this->write(kType, &i);
-}
-
-void UniformManager::write(const SkV2& v) {
- static constexpr SkSLType kType = SkSLType::kFloat2;
- this->write(kType, &v);
-}
-
-void UniformManager::write(const SkV4& v) {
- static constexpr SkSLType kType = SkSLType::kFloat4;
- this->write(kType, &v);
-}
-
-void UniformManager::writeArray(SkSpan<const SkColor4f> arr) {
- static constexpr SkSLType kType = SkSLType::kFloat4;
- this->writeArray(kType, arr.data(), arr.size());
-}
-
-void UniformManager::writeArray(SkSpan<const SkPMColor4f> arr) {
- static constexpr SkSLType kType = SkSLType::kFloat4;
- this->writeArray(kType, arr.data(), arr.size());
-}
-
-void UniformManager::writeArray(SkSpan<const float> arr) {
- static constexpr SkSLType kType = SkSLType::kFloat;
- this->writeArray(kType, arr.data(), arr.size());
-}
-
-void UniformManager::writeHalf(float f) {
- static constexpr SkSLType kType = SkSLType::kHalf;
- this->write(kType, &f);
-}
-
-void UniformManager::writeHalf(const SkMatrix& mat) {
- static constexpr SkSLType kType = SkSLType::kHalf3x3;
- this->write(kType, &mat, CType::kSkMatrix);
-}
-
-void UniformManager::writeHalf(const SkM44& mat) {
- static constexpr SkSLType kType = SkSLType::kHalf4x4;
- this->write(kType, &mat);
+bool UniformManager::isReset() const {
+ return fStorage.empty();
}
-
-void UniformManager::writeHalf(const SkColor4f& unpremulColor) {
- static constexpr SkSLType kType = SkSLType::kHalf4;
- this->write(kType, &unpremulColor);
+void UniformManager::setExpectedUniforms(SkSpan<const Uniform> expected) {
+ fExpectedUniforms = expected;
+ fExpectedUniformIndex = 0;
}
-
-void UniformManager::writeHalfArray(SkSpan<const float> arr) {
- static constexpr SkSLType kType = SkSLType::kHalf;
- this->writeArray(kType, arr.data(), arr.size());
+void UniformManager::doneWithExpectedUniforms() {
+ SkASSERT(fExpectedUniformIndex == static_cast<int>(fExpectedUniforms.size()));
+ fExpectedUniforms = {};
}
-
+#endif // SK_DEBUG
} // namespace skgpu::graphite
#ifndef skgpu_UniformManager_DEFINED
#define skgpu_UniformManager_DEFINED
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRect.h"
#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
#include "include/core/SkSpan.h"
#include "include/private/SkColorData.h"
+#include "include/private/base/SkAlign.h"
#include "include/private/base/SkTDArray.h"
+#include "src/base/SkHalf.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkMatrixPriv.h"
#include "src/core/SkSLTypeShared.h"
#include "src/gpu/graphite/ResourceTypes.h"
#include "src/gpu/graphite/Uniform.h"
-class SkM44;
-class SkMatrix;
-struct SkPoint;
-struct SkPoint3;
-struct SkRect;
-struct SkSize;
-struct SkV2;
-struct SkV4;
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <memory>
namespace skgpu::graphite {
-enum class CType : unsigned {
- // Any float/half, vector of floats/half, or matrices of floats/halfs are a tightly
- // packed array of floats. Similarly, any bool/shorts/ints are a tightly packed array
- // of int32_t.
- kDefault,
- // Can be used with kFloat3x3 or kHalf3x3. SkMatrix stores its data in row-major order, so
- // cannot be copied directly to uniforms that expect col-major order. SkM44 is already
- // column-major so can use kDefault.
- kSkMatrix,
-
- kLast = kSkMatrix
-};
-
class UniformDataBlock;
+/**
+ * Layout::kStd140
+ * ===============
+ *
+ * From OpenGL Specification Section 7.6.2.2 "Standard Uniform Block Layout":
+ * 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
+ * 2. If the member is a two- or four-component vector with components consuming N basic machine
+ * units, the base alignment is 2N or 4N, respectively.
+ * 3. If the member is a three-component vector with components consuming N
+ * basic machine units, the base alignment is 4N.
+ * 4. If the member is an array of scalars or vectors, the base alignment and array
+ * stride are set to match the base alignment of a single array element, according
+ * to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
+ * array may have padding at the end; the base offset of the member following
+ * the array is rounded up to the next multiple of the base alignment.
+ * 5. If the member is a column-major matrix with C columns and R rows, the
+ * matrix is stored identically to an array of C column vectors with R components each,
+ * according to rule (4).
+ * 6. If the member is an array of S column-major matrices with C columns and
+ * R rows, the matrix is stored identically to a row of S × C column vectors
+ * with R components each, according to rule (4).
+ * 7. If the member is a row-major matrix with C columns and R rows, the matrix
+ * is stored identically to an array of R row vectors with C components each,
+ * according to rule (4).
+ * 8. If the member is an array of S row-major matrices with C columns and R
+ * rows, the matrix is stored identically to a row of S × R row vectors with C
+ * components each, according to rule (4).
+ * 9. If the member is a structure, the base alignment of the structure is N, where
+ * N is the largest base alignment value of any of its members, and rounded
+ * up to the base alignment of a vec4. The individual members of this substructure are then
+ * assigned offsets by applying this set of rules recursively,
+ * where the base offset of the first member of the sub-structure is equal to the
+ * aligned offset of the structure. The structure may have padding at the end;
+ * the base offset of the member following the sub-structure is rounded up to
+ * the next multiple of the base alignment of the structure.
+ * 10. If the member is an array of S structures, the S elements of the array are laid
+ * out in order, according to rule (9).
+ *
+ * Layout::kStd430
+ * ===============
+ *
+ * When using the std430 storage layout, shader storage blocks will be laid out in buffer storage
+ * identically to uniform and shader storage blocks using the std140 layout, except that the base
+ * alignment and stride of arrays of scalars and vectors in rule 4 and of structures in rule 9 are
+ * not rounded up to a multiple of the base alignment of a vec4.
+ *
+ * NOTE: While not explicitly stated, the layout rules for WebGPU and WGSL are identical to std430
+ * for SSBOs and nearly identical to std140 for UBOs. 2x2 matrices are treated as two float2's
+ * (not an array), so the size is 16 and alignment is 8 (vs. size and alignment of 16 for a float4
+ * or a size of 32 and alignment of 16 for a float2[2] in std140).
+ *
+ * Layout::kMetal
+ * ===============
+ *
+ * SkSL converts its types to the non-packed SIMD vector types in MSL. The size and alignment rules
+ * are equivalent to std430 with the exception of half3 and float3. In std430, the size consumed
+ * by non-array uniforms of these types is 3N while Metal consumes 4N (which is equal to the
+ * alignment of a vec3 in both Layouts).
+ *
+ * Half vs. Float Uniforms
+ * =======================
+ *
+ * Regardless of the precision when the shader is executed, std140 and std430 layouts consume
+ * "half"-based uniforms in full 32-bit precision. Metal consumes "half"-based uniforms expecting
+ * them to have already been converted to f16. WebGPU has an extension to support f16 types, which
+ * behave like this, but we do not currently utilize it.
+ *
+ * The rules for std430 can be easily extended to f16 by applying N = 2 instead of N = 4 for the
+ * base primitive alignment.
+ *
+ * NOTE: This could also apply to the int vs. short or uint vs. ushort types, but these smaller
+ * integer types are not supported on all platforms as uniforms. We disallow short integer uniforms
+ * entirely, and if the data savings are required, packing should be implemented manually.
+ * Short integer vertex attributes are supported when the vector type lets it pack into 32 bits
+ * (e.g. int16x2 or int8x4).
+ *
+ * Generalized Layout Rules
+ * ========================
+ *
+ * From the Layout descriptions above, the following simpler rules are sufficient:
+ *
+ * 1. If the base primitive type is "half" and the Layout expects half floats, N = 2; else, N = 4.
+ *
+ * 2. For arrays of scalars or vectors (with # of components, M = 1,2,3,4):
+ * a. If arrays must be aligned on vec4 boundaries OR M=3, then align and stride = 4*N.
+ * b. Otherwise, the align and stride = M*N.
+ *
+ * In both cases, the total size required for the uniform is "array size"*stride.
+ *
+ * 3. For single scalars or vectors (M = 1,2,3,4), the align is SkNextPow2(M)*N (e.g. N,2N,4N,4N).
+ * a. If M = 3 and the Layout aligns the size with the alignment, the size is 4*N and N
+ * padding bytes must be zero'ed out afterwards.
+ * b. Otherwise, the align and size = M*N
+ *
+ * 4. The starting offset to write data is the current offset aligned to the calculated align value.
+ * The current offset is then incremented by the total size of the uniform.
+ *
+ * For arrays and padded vec3's, the padding is included in the stride and total size, meeting
+ * the requirements of the original rule 4 in std140. When a single float3 that is not padded
+ * is written, the next offset only advances 12 bytes, allowing a smaller type to pack tightly
+ * next to the Z coordinate.
+ *
+ * When N = 4, the CPU and GPU primitives are compatible, regardless of being float, int, or uint.
+ * Contiguous ranges between any padding (for alignment or for array stride) can be memcpy'ed.
+ * When N = 2, the CPU data is float and the GPU data f16, so values must be converted one primitive
+ * at a time using SkFloatToHalf or skvx::to_half.
+ *
+ * The UniformManager will zero out any padding bytes (either prepended for starting alignment,
+ * or appended for stride alignment). This is so that the final byte array can be hashed for uniform
+ * value de-duplication before uploading to the GPU.
+ *
+ * While SkSL supports non-square matrices, the SkSLType enum and Graphite only expose support for
+ * square matrices. Graphite assumes all matrix uniforms are in column-major order. This matches the
+ * data layout of SkM44 already and UniformManager automatically transposes SkMatrix (which is in
+ * row-major data) to be column-major. Thus, for layout purposes, a matrix or an array of matrices
+ * can be laid out equivalently to an array of the column type with an array count multiplied by the
+ * number of columns.
+ *
+ * Graphite does not embed structs within structs for its UBO or SSBO declarations for paint or
+ * RenderSteps. However, when the "uniforms" are defined for use with SSBO random access, the
+ * ordered set of uniforms is actually defining a struct instead of just a top-level interface.
+ * As such, once all uniforms are recorded, the size must be rounded up to the maximum alignment
+ * encountered for its members to satisfy alignment rules for all Layouts.
+ *
+ * If Graphite starts to define sub-structs, UniformOffsetCalculator can be used recursively.
+ */
+namespace LayoutRules {
+ // The three diverging behaviors across the different Layouts:
+ static constexpr bool PadVec3Size(Layout layout) { return layout == Layout::kMetal; }
+ static constexpr bool AlignArraysAsVec4(Layout layout) { return layout == Layout::kStd140; }
+ static constexpr bool UseFullPrecision(Layout layout) { return layout != Layout::kMetal; }
+}
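A few spot-checks of these predicates (the static_asserts are an illustrative sketch, not part of the patch):

// Only Metal pads a lone (h)vec3 out to vec4 size; only std140 rounds array strides
// up to vec4 alignment; Metal alone consumes "half" data as real f16.
static_assert(LayoutRules::PadVec3Size(Layout::kMetal) &&
              !LayoutRules::PadVec3Size(Layout::kStd430));
static_assert(LayoutRules::AlignArraysAsVec4(Layout::kStd140) &&
              !LayoutRules::AlignArraysAsVec4(Layout::kStd430));
static_assert(LayoutRules::UseFullPrecision(Layout::kStd140) &&
              !LayoutRules::UseFullPrecision(Layout::kMetal));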
+
class UniformOffsetCalculator {
public:
- UniformOffsetCalculator(Layout layout, uint32_t startingOffset);
+ UniformOffsetCalculator() = default;
+ UniformOffsetCalculator(Layout layout, int offset) : fLayout(layout), fOffset(offset) {}
- size_t size() const { return fOffset; }
+ // NOTE: The returned size represents the last consumed byte (if the recorded
+ // uniforms are embedded within a struct, this will need to be rounded up to a multiple of
+ // requiredAlignment()).
+ int size() const { return fOffset; }
+ int requiredAlignment() const { return fReqAlignment; }
// Calculates the correctly aligned offset to accommodate `count` instances of `type` and
// advances the internal offset. Returns the correctly aligned start offset.
// After a call to this method, `size()` will return the offset to the end of `count` instances
// of `type` (while the return value equals the aligned start offset). Subsequent calls will
// calculate the new start offset starting at `size()`.
- size_t advanceOffset(SkSLType type, unsigned int count);
-
-protected:
- SkSLType getUniformTypeForLayout(SkSLType type);
- void setLayout(Layout);
-
- using WriteUniformFn = uint32_t (*)(SkSLType type,
- CType ctype,
- void *dest,
- int n,
- const void *src);
+ int advanceOffset(SkSLType type, int count = Uniform::kNonArray);
- WriteUniformFn fWriteUniform;
- Layout fLayout; // TODO: eventually 'fLayout' will not need to be stored
- uint32_t fOffset = 0;
+private:
+ Layout fLayout = Layout::kInvalid;
+ int fOffset = 0;
+ int fReqAlignment = 0;
};
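A usage sketch (illustrative, not part of the patch): the expected values follow the generalized rules documented above, assuming a std140 block of { float a; float3 b; float2 c; }.

UniformOffsetCalculator calc(Layout::kStd140, /*offset=*/0);
int a = calc.advanceOffset(SkSLType::kFloat);  // 0  (align 4)
int b = calc.advanceOffset(SkSLType::kFloat3); // 16 (align 16, consumes 12 bytes)
int c = calc.advanceOffset(SkSLType::kFloat2); // 32 (align 8 rounds 28 up)
SkASSERT(a == 0 && b == 16 && c == 32);
SkASSERT(calc.size() == 40 && calc.requiredAlignment() == 16);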
-class UniformManager : public UniformOffsetCalculator {
+class UniformManager {
public:
- UniformManager(Layout layout) : UniformOffsetCalculator(layout, /*startingOffset=*/0) {}
+ UniformManager(Layout layout) { this->resetWithNewLayout(layout); }
UniformDataBlock finishUniformDataBlock();
size_t size() const { return fStorage.size(); }
- void resetWithNewLayout(Layout);
- void reset();
+ void resetWithNewLayout(Layout layout);
+ void reset() { this->resetWithNewLayout(fLayout); }
+
+ // scalars
+ void write(float f) { this->write<SkSLType::kFloat>(&f); }
+ void write(int32_t i) { this->write<SkSLType::kInt >(&i); }
+ void writeHalf(float f) { this->write<SkSLType::kHalf >(&f); }
+
+ // [i|h]vec4 and arrays thereof (just add overloads as needed)
+ void write(const SkPMColor4f& c) { this->write<SkSLType::kFloat4>(c.vec()); }
+ void write(const SkRect& r) { this->write<SkSLType::kFloat4>(r.asScalars()); }
+ void write(const SkV4& v) { this->write<SkSLType::kFloat4>(v.ptr()); }
+
+ void write(const SkIRect& r) { this->write<SkSLType::kInt4>(&r); }
+
+ void writeHalf(const SkPMColor4f& c) { this->write<SkSLType::kHalf4>(c.vec()); }
+ void writeHalf(const SkRect& r) { this->write<SkSLType::kHalf4>(r.asScalars()); }
+ void writeHalf(const SkV4& v) { this->write<SkSLType::kHalf4>(v.ptr()); }
+
+ void writeArray(SkSpan<const SkV4> v) {
+ this->writeArray<SkSLType::kFloat4>(v.data(), v.size());
+ }
+ void writeArray(SkSpan<const SkPMColor4f> c) {
+ this->writeArray<SkSLType::kFloat4>(c.data(), c.size());
+ }
+ void writeHalfArray(SkSpan<const SkPMColor4f> c) {
+ this->writeArray<SkSLType::kHalf4>(c.data(), c.size());
+ }
+
+ // [i|h]vec3
+ void write(const SkV3& v) { this->write<SkSLType::kFloat3>(v.ptr()); }
+ void write(const SkPoint3& p) { this->write<SkSLType::kFloat3>(&p); }
+
+ void writeHalf(const SkV3& v) { this->write<SkSLType::kHalf3>(v.ptr()); }
+ void writeHalf(const SkPoint3& p) { this->write<SkSLType::kHalf3>(&p); }
+
+ // NOTE: 3-element vectors never pack efficiently in arrays, so avoid using them
+
+ // [i|h]vec2
+ void write(const SkV2& v) { this->write<SkSLType::kFloat2>(v.ptr()); }
+ void write(const SkSize& s) { this->write<SkSLType::kFloat2>(&s); }
+ void write(const SkPoint& p) { this->write<SkSLType::kFloat2>(&p); }
- // Write a single instance of `type` from the data block referenced by `src`.
- // DEPRECATED: Prefer to use a compile-time typed write method.
- void write(SkSLType type, const void* src, CType ctype = CType::kDefault);
+ void write(const SkISize& s) { this->write<SkSLType::kInt2>(&s); }
- // Write an array of `type` with `count` elements from the data block referenced by `src`.
- // Does nothing if `count` is 0.
- // DEPRECATED: Prefer to use a compile-time typed write method.
- void writeArray(SkSLType type, const void* src, unsigned int count,
- CType ctype = CType::kDefault);
+ void writeHalf(const SkV2& v) { this->write<SkSLType::kHalf2>(v.ptr()); }
+ void writeHalf(const SkSize& s) { this->write<SkSLType::kHalf2>(&s); }
+ void writeHalf(const SkPoint& p) { this->write<SkSLType::kHalf2>(&p); }
+
+ // NOTE: 2-element vectors don't pack efficiently in std140, so avoid using them
+
+ // matrices
+ void write(const SkM44& m) {
+ // All Layouts treat a 4x4 column-major matrix as an array of vec4's, which is exactly how
+ // SkM44 already stores its data.
+ this->writeArray<SkSLType::kFloat4>(SkMatrixPriv::M44ColMajor(m), 4);
+ }
+
+ void writeHalf(const SkM44& m) {
+ this->writeArray<SkSLType::kHalf4>(SkMatrixPriv::M44ColMajor(m), 4);
+ }
+
+ void write(const SkMatrix& m) {
+ // SkMatrix is row-major, so rewrite to column major. All Layouts treat a 3x3 column
+ // major matrix as an array of vec3's.
+ float colMajor[9] = {m[0], m[3], m[6],
+ m[1], m[4], m[7],
+ m[2], m[5], m[8]};
+ this->writeArray<SkSLType::kFloat3>(colMajor, 3);
+ }
+ void writeHalf(const SkMatrix& m) {
+ float colMajor[9] = {m[0], m[3], m[6],
+ m[1], m[4], m[7],
+ m[2], m[5], m[8]};
+ this->writeArray<SkSLType::kHalf3>(colMajor, 3);
+ }
+
+ // NOTE: 2x2 matrices can be packed manually into a vec4 just as efficiently or better, so
+ // prefer that.
+
+ // This is a specialized uniform writing entry point intended to deduplicate the paint
+ // color. If a more general system is required, the deduping logic can be added to the
+ // other write methods (and this specialized method would be removed).
+ void writePaintColor(const SkPMColor4f& color) {
+ if (fWrotePaintColor) {
+ // Validate expected uniforms, but don't write a second copy since the paint color
+ // uniform can only ever be declared once in the final SkSL program.
+ SkASSERT(this->checkExpected(/*dst=*/nullptr, SkSLType::kFloat4, Uniform::kNonArray));
+ } else {
+ this->write<SkSLType::kFloat4>(&color);
+ fWrotePaintColor = true;
+ }
+ }
// Copy from `src` using Uniform array-count semantics.
- void write(const Uniform&, const uint8_t* src);
-
- void write(const SkM44&);
- void write(const SkMatrix&);
- void write(const SkPMColor4f&);
- void writePaintColor(const SkPMColor4f&);
- void write(const SkRect&);
- void write(const SkV2&);
- void write(const SkV4&);
- void write(const SkSize&);
- void write(const SkPoint&);
- void write(const SkPoint3&);
- void write(float f);
- void write(int);
-
- void writeArray(SkSpan<const SkColor4f>);
- void writeArray(SkSpan<const SkPMColor4f>);
- void writeArray(SkSpan<const float>);
-
- void writeHalf(float f);
- void writeHalf(const SkMatrix&);
- void writeHalf(const SkM44&);
- void writeHalf(const SkColor4f&);
- void writeHalfArray(SkSpan<const float>);
-
- // Debug only utilities used for debug assertions and tests.
- void checkReset() const;
- void setExpectedUniforms(SkSpan<const Uniform>);
- void checkExpected(SkSLType, unsigned int count);
+ void write(const Uniform&, const void* src);
+
+ // Debug-only functions to control uniform expectations.
+#ifdef SK_DEBUG
+ bool isReset() const;
+ void setExpectedUniforms(SkSpan<const Uniform> expected);
void doneWithExpectedUniforms();
+#endif // SK_DEBUG
private:
- // Writes a single element of the given `type` if `count` == 0 (aka Uniform::kNonArray).
- // Writes an array of `count` elements if `count` > 0, obeying any array layout constraints.
- //
- // Do not call this method directly for any new write()/writeArray() overloads. Instead
- // call the write(SkSLType, const void*) and writeArray(SkSLType, const void*, unsigned int)
- // overloads which correctly abstract the array vs non-array semantics.
- void writeInternal(SkSLType type, CType ctype, unsigned int count, const void* src);
+ // All public write() functions in UniformManager already match scalar/vector SkSLTypes or have
+ // explicitly converted matrix SkSLTypes to a writeArray<column type>, so this only needs to
+ // check the kHalf through kHalf4 range.
+ static constexpr bool IsHalfVector(SkSLType type) {
+ return type >= SkSLType::kHalf && type <= SkSLType::kHalf4;
+ }
+ // Other than validation, actual layout doesn't care about 'type' and the logic can be
+ // based on vector length and whether or not it's half or full precision.
+ template <int N, bool Half> void write(const void* src, SkSLType type);
+ template <int N, bool Half> void writeArray(const void* src, int count, SkSLType type);
+
+ // Helpers to select dimensionality and convert to full precision if required by the Layout.
+ template <SkSLType Type> void write(const void* src) {
+ static constexpr int N = SkSLTypeVecLength(Type);
+ if (IsHalfVector(Type) && !LayoutRules::UseFullPrecision(fLayout)) {
+ this->write<N, /*Half=*/true>(src, Type);
+ } else {
+ this->write<N, /*Half=*/false>(src, Type);
+ }
+ }
+ template <SkSLType Type> void writeArray(const void* src, int count) {
+ static constexpr int N = SkSLTypeVecLength(Type);
+ if (IsHalfVector(Type) && !LayoutRules::UseFullPrecision(fLayout)) {
+ this->writeArray<N, /*Half=*/true>(src, count, Type);
+ } else {
+ this->writeArray<N, /*Half=*/false>(src, count, Type);
+ }
+ }
+
+ // This is marked 'inline' so that it can be defined below with write() and writeArray() and
+ // still link correctly.
+ inline char* append(int alignment, int size);
+
+ SkTDArray<char> fStorage;
+
+ Layout fLayout;
+ int fReqAlignment = 0;
// The paint color is treated special and we only add its uniform once.
bool fWrotePaintColor = false;
+
+ // Debug-only verification that UniformOffsetCalculator is consistent and that write() calls
+ // match the expected uniform declaration order.
#ifdef SK_DEBUG
+ UniformOffsetCalculator fOffsetCalculator; // should match the implicit offsets produced by append()
SkSpan<const Uniform> fExpectedUniforms;
int fExpectedUniformIndex = 0;
+
+ bool checkExpected(const void* dst, SkSLType, int count);
#endif // SK_DEBUG
+};
- SkTDArray<char> fStorage;
- uint32_t fReqAlignment = 0;
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Definitions
+
+// Shared helper for both write() and writeArray()
+template <int N, bool Half>
+struct LayoutTraits {
+ static_assert(1 <= N && N <= 4);
+
+ static constexpr int kElemSize = Half ? sizeof(SkHalf) : sizeof(float);
+ static constexpr int kSize = N * kElemSize;
+ static constexpr int kAlign = SkNextPow2_portable(N) * kElemSize;
+
+ // Reads kSize bytes from 'src' and copies or converts (float->half) the N values
+ // into 'dst'. Does not add any other padding that may depend on usage and Layout.
+ static void Copy(const void* src, void* dst) {
+ if constexpr (Half) {
+ // TODO(b/318684744): Use `to_half(skvx::float4::Load(src)).store(dst);` when N=4
+ // once values encode the same as SkFloatToHalf.
+ const float* srcF = reinterpret_cast<const float*>(src);
+ SkHalf* dstH = reinterpret_cast<SkHalf*>(dst);
+ for (int i = 0; i < N; ++i) {
+ *dstH++ = SkFloatToHalf(*srcF++);
+ }
+ } else {
+ memcpy(dst, src, kSize);
+ }
+ }
+
+#ifdef SK_DEBUG
+ static void Validate(const void* src, SkSLType type, Layout layout) {
+ // Src validation
+ SkASSERT(src);
+ // All primitives on the CPU side should be 4 byte aligned
+ SkASSERT(SkIsAlign4(reinterpret_cast<intptr_t>(src)));
+
+ // Type and validation layout
+ SkASSERT(SkSLTypeCanBeUniformValue(type));
+ SkASSERT(SkSLTypeVecLength(type) == N); // Matrix types should have been flattened already
+ if constexpr (Half) {
+ SkASSERT(SkSLTypeIsFloatType(type));
+ SkASSERT(!SkSLTypeIsFullPrecisionNumericType(type));
+ SkASSERT(!LayoutRules::UseFullPrecision(layout));
+ } else {
+ SkASSERT(SkSLTypeIsFullPrecisionNumericType(type) ||
+ LayoutRules::UseFullPrecision(layout));
+ }
+ }
+#endif
};
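Some compile-time spot-checks of the traits (illustrative static_asserts, not part of the patch):

static_assert(LayoutTraits<3, /*Half=*/false>::kSize == 12);  // float3 payload
static_assert(LayoutTraits<3, /*Half=*/false>::kAlign == 16); // SkNextPow2_portable(3) * 4
static_assert(LayoutTraits<2, /*Half=*/true>::kSize == 4);    // half2 payload
static_assert(LayoutTraits<2, /*Half=*/true>::kAlign == 4);   // 2 * sizeof(SkHalf)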
+template<int N, bool Half>
+void UniformManager::write(const void* src, SkSLType type) {
+ using L = LayoutTraits<N, Half>;
+ SkDEBUGCODE(L::Validate(src, type, fLayout);)
+
+ // Layouts diverge in how vec3 size is determined for non-array usage
+ char* dst = (N == 3 && LayoutRules::PadVec3Size(fLayout))
+ ? this->append(L::kAlign, L::kSize + L::kElemSize)
+ : this->append(L::kAlign, L::kSize);
+ SkASSERT(this->checkExpected(dst, type, Uniform::kNonArray));
+
+ L::Copy(src, dst);
+ if (N == 3 && LayoutRules::PadVec3Size(fLayout)) {
+ memset(dst + L::kSize, 0, L::kElemSize);
+ }
+}
+
+template<int N, bool Half>
+void UniformManager::writeArray(const void* src, int count, SkSLType type) {
+ using L = LayoutTraits<N, Half>;
+ static constexpr int kSrcStride = N * 4; // Source data is always in multiples of 4 bytes.
+
+ SkDEBUGCODE(L::Validate(src, type, fLayout);)
+ SkASSERT(count > 0);
+
+ if (Half || N == 3 || (N != 4 && LayoutRules::AlignArraysAsVec4(fLayout))) {
+ // A non-dense array (N == 3 is always padded to vec4, or the Layout requires it), or we
+ // have to perform half conversion, so iterate over each element.
+ static constexpr int kStride = Half ? L::kAlign : 4*L::kElemSize;
+ SkASSERT(!(Half && LayoutRules::AlignArraysAsVec4(fLayout))); // should be exclusive
+
+ const char* srcBytes = reinterpret_cast<const char*>(src);
+ char* dst = this->append(kStride, kStride*count);
+ SkASSERT(this->checkExpected(dst, type, count));
+
+ for (int i = 0; i < count; ++i) {
+ L::Copy(srcBytes, dst);
+ if constexpr (kStride - L::kSize > 0) {
+ memset(dst + L::kSize, 0, kStride - L::kSize);
+ }
+
+ dst += kStride;
+ srcBytes += kSrcStride;
+ }
+ } else {
+ // A dense array with no type conversion, so copy in one go.
+ SkASSERT(L::kAlign == L::kSize && kSrcStride == L::kSize);
+ char* dst = this->append(L::kAlign, L::kSize*count);
+ SkASSERT(this->checkExpected(dst, type, count));
+
+ memcpy(dst, src, L::kSize*count);
+ }
+}
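For reference, the strides the branches above select, summarized by hand for a hypothetical 5-element array (illustrative, not part of the patch):

//   std430 float[5]  -> dense path:           stride 4,  20 bytes
//   std140 float[5]  -> padded path:          stride 16, 80 bytes (AlignArraysAsVec4)
//   any    float3[5] -> padded path:          stride 16, 80 bytes (vec3 fills a vec4 slot)
//   Metal  half4[5]  -> half-conversion path: stride 8,  40 bytes (f32 -> f16 per element)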
+
+char* UniformManager::append(int alignment, int size) {
+ SkASSERT(size > 0);
+
+ const int offset = fStorage.size();
+ const int padding = SkAlignTo(offset, alignment) - offset;
+
+ // These are just asserts, not aborts, because SkSL compilation imposes limits on the size
+ // of runtime effect arrays, and internal shaders should not be using excessive lengths.
+ SkASSERT(std::numeric_limits<int>::max() - alignment >= offset);
+ SkASSERT(std::numeric_limits<int>::max() - size >= padding);
+
+ char* dst = fStorage.append(size + padding);
+ if (padding > 0) {
+ memset(dst, 0, padding);
+ dst += padding;
+ }
+
+ fReqAlignment = std::max(fReqAlignment, alignment);
+ return dst;
+}
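A worked example of the padding arithmetic (the numbers are chosen for illustration, not part of the patch):

// With fStorage.size() == 18 and a call append(/*alignment=*/16, /*size=*/12):
//   offset  = 18
//   padding = SkAlignTo(18, 16) - 18 == 32 - 18 == 14  (zeroed for stable hashing)
// The returned dst points at byte 32, fStorage grows to 44 bytes, and
// fReqAlignment becomes at least 16.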
+
} // namespace skgpu::graphite
#endif // skgpu_UniformManager_DEFINED
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
-
#include "src/base/SkHalf.h"
#include "src/core/SkSLTypeShared.h"
#include "src/gpu/graphite/PipelineData.h"
#include "src/gpu/graphite/Uniform.h"
#include "src/gpu/graphite/UniformManager.h"
#include "tests/Test.h"
-
using namespace skgpu::graphite;
-
static constexpr Layout kLayouts[] = {
Layout::kStd140,
Layout::kStd430,
Layout::kMetal,
};
-
-// This list excludes SkSLTypes that we don't support in uniforms, like Bool, UInt or UShort.
+// This list excludes SkSLTypes that we don't support in uniforms, like Bool, Short, UInt or
+// UShort.
static constexpr SkSLType kTypes[] = {
- SkSLType::kShort, SkSLType::kShort2, SkSLType::kShort3, SkSLType::kShort4, //
SkSLType::kFloat, SkSLType::kFloat2, SkSLType::kFloat3, SkSLType::kFloat4, //
SkSLType::kHalf, SkSLType::kHalf2, SkSLType::kHalf3, SkSLType::kHalf4, //
SkSLType::kInt, SkSLType::kInt2, SkSLType::kInt3, SkSLType::kInt4, //
SkSLType::kFloat2x2, SkSLType::kFloat3x3, SkSLType::kFloat4x4, //
SkSLType::kHalf2x2, SkSLType::kHalf3x3, SkSLType::kHalf4x4,
};
-
static constexpr float kFloats[16] = { 1.0f, 2.0f, 3.0f, 4.0f,
5.0f, 6.0f, 7.0f, 8.0f,
9.0f, 10.0f, 11.0f, 12.0f,
13.0f, 14.0f, 15.0f, 16.0f };
-
static constexpr SkHalf kHalfs[16] = { 0x3C00, 0x4000, 0x4200, 0x4400,
0x4500, 0x4600, 0x4700, 0x4800,
0x4880, 0x4900, 0x4980, 0x4A00,
0x4A80, 0x4B00, 0x4B80, 0x4C00 };
-
-static constexpr int16_t kShorts[16] = { 1, -2, 3, -4,
- 5, -6, 7, -8,
- 9, -10, 11, -12,
- 13, -14, 15, -16 };
-
static constexpr int32_t kInts[16] = { 1, -2, 3, -4,
5, -6, 7, -8,
9, -10, 11, -12,
13, -14, 15, -16 };
-
static size_t element_size(Layout layout, SkSLType type) {
// Metal encodes half-precision uniforms in 16 bits.
// Other layouts are expected to encode uniforms in 32 bits.
return (layout == Layout::kMetal && !SkSLTypeIsFullPrecisionNumericType(type)) ? 2 : 4;
}
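Illustrative spot-checks of this helper (not part of the patch):

// element_size(Layout::kMetal,  SkSLType::kHalf3)  == 2  (encoded as f16)
// element_size(Layout::kStd140, SkSLType::kHalf3)  == 4  (half promoted to 32 bits)
// element_size(Layout::kMetal,  SkSLType::kFloat3) == 4  (full precision everywhere)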
-
DEF_GRAPHITE_TEST(UniformManagerCheckSingleUniform, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager can hold all the basic uniform types, in every layout.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type : kTypes) {
const Uniform expectations[] = {{"uniform", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
- REPORTER_ASSERT(r, mgr.size() > 0);
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
+ REPORTER_ASSERT(r, mgr.size() > 0, "Layout: %s - Type: %s uniform write failed",
+ LayoutString(layout), SkSLTypeString(type));
mgr.reset();
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerCheckFloatEncoding, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager encodes float data properly.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type : kTypes) {
// Only test scalar and vector floats. (Matrices can introduce padding between values.)
int vecLength = SkSLTypeVecLength(type);
if (!SkSLTypeIsFloatType(type) || vecLength < 1) {
continue;
}
-
// Write our uniform float scalar/vector.
const Uniform expectations[] = {{"uniform", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
// Read back the uniform data.
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
size_t elementSize = element_size(layout, type);
const void* validData = (elementSize == 4) ? (const void*)kFloats : (const void*)kHalfs;
REPORTER_ASSERT(r, uniformData.size() >= vecLength * elementSize);
REPORTER_ASSERT(r, 0 == memcmp(validData, uniformData.data(), vecLength * elementSize),
- "Layout: %d - Type: %s float encoding failed",
- (int)layout, SkSLTypeString(type));
+ "Layout: %s - Type: %s float encoding failed",
+ LayoutString(layout), SkSLTypeString(type));
mgr.reset();
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerCheckIntEncoding, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager encodes int data properly.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type : kTypes) {
if (!SkSLTypeIsIntegralType(type)) {
continue;
}
-
// Write our uniform int scalar/vector.
const Uniform expectations[] = {{"uniform", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kInts);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kInts);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
// Read back the uniform data.
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
int vecLength = SkSLTypeVecLength(type);
size_t elementSize = element_size(layout, type);
- const void* validData = (elementSize == 4) ? (const void*)kInts : (const void*)kShorts;
REPORTER_ASSERT(r, uniformData.size() >= vecLength * elementSize);
- REPORTER_ASSERT(r, 0 == memcmp(validData, uniformData.data(), vecLength * elementSize),
- "Layout: %d - Type: %s int encoding failed",
- (int)layout, SkSLTypeString(type));
+ REPORTER_ASSERT(r, 0 == memcmp(kInts, uniformData.data(), vecLength * elementSize),
+ "Layout: %s - Type: %s int encoding failed",
+ LayoutString(layout), SkSLTypeString(type));
mgr.reset();
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerCheckScalarVectorPacking, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager can pack scalars and vectors of identical type correctly.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type : kTypes) {
int vecLength = SkSLTypeVecLength(type);
if (vecLength < 1) {
continue;
}
-
// Write three matching uniforms.
const Uniform expectations[] = {{"a", type}, {"b", type}, {"c", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.write(type, kFloats);
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ mgr.write(expectations[1], kFloats);
+ mgr.write(expectations[2], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
// Verify the uniform data packing.
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
size_t elementSize = element_size(layout, type);
// Vec3s must be laid out as if they were vec4s.
size_t effectiveVecLength = (vecLength == 3) ? 4 : vecLength;
REPORTER_ASSERT(r, uniformData.size() == elementSize * effectiveVecLength * 3,
- "Layout: %d - Type: %s tight packing failed",
- (int)layout, SkSLTypeString(type));
+ "Layout: %s - Type: %s tight packing failed",
+ LayoutString(layout), SkSLTypeString(type));
mgr.reset();
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerCheckMatrixPacking, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager can pack matrices correctly.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type : kTypes) {
int matrixSize = SkSLTypeMatrixSize(type);
if (matrixSize < 2) {
continue;
}
-
// Write three matching uniforms.
const Uniform expectations[] = {{"a", type}, {"b", type}, {"c", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.write(type, kFloats);
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ mgr.write(expectations[1], kFloats);
+ mgr.write(expectations[2], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
// Verify the uniform data packing.
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
size_t elementSize = element_size(layout, type);
numElements = matrixSize * matrixSize;
}
REPORTER_ASSERT(r, uniformData.size() == elementSize * numElements * 3,
- "Layout: %d - Type: %s matrix packing failed",
- (int)layout, SkSLTypeString(type));
+ "Layout: %s - Type: %s matrix packing failed",
+ LayoutString(layout), SkSLTypeString(type));
mgr.reset();
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerCheckPaddingScalarVector, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager properly adds padding between pairs of scalar/vector.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type1 : kTypes) {
const int vecLength1 = SkSLTypeVecLength(type1);
if (vecLength1 < 1) {
continue;
}
-
for (SkSLType type2 : kTypes) {
const int vecLength2 = SkSLTypeVecLength(type2);
if (vecLength2 < 1) {
continue;
}
-
// Write two scalar/vector uniforms.
const Uniform expectations[] = {{"a", type1}, {"b", type2}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type1, kFloats);
- mgr.write(type2, kFloats);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ mgr.write(expectations[1], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
// The expected packing varies depending on the bit-widths of each element.
const size_t elementSize1 = element_size(layout, type1);
const size_t elementSize2 = element_size(layout, type2);
// A/B: uniform values.
// a/b: padding as part of the uniform type (vec3 takes 4 slots)
// _ : padding between uniforms for alignment
- static constexpr const char* kExpectedLayout[5][5] = {
- { "", "", "", "", "" },
- { "", "AB", "A_BB", "A___BBBb", "A___BBBB" },
- { "", "AAB_", "AABB", "AA__BBBb", "AA__BBBB" },
- { "", "AAAaB___", "AAAaBB__", "AAAaBBBb", "AAAaBBBB" },
- { "", "AAAAB___", "AAAABB__", "AAAABBBb", "AAAABBBB" },
+ static constexpr const char* kExpectedLayout[2][5][5] = {
+ // Metal (vec3 consumes vec4 size)
+ {{ "", "", "", "", "" },
+ { "", "AB", "A_BB", "A___BBBb", "A___BBBB" },
+ { "", "AAB_", "AABB", "AA__BBBb", "AA__BBBB" },
+ { "", "AAAaB___", "AAAaBB__", "AAAaBBBb", "AAAaBBBB" },
+ { "", "AAAAB___", "AAAABB__", "AAAABBBb", "AAAABBBB" }},
+ // std140 and std430 (vec3 aligns to vec4, but consumes only 3 elements)
+ {{ "", "", "", "", "" },
+ { "", "AB", "A_BB", "A___BBBb", "A___BBBB" },
+ { "", "AAB_", "AABB", "AA__BBBb", "AA__BBBB" },
+ { "", "AAAB", "AAA_BB__", "AAA_BBBb", "AAA_BBBB" },
+ { "", "AAAAB___", "AAAABB__", "AAAABBBb", "AAAABBBB" }},
};
- const size_t size = strlen(kExpectedLayout[vecLength1][vecLength2]) *
+ int layoutIdx = static_cast<int>(layout != Layout::kMetal);
+ const size_t size = strlen(kExpectedLayout[layoutIdx][vecLength1][vecLength2]) *
elementSize1;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Layout: %d - Types: %s, %s padding test failed",
- (int)layout, SkSLTypeString(type1), SkSLTypeString(type2));
+ "Layout: %s - Types: %s, %s padding test failed",
+ LayoutString(layout),
+ SkSLTypeString(type1), SkSLTypeString(type2));
} else if (elementSize1 == 2 && elementSize2 == 4) {
// Elements in the array below correspond to 16 bits apiece.
// The expected uniform layout is listed as strings below.
- const size_t size = strlen(kExpectedLayout[vecLength1][vecLength2]) * 2;
+ const size_t size =
+ strlen(kExpectedLayout[layout != Layout::kMetal][vecLength1][vecLength2]) * 2;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Layout: %d - Types: %s, %s padding test failed",
- (int)layout, SkSLTypeString(type1), SkSLTypeString(type2));
+ "Layout: %s - Types: %s, %s padding test failed",
+ LayoutString(layout),
+ SkSLTypeString(type1), SkSLTypeString(type2));
} else if (elementSize1 == 4 && elementSize2 == 2) {
// Elements in the array below correspond to 16 bits apiece.
// The expected uniform layout is listed as strings below.
- const size_t size = strlen(kExpectedLayout[vecLength1][vecLength2]) * 2;
+ const size_t size =
+ strlen(kExpectedLayout[layout != Layout::kMetal][vecLength1][vecLength2]) * 2;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Layout: %d - Types: %s, %s padding test failed",
- (int)layout, SkSLTypeString(type1), SkSLTypeString(type2));
+ "Layout: %s - Types: %s, %s padding test failed",
+ LayoutString(layout),
+ SkSLTypeString(type1), SkSLTypeString(type2));
} else {
ERRORF(r, "Unexpected element sizes: %zu %zu", elementSize1, elementSize2);
}
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerCheckPaddingVectorMatrix, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager properly adds padding between vectors and matrices.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type1 : kTypes) {
const int vecLength1 = SkSLTypeVecLength(type1);
if (vecLength1 < 1) {
continue;
}
-
for (SkSLType type2 : kTypes) {
const int matSize2 = SkSLTypeMatrixSize(type2);
if (matSize2 < 2) {
continue;
}
-
// Write the scalar/vector and matrix uniforms.
const Uniform expectations[] = {{"a", type1}, {"b", type2}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type1, kFloats);
- mgr.write(type2, kFloats);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ mgr.write(expectations[1], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
// The expected packing varies depending on the bit-widths of each element.
const size_t elementSize1 = element_size(layout, type1);
const size_t elementSize2 = element_size(layout, type2);
elementSize1;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Types: %s, %s vector-matrix padding test failed",
+ "Layout: %s - Types: %s, %s vector-matrix padding test failed",
+ LayoutString(layout),
SkSLTypeString(type1), SkSLTypeString(type2));
} else if (elementSize1 == 2 && elementSize2 == 4) {
// Elements in the array below correspond to 16 bits apiece.
const size_t size = strlen(kExpectedLayout[vecLength1][matSize2]) * 2;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Types: %s, %s vector-matrix padding test failed",
+ "Layout: %s - Types: %s, %s vector-matrix padding test failed",
+ LayoutString(layout),
SkSLTypeString(type1), SkSLTypeString(type2));
} else if (elementSize1 == 4 && elementSize2 == 2) {
// Elements in the array below correspond to 16 bits apiece.
const size_t size = strlen(kExpectedLayout[vecLength1][matSize2]) * 2;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Types: %s, %s vector-matrix padding test failed",
+ "Layout: %s - Types: %s, %s vector-matrix padding test failed",
+ LayoutString(layout),
SkSLTypeString(type1), SkSLTypeString(type2));
}
mgr.reset();
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerCheckPaddingMatrixVector, r, CtsEnforcement::kNextRelease) {
// Verify that the uniform manager properly adds padding between matrices and vectors.
for (Layout layout : kLayouts) {
UniformManager mgr(layout);
-
for (SkSLType type1 : kTypes) {
const int matSize1 = SkSLTypeMatrixSize(type1);
if (matSize1 < 2) {
continue;
}
-
for (SkSLType type2 : kTypes) {
const int vecLength2 = SkSLTypeVecLength(type2);
if (vecLength2 < 1) {
continue;
}
-
// Write the scalar/vector and matrix uniforms.
const Uniform expectations[] = {{"a", type1}, {"b", type2}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type1, kFloats);
- mgr.write(type2, kFloats);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ mgr.write(expectations[1], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
// The expected packing varies depending on the bit-widths of each element.
const size_t elementSize1 = element_size(layout, type1);
const size_t elementSize2 = element_size(layout, type2);
{
{ "", "", "", "", "" },
{ "", "", "", "", "" },
- { "", "AAaaAAaaB_", "AAaaAAaaBB", "AAaaAAaaBBBb", "AAaaAAaaBBBB" },
+ { "", "AAaaAAaaB___", "AAaaAAaaBB__", "AAaaAAaaBBBb", "AAaaAAaaBBBB" },
{ "",
"AAAaAAAaAAAaB___",
"AAAaAAAaAAAaBB__",
elementSize1;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Types: %s, %s matrix-vector padding test failed",
+ "Layout: %s - Types: %s, %s matrix-vector padding test failed",
+ LayoutString(layout),
SkSLTypeString(type1), SkSLTypeString(type2));
} else if (elementSize1 == 2 && elementSize2 == 4) {
// Elements in the array below correspond to 16 bits apiece.
const size_t size = strlen(kExpectedLayout[matSize1][vecLength2]) * 2;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Types: %s, %s matrix-vector padding test failed",
+ "Layout: %s - Types: %s, %s matrix-vector padding test failed",
+ LayoutString(layout),
SkSLTypeString(type1), SkSLTypeString(type2));
} else if (elementSize1 == 4 && elementSize2 == 2) {
// Elements in the array below correspond to 16 bits apiece.
const size_t size = strlen(kExpectedLayout[matSize1][vecLength2]) * 2;
UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == size,
- "Types: %s, %s matrix-vector padding test failed",
+ "Layout: %s - Types: %s, %s matrix-vector padding test failed",
+ LayoutString(layout),
SkSLTypeString(type1), SkSLTypeString(type2));
}
mgr.reset();
}
}
}
-
DEF_GRAPHITE_TEST(UniformManagerMetalArrayLayout, r, CtsEnforcement::kNextRelease) {
UniformManager mgr(Layout::kMetal);
-
// Tests set up a uniform block with a single half (to force alignment) and an array of 3
// elements. Test every type that can appear in an array.
constexpr size_t kArraySize = 3;
-
// Buffer large enough to hold a float4x4[3] array.
static constexpr uint8_t kBuffer[192] = {};
static const char* kExpectedLayout[] = {
// A/B: uniform values.
// a/b: padding as part of the uniform type.
// _ : padding between uniforms for alignment.
-
- /* {half, short[3]} */ "AABBBBBB",
- /* {half, short2[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, short3[3]} */ "AA______BBBBBBbbBBBBBBbbBBBBBBbb",
- /* {half, short4[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, float[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, float2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, float3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, float4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, half[3]} */ "AABBBBBB",
- /* {half, half2[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, half3[3]} */ "AA______BBBBBBbbBBBBBBbbBBBBBBbb",
- /* {half, half4[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, int[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, int2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, int3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, int4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
-
+ /* {half, float[3]} */ "AA__BBBBBBBBBBBB",
+ /* {half, float2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, float3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
+ /* {half, float4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, half[3]} */ "AABBBBBB",
+ /* {half, half2[3]} */ "AA__BBBBBBBBBBBB",
+ /* {half, half3[3]} */ "AA______BBBBBBbbBBBBBBbbBBBBBBbb",
+ /* {half, half4[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, int[3]} */ "AA__BBBBBBBBBBBB",
+ /* {half, int2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, int3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
+ /* {half, int4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
/* {half, float2x2[3] */ "AA______BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
/* {half, float3x3[3] */ "AA______________"
"BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
-
- /* {half, half2x2[3] */ "AA__BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, half3x3[3] */ "AA______"
+ /* {half, half2x2[3] */ "AA__BBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, half3x3[3] */ "AA______"
"BBBBBBbbBBBBBBbbBBBBBBbb"
"BBBBBBbbBBBBBBbbBBBBBBbb"
"BBBBBBbbBBBBBBbbBBBBBBbb",
- /* {half, half4x4[3] */ "AA______"
+ /* {half, half4x4[3] */ "AA______"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
for (size_t i = 0; i < std::size(kExpectedLayout); i++) {
const SkSLType arrayType = kTypes[i];
const Uniform expectations[] = {{"a", SkSLType::kHalf}, {"b", arrayType, kArraySize}};
-
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(SkSLType::kHalf, kHalfs);
- mgr.writeArray(arrayType, kBuffer, kArraySize);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kHalfs);
+ mgr.write(expectations[1], kBuffer);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
const size_t expectedSize = strlen(kExpectedLayout[i]);
const UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == expectedSize,
"array test %d for type %s failed - expected size: %zu, actual size: %zu",
(int)i, SkSLTypeString(arrayType), expectedSize, uniformData.size());
-
mgr.reset();
}
}
-
-DEF_GRAPHITE_TEST(UniformManagerStd431ArrayLayout, r, CtsEnforcement::kNextRelease) {
+DEF_GRAPHITE_TEST(UniformManagerStd430ArrayLayout, r, CtsEnforcement::kNextRelease) {
UniformManager mgr(Layout::kStd430);
-
// Tests set up a uniform block with a single half (to force alignment) and an array of 3
// elements. Test every type that can appear in an array.
constexpr size_t kArraySize = 3;
-
// Buffer large enough to hold a float4x4[3] array.
static constexpr uint8_t kBuffer[192] = {};
static const char* kExpectedLayout[] = {
// A/B: uniform values.
// a/b: padding as part of the uniform type.
// _ : padding between uniforms for alignment.
-
- /* {half, short[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, short2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, short3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, short4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, float[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, float2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, float3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, float4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, half[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, half2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, half3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, half4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, int[3]} */ "AA__BBBBBBBBBBBB",
- /* {half, int2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, int3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, int4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
-
+ /* {half, float[3]} */ "AA__BBBBBBBBBBBB",
+ /* {half, float2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, float3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
+ /* {half, float4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, half[3]} */ "AA__BBBBBBBBBBBB",
+ /* {half, half2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, half3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
+ /* {half, half4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, int[3]} */ "AA__BBBBBBBBBBBB",
+ /* {half, int2[3]} */ "AA______BBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, int3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
+ /* {half, int4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
/* {half, float2x2[3] */ "AA______BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
/* {half, float3x3[3] */ "AA______________"
"BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
-
/* {half, half2x2[3] */ "AA______BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
/* {half, half3x3[3] */ "AA______________"
"BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb"
for (size_t i = 0; i < std::size(kExpectedLayout); i++) {
const SkSLType arrayType = kTypes[i];
const Uniform expectations[] = {{"a", SkSLType::kHalf}, {"b", arrayType, kArraySize}};
-
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(SkSLType::kHalf, kHalfs);
- mgr.writeArray(arrayType, kBuffer, kArraySize);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kHalfs);
+ mgr.write(expectations[1], kBuffer);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
const size_t expectedSize = strlen(kExpectedLayout[i]);
const UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == expectedSize,
"array test %d for type %s failed - expected size: %zu, actual size: %zu",
(int)i, SkSLTypeString(arrayType), expectedSize, uniformData.size());
-
mgr.reset();
}
}
-
DEF_GRAPHITE_TEST(UniformManagerStd140ArrayLayout, r, CtsEnforcement::kNextRelease) {
UniformManager mgr(Layout::kStd140);
-
// Tests set up a uniform block with a single half (to force alignment) and an array of 3
// elements. Test every type that can appear in an array.
constexpr size_t kArraySize = 3;
-
// Buffer large enough to hold a float4x4[3] array.
static constexpr uint8_t kBuffer[192] = {};
static const char* kExpectedLayout[] = {
// A/B: uniform values.
// a/b: padding as part of the uniform type.
// _ : padding between uniforms for alignment.
-
- /* {half, short[3]} */ "AA______________BBbbbbbbbbbbbbbbBBbbbbbbbbbbbbbbBBbbbbbbbbbbbbbb",
- /* {half, short2[3]} */ "AA______________BBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbb",
- /* {half, short3[3]} */ "AA______________BBBBBBbbbbbbbbbbBBBBBBbbbbbbbbbbBBBBBBbbbbbbbbbb",
- /* {half, short4[3]} */ "AA______________BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb",
- /* {half, float[3]} */ "AA______________BBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbb",
- /* {half, float2[3]} */ "AA______________BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb",
- /* {half, float3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, float4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
- /* {half, half[3]} */ "AA______________BBbbbbbbbbbbbbbbBBbbbbbbbbbbbbbbBBbbbbbbbbbbbbbb",
- /* {half, half2[3]} */ "AA______________BBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbb",
- /* {half, half3[3]} */ "AA______________BBBBBBbbbbbbbbbbBBBBBBbbbbbbbbbbBBBBBBbbbbbbbbbb",
- /* {half, half4[3]} */ "AA______________BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb",
- /* {half, int[3]} */ "AA______________BBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbb",
- /* {half, int2[3]} */ "AA______________BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb",
- /* {half, int3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
- /* {half, int4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
-
+ /* {half, float[3]} */ "AA______________BBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbb",
+ /* {half, float2[3]} */ "AA______________BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb",
+ /* {half, float3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
+ /* {half, float4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
+ /* {half, half[3]} */ "AA______________BBbbbbbbbbbbbbbbBBbbbbbbbbbbbbbbBBbbbbbbbbbbbbbb",
+ /* {half, half2[3]} */ "AA______________BBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbb",
+ /* {half, half3[3]} */ "AA______________BBBBBBbbbbbbbbbbBBBBBBbbbbbbbbbbBBBBBBbbbbbbbbbb",
+ /* {half, half4[3]} */ "AA______________BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb",
+ /* {half, int[3]} */ "AA______________BBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbbBBBBbbbbbbbbbbbb",
+ /* {half, int2[3]} */ "AA______________BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb",
+ /* {half, int3[3]} */ "AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb",
+ /* {half, int4[3]} */ "AA______________BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
/* {half, float2x2[3]} */ "AA______________"
"BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb"
"BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
-
/* {half, half2x2[3]} */ "AA______________"
"BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb"
"BBBBBBBBbbbbbbbbBBBBBBBBbbbbbbbb"
for (size_t i = 0; i < std::size(kExpectedLayout); i++) {
const SkSLType arrayType = kTypes[i];
const Uniform expectations[] = {{"a", SkSLType::kHalf}, {"b", arrayType, kArraySize}};
-
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(SkSLType::kHalf, kHalfs);
- mgr.writeArray(arrayType, kBuffer, kArraySize);
- mgr.doneWithExpectedUniforms();
-
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kHalfs);
+ mgr.write(expectations[1], kBuffer);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
const size_t expectedSize = strlen(kExpectedLayout[i]);
const UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == expectedSize,
"array test %d for type %s failed - expected size: %zu, actual size: %zu",
(int)i, SkSLTypeString(arrayType), expectedSize, uniformData.size());
-
mgr.reset();
}
}
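Each kExpectedLayout entry doubles as a byte map, which is why strlen() above yields the expected total size: 'A'/'B' mark bytes of uniforms a and b, 'b' marks padding baked into b's element type (the unused lane of a vec3), and '_' marks alignment padding between uniforms. A hedged sketch of a decoder for that convention (describe_layout is a made-up helper, not part of the test):

#include <cstdio>
#include <cstring>

// Tallies data vs. padding bytes in an expected-layout string.
static void describe_layout(const char* layout) {
    size_t data = 0, padding = 0;
    for (const char* p = layout; *p; ++p) {
        (*p == 'A' || *p == 'B') ? ++data : ++padding;
    }
    std::printf("total %zu bytes (%zu data, %zu padding)\n",
                std::strlen(layout), data, padding);
}

// {half, float3[3]} in std140: a 2-byte half, 14 bytes of alignment padding,
// then three 12-byte vec3 elements each padded out to a 16-byte stride.
// describe_layout("AA______________BBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbbBBBBBBBBBBBBbbbb");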
-
// This test validates that the uniform data for matrix types gets written out according to the
// layout expectations.
DEF_GRAPHITE_TEST(UniformManagerStd140MatrixLayoutContents, r, CtsEnforcement::kNextRelease) {
UniformManager mgr(Layout::kStd140);
-
// float2x2, half2x2
for (SkSLType type : {SkSLType::kFloat2x2, SkSLType::kHalf2x2}) {
const Uniform expectations[] = {{"m", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
const UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == 32,
"%s layout size expected 32, got %zu",
SkSLTypeString(type), uniformData.size());
-
// The expected offsets of the 4 matrix elements.
const int kOffsets[4] = {0, 1, 4, 5};
const float* elements = reinterpret_cast<const float*>(uniformData.data());
}
mgr.reset();
}
-
// float3x3, half3x3
for (SkSLType type : {SkSLType::kFloat3x3, SkSLType::kHalf3x3}) {
const Uniform expectations[] = {{"m", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
const UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == 48,
"%s layout size expected 48, got %zu",
SkSLTypeString(type), uniformData.size());
-
// The expected offsets of the 9 matrix elements.
const int kOffsets[9] = {0, 1, 2, 4, 5, 6, 8, 9, 10};
const float* elements = reinterpret_cast<const float*>(uniformData.data());
mgr.reset();
}
}
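The {0, 1, 4, 5} offsets follow from std140 treating a matrix as an array of its column vectors: each float2 column is padded to the 16-byte array stride, so column 1 starts at float index 4. The hunk elides the comparison loop; a sketch of what it presumably checks, as a fragment in the test's scope (assuming, as the reinterpret_cast suggests, that half uniforms are widened to 32-bit floats in this layout):

for (size_t j = 0; j < std::size(kOffsets); ++j) {
    // Source element j must land at the padded column offset.
    REPORTER_ASSERT(r, elements[kOffsets[j]] == kFloats[j]);
}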
-
// This test validates that the uniform data for matrix types gets written out according to the
// layout expectations.
DEF_GRAPHITE_TEST(UniformManagerStd430MatrixLayoutContents, r, CtsEnforcement::kNextRelease) {
UniformManager mgr(Layout::kStd430);
-
// float2x2, half2x2
for (SkSLType type : {SkSLType::kFloat2x2, SkSLType::kHalf2x2}) {
const Uniform expectations[] = {{"m", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
const UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == 16,
"%s layout size expected 16, got %zu",
SkSLTypeString(type), uniformData.size());
-
// The expected offsets of the 4 matrix elements. This uses a tighter packing than std140
// layout.
const int kOffsets[4] = {0, 1, 2, 3};
}
mgr.reset();
}
-
// float3x3, half3x3
for (SkSLType type : {SkSLType::kFloat3x3, SkSLType::kHalf3x3}) {
const Uniform expectations[] = {{"m", type}};
- mgr.setExpectedUniforms(SkSpan(expectations));
- mgr.write(type, kFloats);
- mgr.doneWithExpectedUniforms();
+ SkDEBUGCODE(mgr.setExpectedUniforms(SkSpan(expectations));)
+ mgr.write(expectations[0], kFloats);
+ SkDEBUGCODE(mgr.doneWithExpectedUniforms();)
const UniformDataBlock uniformData = mgr.finishUniformDataBlock();
REPORTER_ASSERT(r, uniformData.size() == 48,
"%s layout size expected 48, got %zu",
SkSLTypeString(type), uniformData.size());
-
// The expected offsets of the 9 matrix elements. This is the same as std140 layout.
const int kOffsets[9] = {0, 1, 2, 4, 5, 6, 8, 9, 10};
const float* elements = reinterpret_cast<const float*>(uniformData.data());
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
-
#include "src/core/SkSLTypeShared.h"
#include "src/gpu/graphite/Uniform.h"
#include "src/gpu/graphite/UniformManager.h"
#include "tests/Test.h"
-
using namespace skgpu::graphite;
-
namespace {
-
// Used to test the exact alignment and size of an individual type. Returns the alignment and size
// as a pair.
struct AlignmentAndSize {
size_t alignment = calc.advanceOffset(type, arrayCount);
return {alignment, calc.size() - alignment};
}
-
#define EXPECT(type, expectedAlignment, expectedSize) \
do { \
auto [alignment, size] = calculate_alignment_and_size(kLayout, type); \
expectedSize, \
size); \
} while (0)
-
#define EXPECT_ARRAY(type, expectedAlignment, expectedStride, expectedSize) \
do { \
auto [alignment, size] = calculate_alignment_and_size(kLayout, type, kCount); \
expectedStride, \
stride); \
} while (0)
-
DEF_GRAPHITE_TEST(UniformOffsetCalculatorMetalBasicTypesTest, r, CtsEnforcement::kNextRelease) {
constexpr Layout kLayout = Layout::kMetal;
-
- // scalars: int, float, short, half (unsigned types are disallowed)
+ // scalars: int, float, half (unsigned types are disallowed)
EXPECT(SkSLType::kInt, /*alignment=*/4, /*size=*/4);
EXPECT(SkSLType::kFloat, /*alignment=*/4, /*size=*/4);
- EXPECT(SkSLType::kShort, /*alignment=*/2, /*size=*/2);
EXPECT(SkSLType::kHalf, /*alignment=*/2, /*size=*/2);
-
- // int2, float2, short2, half2
+ // int2, float2, half2
EXPECT(SkSLType::kInt2, /*alignment=*/8, /*size=*/8);
EXPECT(SkSLType::kFloat2, /*alignment=*/8, /*size=*/8);
- EXPECT(SkSLType::kShort2, /*alignment=*/4, /*size=*/4);
EXPECT(SkSLType::kHalf2, /*alignment=*/4, /*size=*/4);
-
- // int3, float3, short3, half3
+ // int3, float3, half3 (unlike std430, size is also rounded up)
EXPECT(SkSLType::kInt3, /*alignment=*/16, /*size=*/16);
EXPECT(SkSLType::kFloat3, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kShort3, /*alignment=*/8, /*size=*/8);
EXPECT(SkSLType::kHalf3, /*alignment=*/8, /*size=*/8);
-
- // int4, float4, short4, half4
+ // int4, float4, half4
EXPECT(SkSLType::kInt4, /*alignment=*/16, /*size=*/16);
EXPECT(SkSLType::kFloat4, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kShort4, /*alignment=*/8, /*size=*/8);
EXPECT(SkSLType::kHalf4, /*alignment=*/8, /*size=*/8);
-
// float2x2, half2x2
EXPECT(SkSLType::kFloat2x2, /*alignment=*/8, /*size=*/16);
EXPECT(SkSLType::kHalf2x2, /*alignment=*/4, /*size=*/8);
-
// float3x3, half3x3
EXPECT(SkSLType::kFloat3x3, /*alignment=*/16, /*size=*/48);
EXPECT(SkSLType::kHalf3x3, /*alignment=*/8, /*size=*/24);
-
// float4x4, half4x4
EXPECT(SkSLType::kFloat4x4, /*alignment=*/16, /*size=*/64);
EXPECT(SkSLType::kHalf4x4, /*alignment=*/8, /*size=*/32);
}
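The Metal expectations above follow one rule: a vector's size is its component count (with vec3 promoted to a vec4 footprint) times the scalar width, and its alignment equals that size; matrices then behave as arrays of their columns. A compile-time sketch under those assumptions (not the actual UniformOffsetCalculator code):

// Metal-style size for an N-component vector of scalarBytes-wide scalars;
// vec3 occupies a vec4's space, and alignment equals the padded size.
static constexpr size_t metal_vec_size(size_t scalarBytes, int n) {
    return scalarBytes * (n == 3 ? 4 : n);
}
static_assert(metal_vec_size(4, 3) == 16);  // float3: align 16, size 16
static_assert(metal_vec_size(2, 3) == 8);   // half3:  align 8,  size 8
static_assert(metal_vec_size(2, 1) == 2);   // half:   align 2,  size 2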
-
DEF_GRAPHITE_TEST(UniformOffsetCalculatorMetalArrayTest, r, CtsEnforcement::kNextRelease) {
constexpr Layout kLayout = Layout::kMetal;
constexpr size_t kCount = 3;
-
- // int[3], float[3], short[3], half[3]
+ // int[3], float[3], half[3]
EXPECT_ARRAY(SkSLType::kInt, /*alignment=*/4, /*stride=*/4, /*size=*/12);
EXPECT_ARRAY(SkSLType::kFloat, /*alignment=*/4, /*stride=*/4, /*size=*/12);
- EXPECT_ARRAY(SkSLType::kShort, /*alignment=*/2, /*stride=*/2, /*size=*/6);
EXPECT_ARRAY(SkSLType::kHalf, /*alignment=*/2, /*stride=*/2, /*size=*/6);
-
- // int2[3], float2[3], short2[3], half2[3]
+ // int2[3], float2[3], half2[3]
EXPECT_ARRAY(SkSLType::kInt2, /*alignment=*/8, /*stride=*/8, /*size=*/24);
EXPECT_ARRAY(SkSLType::kFloat2, /*alignment=*/8, /*stride=*/8, /*size=*/24);
- EXPECT_ARRAY(SkSLType::kShort2, /*alignment=*/4, /*stride=*/4, /*size=*/12);
EXPECT_ARRAY(SkSLType::kHalf2, /*alignment=*/4, /*stride=*/4, /*size=*/12);
-
- // int3[3], float3[3], short3[3], half3[3]
+ // int3[3], float3[3], half3[3]
EXPECT_ARRAY(SkSLType::kInt3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort3, /*alignment=*/8, /*stride=*/8, /*size=*/24);
EXPECT_ARRAY(SkSLType::kHalf3, /*alignment=*/8, /*stride=*/8, /*size=*/24);
-
- // int4[3], float4[3], short4[3], half4[3]
+ // int4[3], float4[3], half4[3]
EXPECT_ARRAY(SkSLType::kInt4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort4, /*alignment=*/8, /*stride=*/8, /*size=*/24);
EXPECT_ARRAY(SkSLType::kHalf4, /*alignment=*/8, /*stride=*/8, /*size=*/24);
-
// float2x2[3], half2x2[3]
EXPECT_ARRAY(SkSLType::kFloat2x2, /*alignment=*/8, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf2x2, /*alignment=*/4, /*stride=*/8, /*size=*/24);
-
// float3x3[3], half3x3[3]
EXPECT_ARRAY(SkSLType::kFloat3x3, /*alignment=*/16, /*stride=*/48, /*size=*/144);
EXPECT_ARRAY(SkSLType::kHalf3x3, /*alignment=*/8, /*stride=*/24, /*size=*/72);
-
// float4x4[3], half4x4[3]
EXPECT_ARRAY(SkSLType::kFloat4x4, /*alignment=*/16, /*stride=*/64, /*size=*/192);
EXPECT_ARRAY(SkSLType::kHalf4x4, /*alignment=*/8, /*stride=*/32, /*size=*/96);
}
-
DEF_GRAPHITE_TEST(UniformOffsetCalculatorStd430BasicTypesTest, r, CtsEnforcement::kNextRelease) {
constexpr Layout kLayout = Layout::kStd430;
-
- // scalars: int, float, short, half (unsigned types are disallowed)
+ // scalars: int, float, half (unsigned types are disallowed)
EXPECT(SkSLType::kInt, /*alignment=*/4, /*size=*/4);
EXPECT(SkSLType::kFloat, /*alignment=*/4, /*size=*/4);
- EXPECT(SkSLType::kShort, /*alignment=*/4, /*size=*/4);
EXPECT(SkSLType::kHalf, /*alignment=*/4, /*size=*/4);
-
- // int2, float2, short2, half2
+ // int2, float2, half2
EXPECT(SkSLType::kInt2, /*alignment=*/8, /*size=*/8);
EXPECT(SkSLType::kFloat2, /*alignment=*/8, /*size=*/8);
- EXPECT(SkSLType::kShort2, /*alignment=*/8, /*size=*/8);
EXPECT(SkSLType::kHalf2, /*alignment=*/8, /*size=*/8);
-
- // int3, float3, short3, half3
- EXPECT(SkSLType::kInt3, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kFloat3, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kShort3, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kHalf3, /*alignment=*/16, /*size=*/16);
-
- // int4, float4, short4, half4
+ // int3, float3, half3 (size is not rounded up for non-arrays of vec3s)
+ EXPECT(SkSLType::kInt3, /*alignment=*/16, /*size=*/12);
+ EXPECT(SkSLType::kFloat3, /*alignment=*/16, /*size=*/12);
+ EXPECT(SkSLType::kHalf3, /*alignment=*/16, /*size=*/12);
+ // int4, float4, half4
EXPECT(SkSLType::kInt4, /*alignment=*/16, /*size=*/16);
EXPECT(SkSLType::kFloat4, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kShort4, /*alignment=*/16, /*size=*/16);
EXPECT(SkSLType::kHalf4, /*alignment=*/16, /*size=*/16);
-
// float2x2, half2x2
EXPECT(SkSLType::kFloat2x2, /*alignment=*/8, /*size=*/16);
EXPECT(SkSLType::kHalf2x2, /*alignment=*/8, /*size=*/16);
-
// float3x3, half3x3
EXPECT(SkSLType::kFloat3x3, /*alignment=*/16, /*size=*/48);
EXPECT(SkSLType::kHalf3x3, /*alignment=*/16, /*size=*/48);
-
// float4x4, half4x4
EXPECT(SkSLType::kFloat4x4, /*alignment=*/16, /*size=*/64);
EXPECT(SkSLType::kHalf4x4, /*alignment=*/16, /*size=*/64);
}
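The std430 figures reduce to the standard base alignments: scalars align to their own width (half is handled as 4 bytes here), vec2 to twice that, and vec3/vec4 both to four times. The updated expectations keep a lone vec3's size at 12 because only its offset, not its tail, is rounded up to 16. A compile-time sketch under those assumptions:

// std430 base alignment for an N-component vector of 4-byte scalars.
static constexpr size_t std430_alignment(int n) {
    return n == 1 ? 4 : n == 2 ? 8 : 16;  // vec3 and vec4 both align to 16
}
static_assert(std430_alignment(3) == 16);
// A non-array vec3 still occupies only 3 * 4 = 12 bytes.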
-
DEF_GRAPHITE_TEST(UniformOffsetCalculatorStd430ArrayTest, r, CtsEnforcement::kNextRelease) {
constexpr Layout kLayout = Layout::kStd430;
constexpr size_t kCount = 3;
-
- // int[3], float[3], short[3], half[3]
+ // int[3], float[3], half[3]
EXPECT_ARRAY(SkSLType::kInt, /*alignment=*/4, /*stride=*/4, /*size=*/12);
EXPECT_ARRAY(SkSLType::kFloat, /*alignment=*/4, /*stride=*/4, /*size=*/12);
- EXPECT_ARRAY(SkSLType::kShort, /*alignment=*/4, /*stride=*/4, /*size=*/12);
EXPECT_ARRAY(SkSLType::kHalf, /*alignment=*/4, /*stride=*/4, /*size=*/12);
-
- // int2[3], float2[3], short2[3], half2[3]
+ // int2[3], float2[3], half2[3]
EXPECT_ARRAY(SkSLType::kInt2, /*alignment=*/8, /*stride=*/8, /*size=*/24);
EXPECT_ARRAY(SkSLType::kFloat2, /*alignment=*/8, /*stride=*/8, /*size=*/24);
- EXPECT_ARRAY(SkSLType::kShort2, /*alignment=*/8, /*stride=*/8, /*size=*/24);
EXPECT_ARRAY(SkSLType::kHalf2, /*alignment=*/8, /*stride=*/8, /*size=*/24);
-
- // int3[3], float3[3], short3[3], half3[3]
+ // int3[3], float3[3], half3[3] (stride is rounded up in arrays)
EXPECT_ARRAY(SkSLType::kInt3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
-
- // int4[3], float4[3], short4[3], half4[3]
+ // int4[3], float4[3], half4[3]
EXPECT_ARRAY(SkSLType::kInt4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
-
// float2x2[3], half2x2[3]
EXPECT_ARRAY(SkSLType::kFloat2x2, /*alignment=*/8, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf2x2, /*alignment=*/8, /*stride=*/16, /*size=*/48);
-
// float3x3[3], half3x3[3]
EXPECT_ARRAY(SkSLType::kFloat3x3, /*alignment=*/16, /*stride=*/48, /*size=*/144);
EXPECT_ARRAY(SkSLType::kHalf3x3, /*alignment=*/16, /*stride=*/48, /*size=*/144);
-
// float4x4[3], half4x4[3]
EXPECT_ARRAY(SkSLType::kFloat4x4, /*alignment=*/16, /*stride=*/64, /*size=*/192);
EXPECT_ARRAY(SkSLType::kHalf4x4, /*alignment=*/16, /*stride=*/64, /*size=*/192);
}
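For the array cases the stride is the element size rounded up to the element alignment, which is why float3[3] consumes 3 * 16 = 48 bytes while float[3] stays tightly packed at 12. A sketch of that rounding (round_up is an illustrative helper; alignment must be a power of two):

static constexpr size_t round_up(size_t n, size_t alignment) {
    return (n + alignment - 1) & ~(alignment - 1);
}
static_assert(round_up(12, 16) == 16);  // std430 float3[] stride
static_assert(round_up(4, 4) == 4);     // std430 float[] stride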
-
DEF_GRAPHITE_TEST(UniformOffsetCalculatorStd140BasicTypesTest, r, CtsEnforcement::kNextRelease) {
constexpr Layout kLayout = Layout::kStd140;
-
- // scalars: int, float, short, half (unsigned types are disallowed)
+ // scalars: int, float, half (unsigned types are disallowed)
EXPECT(SkSLType::kInt, /*alignment=*/4, /*size=*/4);
EXPECT(SkSLType::kFloat, /*alignment=*/4, /*size=*/4);
- EXPECT(SkSLType::kShort, /*alignment=*/4, /*size=*/4);
EXPECT(SkSLType::kHalf, /*alignment=*/4, /*size=*/4);
-
- // int2, float2, short2, half2
+ // int2, float2, half2
EXPECT(SkSLType::kInt2, /*alignment=*/8, /*size=*/8);
EXPECT(SkSLType::kFloat2, /*alignment=*/8, /*size=*/8);
- EXPECT(SkSLType::kShort2, /*alignment=*/8, /*size=*/8);
EXPECT(SkSLType::kHalf2, /*alignment=*/8, /*size=*/8);
-
- // int3, float3, short3, half3
- EXPECT(SkSLType::kInt3, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kFloat3, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kShort3, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kHalf3, /*alignment=*/16, /*size=*/16);
-
- // int4, float4, short4, half4
+ // int3, float3, half3 (size is not rounded up for non-arrays of vec3s)
+ EXPECT(SkSLType::kInt3, /*alignment=*/16, /*size=*/12);
+ EXPECT(SkSLType::kFloat3, /*alignment=*/16, /*size=*/12);
+ EXPECT(SkSLType::kHalf3, /*alignment=*/16, /*size=*/12);
+ // int4, float4, half4
EXPECT(SkSLType::kInt4, /*alignment=*/16, /*size=*/16);
EXPECT(SkSLType::kFloat4, /*alignment=*/16, /*size=*/16);
- EXPECT(SkSLType::kShort4, /*alignment=*/16, /*size=*/16);
EXPECT(SkSLType::kHalf4, /*alignment=*/16, /*size=*/16);
-
// float2x2, half2x2
EXPECT(SkSLType::kFloat2x2, /*alignment=*/16, /*size=*/32);
EXPECT(SkSLType::kHalf2x2, /*alignment=*/16, /*size=*/32);
-
// float3x3, half3x3
EXPECT(SkSLType::kFloat3x3, /*alignment=*/16, /*size=*/48);
EXPECT(SkSLType::kHalf3x3, /*alignment=*/16, /*size=*/48);
-
// float4x4, half4x4
EXPECT(SkSLType::kFloat4x4, /*alignment=*/16, /*size=*/64);
EXPECT(SkSLType::kHalf4x4, /*alignment=*/16, /*size=*/64);
}
-
DEF_GRAPHITE_TEST(UniformOffsetCalculatorStd140ArrayTest, r, CtsEnforcement::kNextRelease) {
constexpr Layout kLayout = Layout::kStd140;
constexpr uint32_t kCount = 3;
-
- // int[3], float[3], short[3], half[3]
+ // int[3], float[3], half[3]
EXPECT_ARRAY(SkSLType::kInt, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf, /*alignment=*/16, /*stride=*/16, /*size=*/48);
-
- // int2[3], float2[3], short2[3], half2[3]
+ // int2[3], float2[3], half2[3]
EXPECT_ARRAY(SkSLType::kInt2, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat2, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort2, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf2, /*alignment=*/16, /*stride=*/16, /*size=*/48);
-
- // int3[3], float3[3], short3[3], half3[3]
+ // int3[3], float3[3], half3[3]
EXPECT_ARRAY(SkSLType::kInt3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf3, /*alignment=*/16, /*stride=*/16, /*size=*/48);
-
- // int4[3], float4[3], short4[3], half4[3]
+ // int4[3], float4[3], half4[3]
EXPECT_ARRAY(SkSLType::kInt4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kFloat4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
- EXPECT_ARRAY(SkSLType::kShort4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
EXPECT_ARRAY(SkSLType::kHalf4, /*alignment=*/16, /*stride=*/16, /*size=*/48);
-
// float2x2[3], half2x2[3]
EXPECT_ARRAY(SkSLType::kFloat2x2, /*alignment=*/16, /*stride=*/32, /*size=*/96);
EXPECT_ARRAY(SkSLType::kHalf2x2, /*alignment=*/16, /*stride=*/32, /*size=*/96);
-
// float3x3[3], half3x3[3]
EXPECT_ARRAY(SkSLType::kFloat3x3, /*alignment=*/16, /*stride=*/48, /*size=*/144);
EXPECT_ARRAY(SkSLType::kHalf3x3, /*alignment=*/16, /*stride=*/48, /*size=*/144);
-
// float4x4[3], half4x4[3]
EXPECT_ARRAY(SkSLType::kFloat4x4, /*alignment=*/16, /*stride=*/64, /*size=*/192);
EXPECT_ARRAY(SkSLType::kHalf4x4, /*alignment=*/16, /*stride=*/64, /*size=*/192);
}
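The only difference std140 adds on top of the std430 results above is that array (and matrix-column) strides are further rounded up to 16 bytes, which is why even int[3] occupies 48 bytes here. A one-rule sketch (std140_array_stride is a made-up name):

static constexpr size_t std140_array_stride(size_t std430Stride) {
    return (std430Stride + 15) & ~size_t{15};  // round up to 16
}
static_assert(std140_array_stride(4) == 16);   // int[]: 4 -> 16 per element
static_assert(std140_array_stride(64) == 64);  // float4x4 is already aligned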
-
} // namespace
// increment the iterator past the end, we remain at the past-the-end iterator
// condition.
if (child_ && parent_) {
- if (child_ == (parent_->*LastChild)())
+ if (child_ == (parent_.get()->*LastChild)())
child_ = nullptr;
else
- child_ = (child_->*NextSibling)();
+ child_ = (child_.get()->*NextSibling)();
}
return *this;
// If the iterator is past the end (|child_ == nullptr|), decrementing it
// gives us the last element.
if (!child_)
- child_ = (parent_->*LastChild)();
+ child_ = (parent_.get()->*LastChild)();
// Decrementing the iterator gives us the previous element, except when the
// iterator is at the beginning, in which case it remains at the beginning.
- else if (child_ != (parent_->*FirstChild)())
- child_ = (child_->*PreviousSibling)();
+ else if (child_ != (parent_.get()->*FirstChild)())
+ child_ = (child_.get()->*PreviousSibling)();
}
return *this;
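The `.get()` calls are the actual GCC fix in this hunk: `parent_` and `child_` are held in a pointer wrapper rather than as raw pointers, and the built-in `->*` operator is not forwarded by such a wrapper, so the member-function pointer has to be invoked through the unwrapped pointer. A reduced illustration (Node/Ptr are hypothetical stand-ins; the real wrapper in Chromium is raw_ptr, whose conversion behavior under GCC motivated this change):

struct Node {
    Node* first_child() { return nullptr; }
};

template <typename T>
struct Ptr {                     // stand-in for a raw_ptr-like wrapper
    T* get() const { return p_; }
    T* operator->() const { return p_; }
    T* p_ = nullptr;
};

using Getter = Node* (Node::*)();

Node* demo(Ptr<Node> parent, Getter FirstChild) {
    // (parent->*FirstChild)() fails here: ->* is not forwarded by Ptr.
    return (parent.get()->*FirstChild)();  // unwrap first, then use ->*
}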
const gfx::Size& operator*() const { return *get(); }
const gfx::Size* get() const {
if (!size_)
- size_ = (view_->*size_func_)();
+ size_ = (view_.get()->*size_func_)();
return &size_.value();
}
LazyDimension width() const {
void EmitMovesFromSource(RegisterT source_reg, GapMoveTargets&& targets) {
DCHECK(moves_from_register_[source_reg.code()].is_empty());
if constexpr (DecompressIfNeeded) {
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
static_assert(COMPRESS_POINTERS_BOOL);
+#endif
if (targets.needs_decompression == kNeedsDecompression) {
__ DecompressTagged(source_reg, source_reg);
}
// Decompress after the first move, subsequent moves reuse this register so
// they're guaranteed to be decompressed.
if constexpr (DecompressIfNeeded) {
+#if !defined(COMPILER_GCC) || defined(__clang__) // FIXME
static_assert(COMPRESS_POINTERS_BOOL);
+#endif
if (targets.needs_decompression == kNeedsDecompression) {
__ DecompressTagged(register_with_slot_value, register_with_slot_value);
targets.needs_decompression = kDoesNotNeedDecompression;
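Both FIXME guards compile the static_assert(COMPRESS_POINTERS_BOOL) out of GCC builds only. The __clang__ clause is needed because in Chromium's build_config.h Clang also defines __GNUC__ and is therefore classed as COMPILER_GCC, so `!defined(COMPILER_GCC) || defined(__clang__)` is false only for real GCC:

compiler   COMPILER_GCC   __clang__   static_assert kept?
MSVC       no             no          yes
Clang      yes            yes         yes
GCC        yes            no          no  (FIXME: re-enable once fixed)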